Merge branch 'valgrind' of https://github.com/daurnimator/zig into daurnimator-valgrind

master
Andrew Kelley 2019-03-11 13:27:04 -04:00
commit 5362ca204f
No known key found for this signature in database
GPG Key ID: 7C5F548F728501A9
7 changed files with 731 additions and 1 deletions

View File

@ -674,6 +674,9 @@ set(ZIG_STD_FILES
"std.zig"
"testing.zig"
"unicode.zig"
"valgrind.zig"
"valgrind/callgrind.zig"
"valgrind/memcheck.zig"
"zig.zig"
"zig/ast.zig"
"zig/parse.zig"

View File

@ -273,6 +273,10 @@ pub const FixedBufferAllocator = struct {
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
// This loop gets optimized out in ReleaseFast mode
for (buffer) |*byte| {
byte.* = undefined;
}
return FixedBufferAllocator{
.allocator = Allocator{
.allocFn = alloc,

View File

@ -49,6 +49,7 @@ pub const Allocator = struct {
pub fn destroy(self: *Allocator, ptr: var) void {
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
_ = std.valgrind.freeLikeBlock(non_const_ptr, 0);
}
pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
@ -62,6 +63,7 @@ pub const Allocator = struct {
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
assert(byte_slice.len == byte_count);
_ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
// This loop gets optimized out in ReleaseFast mode
for (byte_slice) |*byte| {
byte.* = undefined;
@ -86,6 +88,12 @@ pub const Allocator = struct {
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
assert(byte_slice.len == byte_count);
if (byte_slice.ptr == old_byte_slice.ptr) {
_ = std.valgrind.resizeInPlaceBlock(old_byte_slice, byte_count, 0);
} else {
_ = std.valgrind.freeLikeBlock(old_byte_slice.ptr, 0);
_ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
}
if (n > old_mem.len) {
// This loop gets optimized out in ReleaseFast mode
for (byte_slice[old_byte_slice.len..]) |*byte| {
@ -114,8 +122,15 @@ pub const Allocator = struct {
// n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * n;
const byte_slice = self.reallocFn(self, @sliceToBytes(old_mem), byte_count, alignment) catch unreachable;
const old_byte_slice = @sliceToBytes(old_mem);
const byte_slice = self.reallocFn(self, old_byte_slice, byte_count, alignment) catch unreachable;
assert(byte_slice.len == byte_count);
if (byte_slice.ptr == old_byte_slice.ptr) {
_ = std.valgrind.resizeInPlaceBlock(old_byte_slice, byte_count, 0);
} else {
_ = std.valgrind.freeLikeBlock(old_byte_slice.ptr, 0);
_ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
}
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
@ -124,6 +139,7 @@ pub const Allocator = struct {
if (bytes.len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);
_ = std.valgrind.freeLikeBlock(non_const_ptr, 0);
}
};

View File

@ -45,6 +45,7 @@ pub const rb = @import("rb.zig");
pub const sort = @import("sort.zig");
pub const testing = @import("testing.zig");
pub const unicode = @import("unicode.zig");
pub const valgrind = @import("valgrind.zig");
pub const zig = @import("zig.zig");
test "std" {
@ -91,5 +92,6 @@ test "std" {
_ = @import("sort.zig");
_ = @import("testing.zig");
_ = @import("unicode.zig");
_ = @import("valgrind.zig");
_ = @import("zig.zig");
}

View File

@ -0,0 +1,87 @@
const std = @import("../index.zig");
const valgrind = std.valgrind;
/// Callgrind client request codes.
/// The first value is derived from the tool's two-letter code ("CT") via
/// valgrind.ToolBase; the remaining members take sequential values.
pub const CallgrindClientRequest = extern enum {
    DumpStats = valgrind.ToolBase("CT"),
    ZeroStats,
    ToggleCollect,
    DumpStatsAt,
    StartInstrumentation,
    StopInstrumentation,
};
/// Forward a Callgrind request through the generic client-request
/// mechanism; returns `default` when not running under Valgrind.
fn doCallgrindClientRequestExpr(default: usize, request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
    const code = @intCast(usize, @enumToInt(request));
    return valgrind.doClientRequest(default, code, a1, a2, a3, a4, a5);
}
/// Statement form of doCallgrindClientRequestExpr: the result is discarded.
fn doCallgrindClientRequestStmt(request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
    _ = doCallgrindClientRequestExpr(0, request, a1, a2, a3, a4, a5);
}
/// Dump current state of cost centers, and zero them afterwards.
pub fn dumpStats() void {
    doCallgrindClientRequestStmt(CallgrindClientRequest.DumpStats, 0, 0, 0, 0, 0);
}
/// Dump current state of cost centers, and zero them afterwards.
/// The argument is appended to a string stating the reason which triggered
/// the dump. This string is written as a description field into the
/// profile data dump.
pub fn dumpStatsAt(pos_str: [*]u8) void {
    doCallgrindClientRequestStmt(CallgrindClientRequest.DumpStatsAt, @ptrToInt(pos_str), 0, 0, 0, 0);
}
/// Zero cost centers.
pub fn zeroStats() void {
    doCallgrindClientRequestStmt(CallgrindClientRequest.ZeroStats, 0, 0, 0, 0, 0);
}
/// Toggles collection state.
/// The collection state specifies whether the happening of events
/// should be noted or if they are to be ignored. Events are noted
/// by increment of counters in a cost center.
pub fn toggleCollect() void {
    doCallgrindClientRequestStmt(CallgrindClientRequest.ToggleCollect, 0, 0, 0, 0, 0);
}
/// Start full callgrind instrumentation if not already switched on.
/// When cache simulation is done, it will flush the simulated cache;
/// this will lead to an artificial cache warmup phase afterwards with
/// cache misses which would not have happened in reality.
pub fn startInstrumentation() void {
    doCallgrindClientRequestStmt(CallgrindClientRequest.StartInstrumentation, 0, 0, 0, 0, 0);
}
/// Stop full callgrind instrumentation if not already switched off.
/// This flushes Valgrinds translation cache, and does no additional
/// instrumentation afterwards, which effectivly will run at the same
/// speed as the "none" tool (ie. at minimal slowdown).
/// Use this to bypass Callgrind aggregation for uninteresting code parts.
/// To start Callgrind in this mode to ignore the setup phase, use
/// the option "--instr-atstart=no".
pub fn stopInstrumentation() void {
    doCallgrindClientRequestStmt(CallgrindClientRequest.StopInstrumentation, 0, 0, 0, 0, 0);
}

363
std/valgrind/index.zig Normal file
View File

@ -0,0 +1,363 @@
const builtin = @import("builtin");
const math = @import("index.zig").math;
/// Issue a raw Valgrind client request.
/// The rotate/xchg sequence below is a no-op on real hardware; Valgrind's
/// JIT recognizes it and services the request. The request code and the
/// five arguments are packed into a 6-element array whose address is placed
/// in eax/rax; the result comes back in edx/rdx, which is preloaded with
/// `default` (tied via the "0" constraint) so the call degrades gracefully
/// when not running under Valgrind.
pub fn doClientRequest(default: usize, request: usize,
    a1: usize, a2: usize, a3: usize, a4: usize, a5: usize
) usize
{
    // Compiled out entirely when valgrind support is disabled in the build.
    if (!builtin.valgrind_support) {
        return default;
    }

    switch (builtin.arch) {
        builtin.Arch.i386 => {
            return asm volatile (
                \\ roll $3, %%edi ; roll $13, %%edi
                \\ roll $29, %%edi ; roll $19, %%edi
                \\ xchgl %%ebx,%%ebx
                : [_] "={edx}" (-> usize)
                : [_] "{eax}" (&[]usize{request, a1, a2, a3, a4, a5}),
                  [_] "0" (default)
                : "cc", "memory"
            );
        },
        builtin.Arch.x86_64 => {
            return asm volatile (
                \\ rolq $3, %%rdi ; rolq $13, %%rdi
                \\ rolq $61, %%rdi ; rolq $51, %%rdi
                \\ xchgq %%rbx,%%rbx
                : [_] "={rdx}" (-> usize)
                : [_] "{rax}" (&[]usize{request, a1, a2, a3, a4, a5}),
                  [_] "0" (default)
                : "cc", "memory"
            );
        },
        // TODO: magic sequences for ppc32, ppc64, arm, arm64, s390x,
        // mips32 and mips64 are not implemented yet; those targets
        // fall through to the default return value.
        else => {
            return default;
        }
    }
}
/// Core Valgrind client request codes (tool-specific codes are built
/// separately via ToolBase).
pub const ClientRequest = extern enum {
    RunningOnValgrind = 4097,
    DiscardTranslations = 4098,
    ClientCall0 = 4353,
    ClientCall1 = 4354,
    ClientCall2 = 4355,
    ClientCall3 = 4356,
    CountErrors = 4609,
    GdbMonitorCommand = 4610,
    MalloclikeBlock = 4865,
    ResizeinplaceBlock = 4875,
    FreelikeBlock = 4866,
    CreateMempool = 4867,
    DestroyMempool = 4868,
    MempoolAlloc = 4869,
    MempoolFree = 4870,
    MempoolTrim = 4871,
    MoveMempool = 4872,
    MempoolChange = 4873,
    MempoolExists = 4874,
    Printf = 5121,
    PrintfBacktrace = 5122,
    PrintfValistByRef = 5123,
    PrintfBacktraceValistByRef = 5124,
    StackRegister = 5377,
    StackDeregister = 5378,
    StackChange = 5379,
    LoadPdbDebuginfo = 5633,
    MapIpToSrcloc = 5889,
    ChangeErrDisablement = 6145,
    VexInitForIri = 6401,
    InnerThreads = 6402,
};
/// Build a tool's 32-bit request base code from its two-letter identifier,
/// e.g. "CT" yields 'C' << 24 | 'T' << 16.
pub fn ToolBase(base: [2]u8) u32 {
    const hi = u32(base[0] & 0xff) << 24;
    const lo = u32(base[1] & 0xff) << 16;
    return hi | lo;
}
/// Whether `code` is a request code belonging to the tool identified by `base`.
pub fn IsTool(base: [2]u8, code: usize) bool {
    return (code & 0xffff0000) == ToolBase(base);
}
/// Typed wrapper around doClientRequest for core requests;
/// returns `default` when not running under Valgrind.
fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
    const code = @intCast(usize, @enumToInt(request));
    return doClientRequest(default, code, a1, a2, a3, a4, a5);
}
/// Statement form of doClientRequestExpr: the result is discarded.
fn doClientRequestStmt(request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
    _ = doClientRequestExpr(0, request, a1, a2, a3, a4, a5);
}
/// Returns the number of Valgrinds this code is running under. That
/// is, 0 if running natively, 1 if running under Valgrind, 2 if
/// running under Valgrind which is running under another Valgrind,
/// etc.
pub fn runningOnValgrind() usize {
    return doClientRequestExpr(0, ClientRequest.RunningOnValgrind, 0, 0, 0, 0, 0);
}
/// Discard translation of code in the slice qzz. Useful if you are debugging
/// a JITter or some such, since it provides a way to make sure valgrind will
/// retranslate the invalidated area. Returns no value.
pub fn discardTranslations(qzz: []const u8) void {
    doClientRequestStmt(ClientRequest.DiscardTranslations, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
/// Notify Valgrind of the address `qzz` for the InnerThreads request.
/// (Semantics per valgrind.h's VALGRIND_INNER_THREADS — TODO confirm.)
pub fn innerThreads(qzz: [*]u8) void {
    doClientRequestStmt(ClientRequest.InnerThreads,
        // fix: client-request arguments are usize — pass the pointer's
        // address value, not the pointer itself (as every sibling does).
        @ptrToInt(qzz),
        0, 0, 0, 0);
}
//pub fn printf(format: [*]const u8, args: ...) usize {
// return doClientRequestExpr(0,
// ClientRequest.PrintfValistByRef,
// @ptrToInt(format), @ptrToInt(args),
// 0, 0, 0);
//}
//pub fn printfBacktrace(format: [*]const u8, args: ...) usize {
// return doClientRequestExpr(0,
// ClientRequest.PrintfBacktraceValistByRef,
// @ptrToInt(format), @ptrToInt(args),
// 0, 0, 0);
//}
/// Run a zero-argument function on the real CPU (outside the simulated one).
pub fn nonSIMDCall0(func: fn(usize) usize) usize {
    return doClientRequestExpr(0, ClientRequest.ClientCall0, @ptrToInt(func), 0, 0, 0, 0);
}
/// Run a one-argument function on the real CPU (outside the simulated one).
pub fn nonSIMDCall1(func: fn(usize, usize) usize, a1: usize) usize {
    return doClientRequestExpr(0, ClientRequest.ClientCall1, @ptrToInt(func), a1, 0, 0, 0);
}
/// Run a two-argument function on the real CPU (outside the simulated one).
pub fn nonSIMDCall2(func: fn(usize, usize, usize) usize, a1: usize, a2: usize) usize {
    return doClientRequestExpr(0, ClientRequest.ClientCall2, @ptrToInt(func), a1, a2, 0, 0);
}
/// Run a three-argument function on the real CPU (outside the simulated one).
pub fn nonSIMDCall3(func: fn(usize, usize, usize, usize) usize, a1: usize, a2: usize, a3: usize) usize {
    return doClientRequestExpr(0, ClientRequest.ClientCall3, @ptrToInt(func), a1, a2, a3, 0);
}
/// Counts the number of errors that have been recorded by a tool. Nb:
/// the tool must record the errors with VG_(maybe_record_error)() or
/// VG_(unique_error)() for them to be counted.
pub fn countErrors() usize {
    // Default return of 0 applies when not running under Valgrind.
    return doClientRequestExpr(0, ClientRequest.CountErrors, 0, 0, 0, 0, 0);
}
/// Report `mem` to Valgrind as a malloc-like heap block with a red zone of
/// rzB bytes; `is_zeroed` indicates whether the block starts zero-filled.
pub fn mallocLikeBlock(mem: []u8, rzB: usize, is_zeroed: bool) void {
    doClientRequestStmt(ClientRequest.MalloclikeBlock, @ptrToInt(mem.ptr), mem.len, rzB, @boolToInt(is_zeroed), 0);
}
/// Report an in-place resize of a previously announced block from
/// oldmem.len to newsize bytes, keeping a red zone of rzB bytes.
pub fn resizeInPlaceBlock(oldmem: []u8, newsize: usize, rzB: usize) void {
    doClientRequestStmt(ClientRequest.ResizeinplaceBlock, @ptrToInt(oldmem.ptr), oldmem.len, newsize, rzB, 0);
}
/// Report to Valgrind that the block starting at `addr` (previously
/// announced via mallocLikeBlock) has been freed; rzB is the red-zone size.
pub fn freeLikeBlock(addr: [*]u8, rzB: usize) void {
    doClientRequestStmt(ClientRequest.FreelikeBlock, @ptrToInt(addr), rzB, 0, 0, 0);
}
/// Flags accepted by createMempool (the original comment "Create a memory
/// pool." belongs on createMempool below).
pub const MempoolFlags = extern enum {
    AutoFree = 1,
    MetaPool = 2,
};
/// Create a memory pool anchored at `pool`, with red-zone size rzB;
/// `is_zeroed` indicates zero-initialized allocations, `flags` are MempoolFlags.
pub fn createMempool(pool: [*]u8, rzB: usize, is_zeroed: bool, flags: usize) void {
    doClientRequestStmt(ClientRequest.CreateMempool, @ptrToInt(pool), rzB, @boolToInt(is_zeroed), flags, 0);
}
/// Destroy a memory pool.
pub fn destroyMempool(pool: [*]u8) void {
    doClientRequestStmt(ClientRequest.DestroyMempool,
        // fix: client-request arguments are usize — convert the pointer
        // explicitly, as every other mempool function here does.
        @ptrToInt(pool),
        0, 0, 0, 0);
}
/// Associate a piece of memory with a memory pool.
pub fn mempoolAlloc(pool: [*]u8, mem: []u8) void {
    doClientRequestStmt(ClientRequest.MempoolAlloc, @ptrToInt(pool), @ptrToInt(mem.ptr), mem.len, 0, 0);
}
/// Disassociate a piece of memory from a memory pool.
pub fn mempoolFree(pool: [*]u8, addr: [*]u8) void {
    doClientRequestStmt(ClientRequest.MempoolFree, @ptrToInt(pool), @ptrToInt(addr), 0, 0, 0);
}
/// Disassociate any pieces outside a particular range.
pub fn mempoolTrim(pool: [*]u8, mem: []u8) void {
    doClientRequestStmt(ClientRequest.MempoolTrim, @ptrToInt(pool), @ptrToInt(mem.ptr), mem.len, 0, 0);
}
/// Move the pool anchor from poolA to poolB (the previous comment was a
/// copy of mempoolChange's; this request is MoveMempool).
pub fn moveMempool(poolA: [*]u8, poolB: [*]u8) void {
    doClientRequestStmt(ClientRequest.MoveMempool,
        @ptrToInt(poolA), @ptrToInt(poolB),
        0, 0, 0);
}
/// Resize and/or move a piece associated with a memory pool.
pub fn mempoolChange(pool: [*]u8, addrA: [*]u8, mem: []u8) void {
    doClientRequestStmt(ClientRequest.MempoolChange, @ptrToInt(pool), @ptrToInt(addrA), @ptrToInt(mem.ptr), mem.len, 0);
}
/// Return if a mempool exists.
pub fn mempoolExists(pool: [*]u8) bool {
    const res = doClientRequestExpr(0, ClientRequest.MempoolExists, @ptrToInt(pool), 0, 0, 0, 0);
    return res != 0;
}
/// Mark a piece of memory as being a stack. Returns a stack id.
/// start is the lowest addressable stack byte, end is the highest
/// addressable stack byte.
pub fn stackRegister(stack: []u8) usize {
    const start = @ptrToInt(stack.ptr);
    return doClientRequestExpr(0, ClientRequest.StackRegister, start, start + stack.len, 0, 0, 0);
}
/// Unmark the piece of memory associated with a stack id as being a stack.
pub fn stackDeregister(id: usize) void {
    doClientRequestStmt(ClientRequest.StackDeregister, id, 0, 0, 0, 0);
}
/// Change the start and end address of the stack id.
/// start is the new lowest addressable stack byte, end is the new highest
/// addressable stack byte.
pub fn stackChange(id: usize, newstack: []u8) void {
    const start = @ptrToInt(newstack.ptr);
    doClientRequestStmt(ClientRequest.StackChange, id, start, start + newstack.len, 0, 0);
}
// Load PDB debug info for Wine PE image_map.
// pub fn loadPdbDebuginfo(fd, ptr, total_size, delta) void {
// doClientRequestStmt(ClientRequest.LoadPdbDebuginfo,
// fd, ptr, total_size, delta,
// 0);
// }
/// Map a code address to a source file name and line number. buf64
/// must point to a 64-byte buffer in the caller's address space. The
/// result will be dumped in there and is guaranteed to be zero
/// terminated. If no info is found, the first byte is set to zero.
/// NOTE(review): buf64 is passed by value, so @ptrToInt(&buf64[0]) points
/// at the callee's copy and the caller cannot observe the result — this
/// parameter should probably be a pointer to the buffer; confirm intent.
pub fn mapIpToSrcloc(addr: *const u8, buf64: [64]u8) usize {
    return doClientRequestExpr(0,
        ClientRequest.MapIpToSrcloc,
        @ptrToInt(addr), @ptrToInt(&buf64[0]),
        0, 0, 0);
}
/// Disable error reporting for this thread. Behaves in a stack like
/// way, so you can safely call this multiple times provided that
/// enableErrorReporting() is called the same number of times
/// to re-enable reporting. The first call of this macro disables
/// reporting. Subsequent calls have no effect except to increase the
/// number of enableErrorReporting() calls needed to re-enable
/// reporting. Child threads do not inherit this setting from their
/// parents -- they are always created with reporting enabled.
pub fn disableErrorReporting() void {
    // +1 increments the per-thread disablement counter.
    doClientRequestStmt(ClientRequest.ChangeErrDisablement, 1, 0, 0, 0, 0);
}
/// Re-enable error reporting, (see disableErrorReporting())
pub fn enableErrorReporting() void {
    // maxInt(usize) is the two's-complement encoding of -1, decrementing
    // the per-thread disablement counter.
    doClientRequestStmt(ClientRequest.ChangeErrDisablement, math.maxInt(usize), 0, 0, 0, 0);
}
/// Execute a monitor command from the client program.
/// If a connection is opened with GDB, the output will be sent
/// according to the output mode set for vgdb.
/// If no connection is opened, output will go to the log output.
/// Returns 1 if command not recognised, 0 otherwise.
pub fn monitorCommand(command: [*]u8) bool {
    return doClientRequestExpr(0,
        ClientRequest.GdbMonitorCommand,
        // fix: `command` is a many-item pointer, which has no `.ptr`
        // field — convert the pointer itself.
        @ptrToInt(command),
        0, 0, 0, 0) != 0;
}
pub const memcheck = @import("memcheck.zig");
pub const callgrind = @import("callgrind.zig");

255
std/valgrind/memcheck.zig Normal file
View File

@ -0,0 +1,255 @@
const std = @import("../index.zig");
const valgrind = std.valgrind;
/// Memcheck client request codes.
/// The first value is derived from the tool's two-letter code ("MC") via
/// valgrind.ToolBase; the remaining members take sequential values.
pub const MemCheckClientRequest = extern enum {
    MakeMemNoAccess = valgrind.ToolBase("MC"),
    MakeMemUndefined,
    MakeMemDefined,
    Discard,
    CheckMemIsAddressable,
    CheckMemIsDefined,
    DoLeakCheck,
    CountLeaks,
    GetVbits,
    SetVbits,
    CreateBlock,
    MakeMemDefinedIfAddressable,
    CountLeakBlocks,
    EnableAddrErrorReportingInRange,
    DisableAddrErrorReportingInRange,
};
/// Forward a Memcheck request through the generic client-request
/// mechanism; returns `default` when not running under Valgrind.
fn doMemCheckClientRequestExpr(default: usize, request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
    const code = @intCast(usize, @enumToInt(request));
    return valgrind.doClientRequest(default, code, a1, a2, a3, a4, a5);
}
/// Statement form of doMemCheckClientRequestExpr: the result is discarded.
fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
    _ = doMemCheckClientRequestExpr(0, request, a1, a2, a3, a4, a5);
}
/// Mark memory at qzz.ptr as unaddressable for qzz.len bytes.
/// This returns -1 when run on Valgrind and 0 otherwise.
/// NOTE(review): under Valgrind the request returns all-ones (usize -1);
/// @intCast(i1, ...) of that value looks like it would fail the range
/// check in safe builds — confirm, and consider @bitCast of a @truncate'd
/// u1, or returning bool.
pub fn makeMemNoAccess(qzz: []u8) i1 {
    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
        MemCheckClientRequest.MakeMemNoAccess,
        @ptrToInt(qzz.ptr), qzz.len,
        0, 0, 0));
}
/// Similarly, mark memory at qzz.ptr as addressable but undefined
/// for qzz.len bytes.
/// This returns -1 when run on Valgrind and 0 otherwise.
/// NOTE(review): same @intCast(i1, -1) range concern as makeMemNoAccess.
pub fn makeMemUndefined(qzz: []u8) i1 {
    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
        MemCheckClientRequest.MakeMemUndefined,
        @ptrToInt(qzz.ptr), qzz.len,
        0, 0, 0));
}
/// Similarly, mark memory at qzz.ptr as addressable and defined
/// for qzz.len bytes.
/// This returns -1 when run on Valgrind and 0 otherwise.
/// NOTE(review): same @intCast(i1, -1) range concern as makeMemNoAccess.
pub fn makeMemDefined(qzz: []u8) i1 {
    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
        MemCheckClientRequest.MakeMemDefined,
        @ptrToInt(qzz.ptr), qzz.len,
        0, 0, 0));
}
/// Similar to makeMemDefined except that addressability is
/// not altered: bytes which are addressable are marked as defined,
/// but those which are not addressable are left unchanged.
/// This returns -1 when run on Valgrind and 0 otherwise.
/// NOTE(review): same @intCast(i1, -1) range concern as makeMemNoAccess.
pub fn makeMemDefinedIfAddressable(qzz: []u8) i1 {
    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
        MemCheckClientRequest.MakeMemDefinedIfAddressable,
        @ptrToInt(qzz.ptr), qzz.len,
        0, 0, 0));
}
/// Create a block-description handle. The description is an ascii
/// string which is included in any messages pertaining to addresses
/// within the specified memory range. Has no other effect on the
/// properties of the memory range.
pub fn createBlock(qzz: []u8, desc: [*]u8) usize {
    // Default return of 0 applies when not running under Valgrind.
    return doMemCheckClientRequestExpr(0, MemCheckClientRequest.CreateBlock, @ptrToInt(qzz.ptr), qzz.len, @ptrToInt(desc), 0, 0);
}
/// Discard a block-description-handle. Returns 1 for an
/// invalid handle, 0 for a valid handle.
pub fn discard(blkindex: usize) bool { // fix: parameter had no type, which does not compile
    return doMemCheckClientRequestExpr(0, // default return
        MemCheckClientRequest.Discard,
        0, blkindex,
        0, 0, 0) != 0;
}
/// Check that memory at qzz.ptr is addressable for qzz.len bytes.
/// If suitable addressibility is not established, Valgrind prints an
/// error message and returns the address of the first offending byte.
/// Otherwise it returns zero.
pub fn checkMemIsAddressable(qzz: []u8) usize {
    return doMemCheckClientRequestExpr(0, MemCheckClientRequest.CheckMemIsAddressable, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
/// Check that memory at qzz.ptr is addressable and defined for
/// qzz.len bytes. If suitable addressibility and definedness are not
/// established, Valgrind prints an error message and returns the
/// address of the first offending byte. Otherwise it returns zero.
pub fn checkMemIsDefined(qzz: []u8) usize {
    return doMemCheckClientRequestExpr(0, MemCheckClientRequest.CheckMemIsDefined, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
/// Do a full memory leak check (like --leak-check=full) mid-execution.
pub fn doLeakCheck() void {
    // fix: the enum member is DoLeakCheck; DO_LEAK_CHECK does not exist
    // in MemCheckClientRequest and would not compile.
    doMemCheckClientRequestStmt(MemCheckClientRequest.DoLeakCheck,
        0, 0,
        0, 0, 0);
}
/// Same as doLeakCheck() but only showing the entries for
/// which there was an increase in leaked bytes or leaked nr of blocks
/// since the previous leak search.
pub fn doAddedLeakCheck() void {
    // fix: the enum member is DoLeakCheck, not DO_LEAK_CHECK.
    doMemCheckClientRequestStmt(MemCheckClientRequest.DoLeakCheck,
        0, 1,
        0, 0, 0);
}
/// Same as doAddedLeakCheck() but showing entries with
/// increased or decreased leaked bytes/blocks since previous leak
/// search.
pub fn doChangedLeakCheck() void {
    // fix: the enum member is DoLeakCheck, not DO_LEAK_CHECK.
    doMemCheckClientRequestStmt(MemCheckClientRequest.DoLeakCheck,
        0, 2,
        0, 0, 0);
}
/// Do a summary memory leak check (like --leak-check=summary) mid-execution.
pub fn doQuickLeakCheck() void {
    // fix: the enum member is DoLeakCheck, not DO_LEAK_CHECK.
    doMemCheckClientRequestStmt(MemCheckClientRequest.DoLeakCheck,
        1, 0,
        0, 0, 0);
}
/// Totals filled in by countLeaks()/countLeakBlocks(): leaked, dubious,
/// reachable and suppressed counts accumulated over all previous leak checks.
const CountResult = struct {
    leaked: usize,
    dubious: usize,
    reachable: usize,
    suppressed: usize,
};
/// Return number of leaked, dubious, reachable and suppressed bytes found
/// by all previous leak checks.
pub fn countLeaks() CountResult {
    var res = CountResult {
        .leaked = 0,
        .dubious = 0,
        .reachable = 0,
        .suppressed = 0,
    };
    doMemCheckClientRequestStmt(MemCheckClientRequest.CountLeaks,
        // fix: client-request arguments are usize — pass the addresses of
        // the out-fields explicitly rather than raw pointers.
        @ptrToInt(&res.leaked), @ptrToInt(&res.dubious),
        @ptrToInt(&res.reachable), @ptrToInt(&res.suppressed),
        0);
    return res;
}
/// Return number of leaked, dubious, reachable and suppressed blocks found
/// by all previous leak checks.
pub fn countLeakBlocks() CountResult {
    var res = CountResult {
        .leaked = 0,
        .dubious = 0,
        .reachable = 0,
        .suppressed = 0,
    };
    doMemCheckClientRequestStmt(MemCheckClientRequest.CountLeakBlocks,
        // fix: client-request arguments are usize — pass the addresses of
        // the out-fields explicitly rather than raw pointers.
        @ptrToInt(&res.leaked), @ptrToInt(&res.dubious),
        @ptrToInt(&res.reachable), @ptrToInt(&res.suppressed),
        0);
    return res;
}
/// Get the validity data for addresses zza and copy it
/// into the provided zzvbits array. Return values:
///   0   if not running on valgrind
///   1   success
///   2   [previously indicated unaligned arrays; these are now allowed]
///   3   if any parts of zzsrc/zzvbits are not addressable.
/// The metadata is not copied in cases 0, 2 or 3 so it should be
/// impossible to segfault your system by using this call.
pub fn getVbits(zza: []u8, zzvbits: []u8) u2 {
    std.debug.assert(zzvbits.len >= zza.len / 8);
    return @intCast(u2, doMemCheckClientRequestExpr(0,
        MemCheckClientRequest.GetVbits,
        @ptrToInt(zza.ptr),
        // fix: take the address of the slice's data, not the slice value
        @ptrToInt(zzvbits.ptr),
        zza.len,
        0, 0));
}
/// Set the validity data for addresses zza, copying it
/// from the provided zzvbits array. Return values:
///   0   if not running on valgrind
///   1   success
///   2   [previously indicated unaligned arrays; these are now allowed]
///   3   if any parts of zza/zzvbits are not addressable.
/// The metadata is not copied in cases 0, 2 or 3 so it should be
/// impossible to segfault your system by using this call.
pub fn setVbits(zzvbits: []u8, zza: []u8) u2 {
    std.debug.assert(zzvbits.len >= zza.len / 8);
    return @intCast(u2, doMemCheckClientRequestExpr(0,
        MemCheckClientRequest.SetVbits,
        @ptrToInt(zza.ptr),
        // fix: take the address of the slice's data, not the slice value
        @ptrToInt(zzvbits.ptr),
        zza.len,
        0, 0));
}
/// Disable reporting of addressing errors in the specified address range
/// (re-enable with enableAddrErrorReportingInRange).
pub fn disableAddrErrorReportingInRange(qzz: []u8) usize {
    // Default return of 0 applies when not running under Valgrind.
    return doMemCheckClientRequestExpr(0, MemCheckClientRequest.DisableAddrErrorReportingInRange, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
/// Re-enable reporting of addressing errors in the specified address range
/// (counterpart of disableAddrErrorReportingInRange).
pub fn enableAddrErrorReportingInRange(qzz: []u8) usize {
    // Default return of 0 applies when not running under Valgrind.
    return doMemCheckClientRequestExpr(0, MemCheckClientRequest.EnableAddrErrorReportingInRange, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}