remove the valgrind integration with std.mem.Allocator

See #1837
master
Andrew Kelley 2019-03-11 13:34:51 -04:00
parent 5362ca204f
commit d633dcd07a
No known key found for this signature in database
GPG Key ID: 7C5F548F728501A9
3 changed files with 7 additions and 29 deletions
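Practical effect of the change: std.mem.Allocator no longer reports blocks to Valgrind on the caller's behalf. As a purely hypothetical sketch of what calling code could do instead, reusing the std.valgrind call shapes that this diff deletes (the FixedBufferAllocator scaffolding, the constants, and the assumption that std.valgrind itself remains available are mine, not part of the commit):

const std = @import("std");

// Hypothetical: annotate a block for Valgrind by hand, mirroring the calls
// removed from std.mem.Allocator in this commit.
test "manually annotate an allocation for valgrind" {
    var buffer: [128]u8 = undefined;
    var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
    const allocator = &fixed.allocator;

    const bytes = try allocator.alloc(u8, 64);
    // same call shape as the deleted integration
    _ = std.valgrind.mallocLikeBlock(bytes, 0, false);
    defer {
        allocator.free(bytes);
        _ = std.valgrind.freeLikeBlock(bytes.ptr, 0);
    }
}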

View File

@@ -240,9 +240,8 @@ pub const ArenaAllocator = struct {
         while (true) {
             const cur_buf = cur_node.data[@sizeOf(BufNode)..];
             const addr = @ptrToInt(cur_buf.ptr) + self.end_index;
-            const rem = @rem(addr, alignment);
-            const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-            const adjusted_index = self.end_index + march_forward_bytes;
+            const adjusted_addr = mem.alignForward(addr, alignment);
+            const adjusted_index = self.end_index + (adjusted_addr - addr);
             const new_end_index = adjusted_index + n;
             if (new_end_index > cur_buf.len) {
                 cur_node = try self.createNode(cur_buf.len, n + alignment);
@@ -273,10 +272,6 @@ pub const FixedBufferAllocator = struct {
     buffer: []u8,

     pub fn init(buffer: []u8) FixedBufferAllocator {
-        // This loop gets optimized out in ReleaseFast mode
-        for (buffer) |*byte| {
-            byte.* = undefined;
-        }
         return FixedBufferAllocator{
             .allocator = Allocator{
                 .allocFn = alloc,
@@ -291,9 +286,8 @@ pub const FixedBufferAllocator = struct {
     fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
-        const rem = @rem(addr, alignment);
-        const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-        const adjusted_index = self.end_index + march_forward_bytes;
+        const adjusted_addr = mem.alignForward(addr, alignment);
+        const adjusted_index = self.end_index + (adjusted_addr - addr);
         const new_end_index = adjusted_index + n;
         if (new_end_index > self.buffer.len) {
             return error.OutOfMemory;
@@ -330,7 +324,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
     if (builtin.single_threaded) {
         break :blk FixedBufferAllocator;
     } else {
-        /// lock free
+        // lock free
        break :blk struct {
            allocator: Allocator,
            end_index: usize,
@@ -353,9 +347,8 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
                var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
                while (true) {
                    const addr = @ptrToInt(self.buffer.ptr) + end_index;
-                    const rem = @rem(addr, alignment);
-                    const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-                    const adjusted_index = end_index + march_forward_bytes;
+                    const adjusted_addr = mem.alignForward(addr, alignment);
+                    const adjusted_index = end_index + (adjusted_addr - addr);
                    const new_end_index = adjusted_index + n;
                    if (new_end_index > self.buffer.len) {
                        return error.OutOfMemory;

View File

@@ -49,7 +49,6 @@ pub const Allocator = struct {
     pub fn destroy(self: *Allocator, ptr: var) void {
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
         self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
-        _ = std.valgrind.freeLikeBlock(non_const_ptr, 0);
     }

     pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
@@ -63,7 +62,6 @@ pub const Allocator = struct {
         const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
         const byte_slice = try self.allocFn(self, byte_count, alignment);
         assert(byte_slice.len == byte_count);
-        _ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
         // This loop gets optimized out in ReleaseFast mode
         for (byte_slice) |*byte| {
             byte.* = undefined;
@@ -88,12 +86,6 @@ pub const Allocator = struct {
         const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
         const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
         assert(byte_slice.len == byte_count);
-        if (byte_slice.ptr == old_byte_slice.ptr) {
-            _ = std.valgrind.resizeInPlaceBlock(old_byte_slice, byte_count, 0);
-        } else {
-            _ = std.valgrind.freeLikeBlock(old_byte_slice.ptr, 0);
-            _ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
-        }
         if (n > old_mem.len) {
             // This loop gets optimized out in ReleaseFast mode
             for (byte_slice[old_byte_slice.len..]) |*byte| {
@@ -125,12 +117,6 @@ pub const Allocator = struct {
         const old_byte_slice = @sliceToBytes(old_mem);
         const byte_slice = self.reallocFn(self, old_byte_slice, byte_count, alignment) catch unreachable;
         assert(byte_slice.len == byte_count);
-        if (byte_slice.ptr == old_byte_slice.ptr) {
-            _ = std.valgrind.resizeInPlaceBlock(old_byte_slice, byte_count, 0);
-        } else {
-            _ = std.valgrind.freeLikeBlock(old_byte_slice.ptr, 0);
-            _ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
-        }
         return @bytesToSlice(T, @alignCast(alignment, byte_slice));
     }

@@ -139,7 +125,6 @@ pub const Allocator = struct {
         if (bytes.len == 0) return;
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
         self.freeFn(self, non_const_ptr[0..bytes.len]);
-        _ = std.valgrind.freeLikeBlock(non_const_ptr, 0);
     }
 };