add fuzz tests for std.atomic.Stack

master
Andrew Kelley 2018-04-28 17:53:06 -04:00
parent 4ac36d094c
commit 96ecb40259
3 changed files with 143 additions and 13 deletions

src/ir.cpp

@@ -18184,6 +18184,11 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr
     } else {
         if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering))
             return ira->codegen->builtin_types.entry_invalid;
+        if (ordering == AtomicOrderUnordered) {
+            ir_add_error(ira, instruction->ordering,
+                buf_sprintf("@atomicRmw atomic ordering must not be Unordered"));
+            return ira->codegen->builtin_types.entry_invalid;
+        }
     }
     if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar)
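The added hunk is a new compile-time check: @atomicRmw must be given a real atomic ordering, and AtomicOrder.Unordered is now rejected with the error shown above. A minimal sketch of code the compiler now refuses (hypothetical snippet, not part of the commit):

    const builtin = @import("builtin");
    const AtomicOrder = builtin.AtomicOrder;

    test "unordered @atomicRmw is rejected" {
        var x: u8 = 0;
        // error: @atomicRmw atomic ordering must not be Unordered
        _ = @atomicRmw(u8, &x, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.Unordered);
    }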

std/atomic/stack.zig

@@ -1,3 +1,6 @@
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+
 /// Many reader, many writer, non-allocating, thread-safe, lock-free
 pub fn Stack(comptime T: type) type {
     return struct {
@@ -20,26 +23,100 @@ pub fn Stack(comptime T: type) type {
         /// being the first item in the stack, returns the other item that was there.
         pub fn pushFirst(self: &Self, node: &Node) ?&Node {
            node.next = null;
-            return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.AcqRel, AtomicOrder.AcqRel);
+            return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
         }
 
         pub fn push(self: &Self, node: &Node) void {
-            var root = @atomicLoad(?&Node, &self.root, AtomicOrder.Acquire);
+            var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
             while (true) {
                 node.next = root;
-                root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.Release, AtomicOrder.Acquire) ?? break;
+                root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
             }
         }
 
         pub fn pop(self: &Self) ?&Node {
-            var root = @atomicLoad(?&Node, &self.root, AtomicOrder.Acquire);
+            var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
             while (true) {
-                root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.Release, AtomicOrder.Acquire) ?? return root;
+                root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
             }
         }
 
         pub fn isEmpty(self: &Self) bool {
-            return @atomicLoad(?&Node, &self.root, AtomicOrder.Relaxed) == null;
+            return @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst) == null;
         }
     };
 }
+
+const std = @import("std");
+
+const Context = struct {
+    allocator: &std.mem.Allocator,
+    stack: &Stack(i32),
+    put_sum: isize,
+    get_sum: isize,
+    puts_done: u8, // TODO make this a bool
+};
+
+const puts_per_thread = 1000;
+const put_thread_count = 3;
+
+test "std.atomic.stack" {
+    var direct_allocator = std.heap.DirectAllocator.init();
+    defer direct_allocator.deinit();
+
+    var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 64 * 1024 * 1024);
+    defer direct_allocator.allocator.free(plenty_of_memory);
+
+    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
+    var a = &fixed_buffer_allocator.allocator;
+
+    var stack = Stack(i32).init();
+    var context = Context {
+        .allocator = a,
+        .stack = &stack,
+        .put_sum = 0,
+        .get_sum = 0,
+        .puts_done = 0,
+    };
+
+    var putters: [put_thread_count]&std.os.Thread = undefined;
+    for (putters) |*t| {
+        *t = try std.os.spawnThreadAllocator(a, &context, startPuts);
+    }
+    var getters: [put_thread_count]&std.os.Thread = undefined;
+    for (getters) |*t| {
+        *t = try std.os.spawnThreadAllocator(a, &context, startGets);
+    }
+
+    for (putters) |t| t.wait();
+    _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+    for (getters) |t| t.wait();
+
+    std.debug.assert(context.put_sum == context.get_sum);
+}
+
+fn startPuts(ctx: &Context) u8 {
+    var put_count: usize = puts_per_thread;
+    var r = std.rand.DefaultPrng.init(0xdeadbeef);
+    while (put_count != 0) : (put_count -= 1) {
+        std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+        const x = @bitCast(i32, r.random.scalar(u32));
+        const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
+        node.data = x;
+        ctx.stack.push(node);
+        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+    }
+    return 0;
+}
+
+fn startGets(ctx: &Context) u8 {
+    while (true) {
+        while (ctx.stack.pop()) |node| {
+            std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
+        }
+        if (@atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1) {
+            break;
+        }
+    }
+    return 0;
+}
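For reference, here is a minimal single-threaded sketch of the Stack API that the fuzz test above exercises (hypothetical, not part of the commit; it assumes Node has only the data and next fields used in the diff):

    test "stack usage sketch" {
        var stack = Stack(i32).init();
        std.debug.assert(stack.isEmpty());

        var node = Stack(i32).Node {
            .data = 42,
            .next = null,
        };
        stack.push(&node);

        const popped = stack.pop() ?? unreachable;
        std.debug.assert(popped.data == 42);
        std.debug.assert(stack.isEmpty());
    }

The fuzz test itself takes its nondeterminism from the OS scheduler rather than from random inputs: every putter and getter sleeps for one nanosecond per iteration to encourage interleavings, and the final assertion that put_sum == get_sum fails if the stack ever loses or duplicates a node.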

std/heap.zig

@@ -47,13 +47,6 @@ pub const DirectAllocator = struct {
     const HeapHandle = if (builtin.os == Os.windows) os.windows.HANDLE else void;
 
-    //pub const canary_bytes = []u8 {48, 239, 128, 46, 18, 49, 147, 9, 195, 59, 203, 3, 245, 54, 9, 122};
-    //pub const want_safety = switch (builtin.mode) {
-    //    builtin.Mode.Debug => true,
-    //    builtin.Mode.ReleaseSafe => true,
-    //    else => false,
-    //};
-
     pub fn init() DirectAllocator {
         return DirectAllocator {
             .allocator = Allocator {
@@ -298,7 +291,7 @@ pub const FixedBufferAllocator = struct {
     fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
-        const addr = @ptrToInt(&self.buffer[self.end_index]);
+        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
         const rem = @rem(addr, alignment);
         const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
         const adjusted_index = self.end_index + march_forward_bytes;
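The one-line change above is a correctness fix, not a refactor: once end_index == buffer.len (the buffer is exactly full), the old expression &self.buffer[self.end_index] indexes one past the end of the slice and trips the safety check, whereas the new form computes the same address arithmetically and lets the new_end_index comparison report OutOfMemory. A sketch of the case it fixes (hypothetical test, assuming the API shown in this file):

    const std = @import("std");

    test "full FixedBufferAllocator reports OutOfMemory" {
        var buffer: [16]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(buffer[0..]);
        _ = try fba.allocator.alloc(u8, 16); // buffer exactly full; end_index == 16
        // the old addr computation would have evaluated &buffer[16] here
        _ = fba.allocator.alloc(u8, 1) catch |err| {
            std.debug.assert(err == error.OutOfMemory);
            return;
        };
        unreachable;
    }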
@@ -325,6 +318,54 @@ pub const FixedBufferAllocator = struct {
     fn free(allocator: &Allocator, bytes: []u8) void { }
 };
 
+/// lock free
+pub const ThreadSafeFixedBufferAllocator = struct {
+    allocator: Allocator,
+    end_index: usize,
+    buffer: []u8,
+
+    pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
+        return ThreadSafeFixedBufferAllocator {
+            .allocator = Allocator {
+                .allocFn = alloc,
+                .reallocFn = realloc,
+                .freeFn = free,
+            },
+            .buffer = buffer,
+            .end_index = 0,
+        };
+    }
+
+    fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+        const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
+        var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
+        while (true) {
+            const addr = @ptrToInt(self.buffer.ptr) + end_index;
+            const rem = @rem(addr, alignment);
+            const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
+            const adjusted_index = end_index + march_forward_bytes;
+            const new_end_index = adjusted_index + n;
+            if (new_end_index > self.buffer.len) {
+                return error.OutOfMemory;
+            }
+            end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index,
+                builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index .. new_end_index];
+        }
+    }
+
+    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+        if (new_size <= old_mem.len) {
+            return old_mem[0..new_size];
+        } else {
+            const result = try alloc(allocator, new_size, alignment);
+            mem.copy(u8, result, old_mem);
+            return result;
+        }
+    }
+
+    fn free(allocator: &Allocator, bytes: []u8) void { }
+};
+
 test "c_allocator" {
@@ -363,6 +404,13 @@ test "FixedBufferAllocator" {
     try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
 }
 
+test "ThreadSafeFixedBufferAllocator" {
+    var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+    try testAllocator(&fixed_buffer_allocator.allocator);
+    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
+}
+
 fn testAllocator(allocator: &mem.Allocator) !void {
     var slice = try allocator.alloc(&i32, 100);
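A note on the lock-free pattern in ThreadSafeFixedBufferAllocator.alloc above: @cmpxchgWeak returns null when the swap succeeds and the freshly observed value when it fails, so the ?? operator doubles as the loop exit, yielding the reserved slice on success and feeding the updated end_index back into the retry on failure. The same idiom in miniature, bumping a shared counter (standalone sketch in the same era's syntax, function name hypothetical):

    const builtin = @import("builtin");

    fn fetchIncrement(counter: &usize) usize {
        var old = @atomicLoad(usize, counter, builtin.AtomicOrder.SeqCst);
        while (true) {
            // null means the compare-and-swap took effect; otherwise retry from the observed value
            old = @cmpxchgWeak(usize, counter, old, old + 1,
                builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return old;
        }
    }

Note that free is a no-op and realloc either returns a shorter view of the same memory or bump-allocates a fresh copy, so individual allocations are never reclaimed; that suits the fuzz test, where a node popped by one thread may still be read momentarily by another thread racing in pop.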