use atomic bools in std lib

master
Vexu 2020-03-10 22:46:19 +02:00
parent 8dc188ebe0
commit ee5b00a8b9
7 changed files with 83 additions and 79 deletions

View File

@@ -1,7 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
const expect = std.testing.expect;
@@ -149,7 +147,7 @@ const Context = struct {
put_sum: isize,
get_sum: isize,
get_count: usize,
puts_done: u8, // TODO make this a bool
puts_done: bool,
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
@@ -173,7 +171,7 @@ test "std.atomic.Queue" {
.queue = &queue,
.put_sum = 0,
.get_sum = 0,
.puts_done = 0,
.puts_done = false,
.get_count = 0,
};
@@ -186,7 +184,7 @@ test "std.atomic.Queue" {
}
}
expect(!context.queue.isEmpty());
context.puts_done = 1;
context.puts_done = true;
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
@@ -208,7 +206,7 @@ test "std.atomic.Queue" {
for (putters) |t|
t.wait();
@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
@atomicStore(bool, &context.puts_done, true, .SeqCst);
for (getters) |t|
t.wait();
@@ -235,25 +233,25 @@ fn startPuts(ctx: *Context) u8 {
std.time.sleep(1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
node.* = Queue(i32).Node{
node.* = .{
.prev = undefined,
.next = undefined,
.data = x,
};
ctx.queue.put(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
_ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
}
return 0;
}
fn startGets(ctx: *Context) u8 {
while (true) {
const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
while (ctx.queue.get()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
}
if (last) return 0;
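Illustrative sketch (not taken from the diff): the flag pattern the queue test now uses, with bool passed straight to the atomic builtins and the ordering written as an enum literal. The names below are made up for the example.

const std = @import("std");
const expect = std.testing.expect;

test "bool flag via atomic builtins" {
    var puts_done: bool = false;
    // producer side: publish completion
    @atomicStore(bool, &puts_done, true, .SeqCst);
    // consumer side: read the flag, no u8 <-> bool conversion needed
    expect(@atomicLoad(bool, &puts_done, .SeqCst));
}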

View File

@@ -1,6 +1,5 @@
const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const expect = std.testing.expect;
/// Many reader, many writer, non-allocating, thread-safe
@@ -11,7 +10,7 @@ pub fn Stack(comptime T: type) type {
root: ?*Node,
lock: @TypeOf(lock_init),
const lock_init = if (builtin.single_threaded) {} else @as(u8, 0);
const lock_init = if (builtin.single_threaded) {} else false;
pub const Self = @This();
@@ -31,7 +30,7 @@ pub fn Stack(comptime T: type) type {
/// being the first item in the stack, returns the other item that was there.
pub fn pushFirst(self: *Self, node: *Node) ?*Node {
node.next = null;
return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst);
}
pub fn push(self: *Self, node: *Node) void {
@@ -39,8 +38,8 @@ pub fn Stack(comptime T: type) type {
node.next = self.root;
self.root = node;
} else {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst) != false) {}
defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst) == true);
node.next = self.root;
self.root = node;
@@ -53,8 +52,8 @@ pub fn Stack(comptime T: type) type {
self.root = root.next;
return root;
} else {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst) != false) {}
defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst) == true);
const root = self.root orelse return null;
self.root = root.next;
@@ -63,7 +62,7 @@ pub fn Stack(comptime T: type) type {
}
pub fn isEmpty(self: *Self) bool {
return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
return @atomicLoad(?*Node, &self.root, .SeqCst) == null;
}
};
}
@@ -75,7 +74,7 @@ const Context = struct {
put_sum: isize,
get_sum: isize,
get_count: usize,
puts_done: u8, // TODO make this a bool
puts_done: bool,
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
@@ -98,7 +97,7 @@ test "std.atomic.stack" {
.stack = &stack,
.put_sum = 0,
.get_sum = 0,
.puts_done = 0,
.puts_done = false,
.get_count = 0,
};
@@ -109,7 +108,7 @@ test "std.atomic.stack" {
expect(startPuts(&context) == 0);
}
}
context.puts_done = 1;
context.puts_done = true;
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
@@ -128,7 +127,7 @@ test "std.atomic.stack" {
for (putters) |t|
t.wait();
@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
@atomicStore(bool, &context.puts_done, true, .SeqCst);
for (getters) |t|
t.wait();
}
@@ -158,19 +157,19 @@ fn startPuts(ctx: *Context) u8 {
.data = x,
};
ctx.stack.push(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
_ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
}
return 0;
}
fn startGets(ctx: *Context) u8 {
while (true) {
const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst) == true;
while (ctx.stack.pop()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
}
if (last) return 0;
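For context, the push/pop fallback above is a plain bool spinlock. A minimal standalone sketch (assumptions: a single flag, no stack around it) could look like this:

const std = @import("std");
const assert = std.debug.assert;

test "bool spinlock via .Xchg" {
    var locked: bool = false;
    // acquire: spin until the exchange returns false, i.e. we flipped it ourselves
    while (@atomicRmw(bool, &locked, .Xchg, true, .SeqCst)) {}
    // ... critical section ...
    // release: the previous value must have been true
    assert(@atomicRmw(bool, &locked, .Xchg, false, .SeqCst));
}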

View File

@@ -14,8 +14,8 @@ pub fn Channel(comptime T: type) type {
putters: std.atomic.Queue(PutNode),
get_count: usize,
put_count: usize,
dispatch_lock: u8, // TODO make this a bool
need_dispatch: u8, // TODO make this a bool
dispatch_lock: bool,
need_dispatch: bool,
// simple fixed size ring buffer
buffer_nodes: []T,
@@ -62,8 +62,8 @@ pub fn Channel(comptime T: type) type {
.buffer_len = 0,
.buffer_nodes = buffer,
.buffer_index = 0,
.dispatch_lock = 0,
.need_dispatch = 0,
.dispatch_lock = false,
.need_dispatch = false,
.getters = std.atomic.Queue(GetNode).init(),
.putters = std.atomic.Queue(PutNode).init(),
.or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
@@ -165,15 +165,15 @@ pub fn Channel(comptime T: type) type {
fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
@atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
@atomicStore(bool, &self.need_dispatch, true, .SeqCst);
lock: while (true) {
// set the lock flag
const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst);
const prev_lock = @atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst);
if (prev_lock != 0) return;
if (prev_lock) return;
// clear the need_dispatch flag since we're about to do it
@atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
@atomicStore(bool, &self.need_dispatch, false, .SeqCst);
while (true) {
one_dispatch: {
@@ -250,14 +250,14 @@ pub fn Channel(comptime T: type) type {
}
// clear need-dispatch flag
const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
if (need_dispatch != 0) continue;
const need_dispatch = @atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst);
if (need_dispatch) continue;
const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst);
assert(my_lock != 0);
const my_lock = @atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst);
assert(my_lock);
// we have to check again now that we unlocked
if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) continue :lock;
if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock;
return;
}
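A simplified, hedged sketch of the two-flag dispatch idiom used above (field names borrowed from Channel; the actual dispatch work is elided):

const std = @import("std");
const assert = std.debug.assert;

var need_dispatch: bool = false;
var dispatch_lock: bool = false;

fn dispatch() void {
    // mark that work is pending, then try to become the single dispatcher
    @atomicStore(bool, &need_dispatch, true, .SeqCst);
    while (true) {
        if (@atomicRmw(bool, &dispatch_lock, .Xchg, true, .SeqCst)) return; // someone else holds the lock
        @atomicStore(bool, &need_dispatch, false, .SeqCst);
        // ... dispatch work would go here ...
        assert(@atomicRmw(bool, &dispatch_lock, .Xchg, false, .SeqCst)); // release
        // re-check after unlocking so a flag set during the race is not lost
        if (!@atomicLoad(bool, &need_dispatch, .SeqCst)) return;
    }
}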

View File

@@ -11,9 +11,9 @@ const Loop = std.event.Loop;
/// Allows only one actor to hold the lock.
/// TODO: make this API also work in blocking I/O mode.
pub const Lock = struct {
shared_bit: u8, // TODO make this a bool
shared: bool,
queue: Queue,
queue_empty_bit: u8, // TODO make this a bool
queue_empty: bool,
const Queue = std.atomic.Queue(anyframe);
@@ -31,20 +31,19 @@ pub const Lock = struct {
}
// We need to release the lock.
@atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
@atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
@atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
@atomicStore(bool, &self.lock.shared, false, .SeqCst);
// There might be a queue item. If we know the queue is empty, we can be done,
// because the other actor will try to obtain the lock.
// But if there's a queue item, we are the actor which must loop and attempt
// to grab the lock again.
if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
return;
}
while (true) {
const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit != 0) {
if (@atomicRmw(bool, &self.lock.shared, .Xchg, true, .SeqCst)) {
// We did not obtain the lock. Great, the queue is someone else's problem.
return;
}
@@ -56,11 +55,11 @@ pub const Lock = struct {
}
// Release the lock again.
@atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
@atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
@atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
@atomicStore(bool, &self.lock.shared, false, .SeqCst);
// Find out if we can be done.
if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
return;
}
}
@@ -69,24 +68,24 @@ pub const Lock = struct {
pub fn init() Lock {
return Lock{
.shared_bit = 0,
.shared = false,
.queue = Queue.init(),
.queue_empty_bit = 1,
.queue_empty = true,
};
}
pub fn initLocked() Lock {
return Lock{
.shared_bit = 1,
.shared = true,
.queue = Queue.init(),
.queue_empty_bit = 1,
.queue_empty = true,
};
}
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
assert(self.shared_bit == 0);
assert(!self.shared);
while (self.queue.get()) |node| resume node.data;
}
@@ -99,12 +98,11 @@ pub const Lock = struct {
// At this point, we are in the queue, so we might have already been resumed.
// We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
// We set this bit so that later we can rely on the fact, that if queue_empty == true, some actor
// will attempt to grab the lock.
@atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
@atomicStore(bool, &self.queue_empty, false, .SeqCst);
const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit == 0) {
if (!@atomicRmw(bool, &self.shared, .Xchg, true, .SeqCst)) {
if (self.queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
resume node.data;
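Sketch only (free-standing globals named after the Lock fields, not the real event-loop code): with the u8 bits gone, the acquire attempt reads as a direct bool exchange.

var shared: bool = false;
var queue_empty: bool = true;

fn tryAcquire() bool {
    // record that a waiter may be queued before attempting to take the lock
    @atomicStore(bool, &queue_empty, false, .SeqCst);
    // a previous value of false means we are the new owner
    return !@atomicRmw(bool, &shared, .Xchg, true, .SeqCst);
}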

View File

@@ -16,8 +16,8 @@ pub const RwLock = struct {
shared_state: State,
writer_queue: Queue,
reader_queue: Queue,
writer_queue_empty_bit: u8, // TODO make this a bool
reader_queue_empty_bit: u8, // TODO make this a bool
writer_queue_empty: bool,
reader_queue_empty: bool,
reader_lock_count: usize,
const State = enum(u8) {
@@ -40,7 +40,7 @@ pub const RwLock = struct {
return;
}
@atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
@atomicStore(bool, &self.lock.reader_queue_empty, true, .SeqCst);
if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
@@ -62,7 +62,7 @@ pub const RwLock = struct {
}
// We need to release the write lock. Check if any readers are waiting to grab the lock.
if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
if (!@atomicLoad(bool, &self.lock.reader_queue_empty, .SeqCst)) {
// Switch to a read lock.
@atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
while (self.lock.reader_queue.get()) |node| {
@@ -71,7 +71,7 @@ pub const RwLock = struct {
return;
}
@atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
@atomicStore(bool, &self.lock.writer_queue_empty, true, .SeqCst);
@atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
self.lock.commonPostUnlock();
@@ -79,12 +79,12 @@ pub const RwLock = struct {
};
pub fn init() RwLock {
return RwLock{
return .{
.shared_state = .Unlocked,
.writer_queue = Queue.init(),
.writer_queue_empty_bit = 1,
.writer_queue_empty = true,
.reader_queue = Queue.init(),
.reader_queue_empty_bit = 1,
.reader_queue_empty = true,
.reader_lock_count = 0,
};
}
@@ -111,9 +111,9 @@ pub const RwLock = struct {
// At this point, we are in the reader_queue, so we might have already been resumed.
// We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
// We set this bit so that later we can rely on the fact, that if reader_queue_empty == true,
// some actor will attempt to grab the lock.
@atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
@atomicStore(bool, &self.reader_queue_empty, false, .SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -142,9 +142,9 @@ pub const RwLock = struct {
// At this point, we are in the writer_queue, so we might have already been resumed.
// We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
// We set this bit so that later we can rely on the fact, that if writer_queue_empty == true,
// some actor will attempt to grab the lock.
@atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
@atomicStore(bool, &self.writer_queue_empty, false, .SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -165,7 +165,7 @@ pub const RwLock = struct {
// obtain the lock.
// But if there's a writer_queue item or a reader_queue item,
// we are the actor which must loop and attempt to grab the lock again.
if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) {
if (!@atomicLoad(bool, &self.writer_queue_empty, .SeqCst)) {
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
@@ -176,12 +176,12 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
@atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
@atomicStore(bool, &self.writer_queue_empty, true, .SeqCst);
@atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
continue;
}
if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) {
if (!@atomicLoad(bool, &self.reader_queue_empty, .SeqCst)) {
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
@@ -195,7 +195,7 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
@atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
@atomicStore(bool, &self.reader_queue_empty, true, .SeqCst);
if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
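As a rough illustration (types and names approximated from the struct above, not lifted verbatim), the post-unlock writer check now pairs a bool load with the unchanged enum cmpxchg:

const State = enum(u8) { Unlocked, WriteLock, ReadLock };

var shared_state: State = .Unlocked;
var writer_queue_empty: bool = true;

fn tryTakeWriteLock() bool {
    if (!@atomicLoad(bool, &writer_queue_empty, .SeqCst)) {
        // a null result means the compare-exchange succeeded and we hold the write lock
        return @cmpxchgStrong(State, &shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null;
    }
    return false;
}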

View File

@@ -28397,15 +28397,15 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto
if (operand_type->id == ZigTypeIdEnum && op != AtomicRmwOp_xchg) {
ir_add_error(ira, &instruction->op->base,
buf_sprintf("@atomicRmw on enum only works with .Xchg"));
buf_sprintf("@atomicRmw with enum only allowed with .Xchg"));
return ira->codegen->invalid_inst_gen;
} else if (operand_type->id == ZigTypeIdBool && op != AtomicRmwOp_xchg) {
ir_add_error(ira, &instruction->op->base,
buf_sprintf("@atomicRmw on bool only works with .Xchg"));
buf_sprintf("@atomicRmw with bool only allowed with .Xchg"));
return ira->codegen->invalid_inst_gen;
} else if (operand_type->id == ZigTypeIdFloat && op > AtomicRmwOp_sub) {
ir_add_error(ira, &instruction->op->base,
buf_sprintf("@atomicRmw with float only works with .Xchg, .Add and .Sub"));
buf_sprintf("@atomicRmw with float only allowed with .Xchg, .Add and .Sub"));
return ira->codegen->invalid_inst_gen;
}
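The frontend rule being reworded here can be exercised directly; a small hedged example of what compiles (and what the added compile error rejects):

const std = @import("std");

test "@atomicRmw on bool is limited to .Xchg" {
    var flag: bool = false;
    // exchange is the one rmw op accepted for bool operands
    const old = @atomicRmw(bool, &flag, .Xchg, true, .SeqCst);
    std.testing.expect(old == false);
    // using any other op, e.g. `.Add`, is rejected with:
    //   "@atomicRmw with bool only allowed with .Xchg"
}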

View File

@@ -2,6 +2,15 @@ const tests = @import("tests.zig");
const std = @import("std");
pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add("atomicrmw with bool op not .Xchg",
\\export fn entry() void {
\\ var x = false;
\\ _ = @atomicRmw(bool, &x, .Add, true, .SeqCst);
\\}
, &[_][]const u8{
"tmp.zig:3:30: error: @atomicRmw with bool only allowed with .Xchg",
});
cases.addTest("combination of noasync and async",
\\export fn entry() void {
\\ noasync {
@@ -325,7 +334,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ _ = @atomicRmw(f32, &x, .And, 2, .SeqCst);
\\}
, &[_][]const u8{
"tmp.zig:3:29: error: @atomicRmw with float only works with .Xchg, .Add and .Sub",
"tmp.zig:3:29: error: @atomicRmw with float only allowed with .Xchg, .Add and .Sub",
});
cases.add("intToPtr with misaligned address",
@@ -542,7 +551,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ _ = @atomicRmw(E, &x, .Add, .b, .SeqCst);
\\}
, &[_][]const u8{
"tmp.zig:9:27: error: @atomicRmw on enum only works with .Xchg",
"tmp.zig:9:27: error: @atomicRmw with enum only allowed with .Xchg",
});
cases.add("disallow coercion from non-null-terminated pointer to null-terminated pointer",