const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const Loop = std.event.Loop;

/// Thread-safe async/await lock.
/// Functions which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
/// Allows only one actor to hold the lock.
/// TODO: make this API also work in blocking I/O mode.
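///
/// A minimal usage sketch, assuming the caller is an async function running
/// under the global event loop (identifier names are illustrative):
///
///     var frame = async lock.acquire();
///     const held = await frame;
///     defer held.release();
///     // ... read or modify the state protected by the lock ...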
pub const Lock = struct {
    shared_bit: u8, // TODO make this a bool
    queue: Queue,
    queue_empty_bit: u8, // TODO make this a bool

    const Queue = std.atomic.Queue(anyframe);

    const global_event_loop = Loop.instance orelse
        @compileError("std.event.Lock currently only works with event-based I/O");

    pub const Held = struct {
        lock: *Lock,

        pub fn release(self: Held) void {
            // Resume the next item from the queue.
            if (self.lock.queue.get()) |node| {
                global_event_loop.onNextTick(node);
                return;
            }

            // The queue is empty; release the lock.
            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);

            // There might be a queue item. If we know the queue is empty, we can be done,
            // because the other actor will try to obtain the lock.
            // But if there's a queue item, we are the actor which must loop and attempt
            // to grab the lock again.
            if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
                return;
            }

            while (true) {
                const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst);
                if (old_bit != 0) {
                    // We did not obtain the lock. Great, the queue is someone else's problem.
                    return;
                }

                // Resume the next item from the queue.
                if (self.lock.queue.get()) |node| {
                    global_event_loop.onNextTick(node);
                    return;
                }

                // Release the lock again.
                @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
                @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);

                // Find out if we can be done.
                if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
                    return;
                }
            }
        }
    };

    /// Create a Lock in the unlocked state.
    pub fn init() Lock {
        return Lock{
            .shared_bit = 0,
            .queue = Queue.init(),
            .queue_empty_bit = 1,
        };
    }

    /// Create a Lock in the locked state.
    pub fn initLocked() Lock {
        return Lock{
            .shared_bit = 1,
            .queue = Queue.init(),
            .queue_empty_bit = 1,
        };
    }

    /// Must be called when not locked. Not thread safe.
    /// All calls to acquire() and release() must complete before calling deinit().
    pub fn deinit(self: *Lock) void {
        assert(self.shared_bit == 0);
        while (self.queue.get()) |node| resume node.data;
    }

    pub async fn acquire(self: *Lock) Held {
        var my_tick_node = Loop.NextTickNode.init(@frame());

        errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire
        suspend {
            self.queue.put(&my_tick_node);

            // At this point, we are in the queue, so we might have already been resumed.

            // We set this bit so that later we can rely on the fact that, if queue_empty_bit
            // is 1, some actor will attempt to grab the lock.
            @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);

            const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
            if (old_bit == 0) {
                if (self.queue.get()) |node| {
                    // Whether this node is us or someone else, we tail resume it.
                    resume node.data;
                }
            }
        }

        return Held{ .lock = self };
    }
};
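
// A rough usage sketch for initLocked() (hypothetical, for illustration only):
// create the lock already held while the protected state is being prepared,
// then release it by constructing the public Held type once the state is ready,
// which lets pending acquire() callers proceed.
//
//     var lock = Lock.initLocked();
//     // ... prepare the state guarded by the lock ...
//     const held = Lock.Held{ .lock = &lock };
//     held.release();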

test "std.event.Lock" {
    // TODO https://github.com/ziglang/zig/issues/1908
    if (builtin.single_threaded) return error.SkipZigTest;

    // TODO https://github.com/ziglang/zig/issues/3251
    if (builtin.os == .freebsd) return error.SkipZigTest;

    // TODO provide a way to run tests in evented I/O mode
    if (!std.io.is_async) return error.SkipZigTest;

    var lock = Lock.init();
    defer lock.deinit();

    _ = async testLock(&lock);

    testing.expectEqualSlices(i32, [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len, shared_test_data);
}

async fn testLock(lock: *Lock) void {
    var handle1 = async lockRunner(lock);
    var tick_node1 = Loop.NextTickNode{
        .prev = undefined,
        .next = undefined,
        .data = &handle1,
    };
    Loop.instance.?.onNextTick(&tick_node1);

    var handle2 = async lockRunner(lock);
    var tick_node2 = Loop.NextTickNode{
        .prev = undefined,
        .next = undefined,
        .data = &handle2,
    };
    Loop.instance.?.onNextTick(&tick_node2);

    var handle3 = async lockRunner(lock);
    var tick_node3 = Loop.NextTickNode{
        .prev = undefined,
        .next = undefined,
        .data = &handle3,
    };
    Loop.instance.?.onNextTick(&tick_node3);

    await handle1;
    await handle2;
    await handle3;
}
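
// Shared state for the test. Each lockRunner increments every element of
// shared_test_data once per outer iteration; the Lock is what keeps the three
// concurrent runners from interleaving their updates to this data and to
// shared_test_index.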
var shared_test_data = [1]i32{0} ** 10;
var shared_test_index: usize = 0;

async fn lockRunner(lock: *Lock) void {
    suspend; // resumed by onNextTick

    var i: usize = 0;
    while (i < shared_test_data.len) : (i += 1) {
        var lock_frame = async lock.acquire();
        const handle = await lock_frame;
        defer handle.release();

        shared_test_index = 0;
        while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
            shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
        }
    }
}