const std = @import("std.zig");
const builtin = @import("builtin");
const testing = std.testing;
const SpinLock = std.SpinLock;
const ThreadParker = std.ThreadParker;

/// Lock may be held only once. If the same thread
/// tries to acquire the same mutex twice, it deadlocks.
/// This type supports static initialization and is based on Go 1.13's runtime.lock_futex:
/// https://github.com/golang/go/blob/master/src/runtime/lock_futex.go
/// When an application is built in single-threaded release mode, all the functions are
/// no-ops. In single-threaded debug mode, there is deadlock detection.
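///
/// A minimal usage sketch (all of these functions are defined below):
///
///     var m = Mutex.init();
///     defer m.deinit();
///     const held = m.acquire();
///     defer held.release();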
pub const Mutex = if (builtin.single_threaded)
    struct {
        lock: @TypeOf(lock_init),
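
        // In debug builds (runtime safety on) the lock is a bool used purely
        // for deadlock detection; in release builds it is a zero-bit value,
        // so this whole struct compiles down to nothing.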
        const lock_init = if (std.debug.runtime_safety) false else {};

        pub const Held = struct {
            mutex: *Mutex,

            pub fn release(self: Held) void {
                if (std.debug.runtime_safety) {
                    self.mutex.lock = false;
                }
            }
        };

        pub fn init() Mutex {
            return Mutex{ .lock = lock_init };
        }

        pub fn deinit(self: *Mutex) void {}

        pub fn acquire(self: *Mutex) Held {
            if (std.debug.runtime_safety and self.lock) {
                @panic("deadlock detected");
            }
            return Held{ .mutex = self };
        }
    }
else
    struct {
        state: State,
        parker: ThreadParker,
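
        // Unlocked: no thread holds the lock.
        // Locked: a thread holds the lock and no waiter is parked.
        // Sleeping: the lock is held and there may be parked waiters
        // that release() must wake.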
        const State = enum(u32) {
            Unlocked,
            Sleeping,
            Locked,
        };

        /// number of iterations to spin yielding the cpu
        const SPIN_CPU = 4;

        /// number of iterations to perform in the cpu yield loop
        const SPIN_CPU_COUNT = 30;

        /// number of iterations to spin yielding the thread
        const SPIN_THREAD = 1;

        pub fn init() Mutex {
            return Mutex{
                .state = .Unlocked,
                .parker = ThreadParker.init(),
            };
        }

        pub fn deinit(self: *Mutex) void {
            self.parker.deinit();
        }

        pub const Held = struct {
            mutex: *Mutex,

            pub fn release(self: Held) void {
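                // Hand the lock back. The old state tells us whether a thread
                // may be parked waiting; if so, the parker is notified so a
                // sleeper can retry the acquire loop.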
                switch (@atomicRmw(State, &self.mutex.state, .Xchg, .Unlocked, .Release)) {
                    .Locked => {},
                    .Sleeping => self.mutex.parker.unpark(@ptrCast(*const u32, &self.mutex.state)),
                    .Unlocked => unreachable, // unlocking an unlocked mutex
                    else => unreachable, // should never be anything else
                }
            }
        };

        pub fn acquire(self: *Mutex) Held {
            // Try to speculatively grab the lock.
            // If it fails, the state is either Locked or Sleeping,
            // depending on whether there's a thread stuck sleeping below.
            var state = @atomicRmw(State, &self.state, .Xchg, .Locked, .Acquire);
            if (state == .Unlocked)
                return Held{ .mutex = self };
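
            // From here on, `state` is the value swapped in on a successful
            // acquire: Locked at first, then Sleeping once this thread has
            // parked, so the presence of a waiter is never lost.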

            while (true) {
                // Try to acquire the lock, spinning on the cpu upon failure.
                var spin: usize = 0;
                while (spin < SPIN_CPU) : (spin += 1) {
                    var value = @atomicLoad(State, &self.state, .Monotonic);
                    while (value == .Unlocked)
                        value = @cmpxchgWeak(State, &self.state, .Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
                    SpinLock.yield(SPIN_CPU_COUNT);
                }

                // Try to acquire the lock, rescheduling the thread upon failure.
                spin = 0;
                while (spin < SPIN_THREAD) : (spin += 1) {
                    var value = @atomicLoad(State, &self.state, .Monotonic);
                    while (value == .Unlocked)
                        value = @cmpxchgWeak(State, &self.state, .Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
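                    // If the OS has no sched_yield, fall back to a tiny sleep.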
                    std.os.sched_yield() catch std.time.sleep(1);
                }

                // Failed to acquire the lock; go to sleep until woken up by `Held.release()`.
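                // Note: this xchg may also win the lock (old value Unlocked);
                // the state is then left as Sleeping, which at worst costs
                // release() one spurious wakeup later.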
                if (@atomicRmw(State, &self.state, .Xchg, .Sleeping, .Acquire) == .Unlocked)
                    return Held{ .mutex = self };
                state = .Sleeping;
                self.parker.park(@ptrCast(*const u32, &self.state), @enumToInt(State.Sleeping));
            }
        }
    };

const TestContext = struct {
    mutex: *Mutex,
    data: i128,

    const incr_count = 10000;
};

test "std.Mutex" {
    var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
    defer std.heap.page_allocator.free(plenty_of_memory);

    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
    var a = &fixed_buffer_allocator.allocator;

    var mutex = Mutex.init();
    defer mutex.deinit();

    var context = TestContext{
        .mutex = &mutex,
        .data = 0,
    };

    if (builtin.single_threaded) {
        worker(&context);
        testing.expect(context.data == TestContext.incr_count);
    } else {
        const thread_count = 10;
        var threads: [thread_count]*std.Thread = undefined;
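
        // Each worker increments `context.data` `incr_count` times under the
        // mutex; if locking works, no increments are lost.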
        for (threads) |*t| {
            t.* = try std.Thread.spawn(&context, worker);
        }
        for (threads) |t|
            t.wait();

        testing.expect(context.data == thread_count * TestContext.incr_count);
    }
}

fn worker(ctx: *TestContext) void {
    var i: usize = 0;
    while (i != TestContext.incr_count) : (i += 1) {
        const held = ctx.mutex.acquire();
        defer held.release();

        ctx.data += 1;
    }
}