better workaround for guaranteeing memory in coroutine frame

See #1194
Andrew Kelley 2018-07-09 21:21:59 -04:00
parent 1a1534ecb5
commit c89aac85c4
2 changed files with 43 additions and 22 deletions
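The diff below applies the same idea in three places (Loop.waitFd, Lock.acquire, and the std.event.Lock test). Distilled from the Lock.acquire hunk, the old and new workarounds look roughly like this; the snippet is an illustrative fragment in the 2018-era coroutine syntax, not code copied verbatim from the commit:

    // Old workaround (removed here): declare the node before the suspend block,
    // then take its address after the suspend so the variable is forced to be
    // kept in the coroutine frame across the suspend point.
    var my_tick_node: Loop.NextTickNode = undefined;
    s: suspend |handle| {
        my_tick_node.data = handle;
        self.queue.put(&my_tick_node);
    }
    // ... later, after resumption ...
    var trash1 = &my_tick_node; // dummy use only to keep the memory in the frame

    // New workaround (introduced here): declare and initialize the node inside
    // the suspend block, so no dummy reference is needed afterwards. An explicit
    // way to place memory in the coroutine frame is still an open TODO (#1194).
    s: suspend |handle| {
        var my_tick_node = Loop.NextTickNode{
            .data = handle,
            .next = undefined,
        };
        self.queue.put(&my_tick_node);
    }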


@@ -60,6 +60,31 @@ pub fn QueueMpsc(comptime T: type) type {
             }
             return self.outbox.isEmpty();
         }
+
+        /// For debugging only. No API guarantees about what this does.
+        pub fn dump(self: *Self) void {
+            {
+                var it = self.outbox.root;
+                while (it) |node| {
+                    std.debug.warn("0x{x} -> ", @ptrToInt(node));
+                    it = node.next;
+                }
+            }
+            const inbox_index = self.inbox_index;
+            const inboxes = []*std.atomic.Stack(T){
+                &self.inboxes[self.inbox_index],
+                &self.inboxes[1 - self.inbox_index],
+            };
+            for (inboxes) |inbox| {
+                var it = inbox.root;
+                while (it) |node| {
+                    std.debug.warn("0x{x} -> ", @ptrToInt(node));
+                    it = node.next;
+                }
+            }
+            std.debug.warn("null\n");
+        }
     };
 }
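The dump helper added above returns nothing and has no effect beyond printing; a hypothetical call (the queue variable and its element type are assumed for illustration, not part of this commit) would look like:

    // Assuming `queue` is an already-initialized QueueMpsc(i32): walks the
    // outbox list and both inboxes, printing each node's address followed by
    // a terminating "null", e.g. "0x7f9c... -> 0x7f9c... -> null".
    queue.dump();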


@@ -439,15 +439,14 @@ pub const Loop = struct {
     pub async fn waitFd(self: *Loop, fd: i32) !void {
         defer self.removeFd(fd);
-        var resume_node = ResumeNode{
-            .id = ResumeNode.Id.Basic,
-            .handle = undefined,
-        };
         suspend |p| {
-            resume_node.handle = p;
+            // TODO explicitly put this memory in the coroutine frame #1194
+            var resume_node = ResumeNode{
+                .id = ResumeNode.Id.Basic,
+                .handle = p,
+            };
             try self.addFd(fd, &resume_node);
         }
-        var a = &resume_node; // TODO better way to explicitly put memory in coro frame
     }

     /// Bring your own linked list node. This means it can't fail.
@@ -618,8 +617,7 @@ pub const Loop = struct {
             while (true) {
                 var nbytes: windows.DWORD = undefined;
                 var overlapped: ?*windows.OVERLAPPED = undefined;
-                switch (std.os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key,
-                    &overlapped, windows.INFINITE)) {
+                switch (std.os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
                     std.os.WindowsWaitResult.Aborted => return,
                     std.os.WindowsWaitResult.Normal => {},
                 }
@@ -1062,10 +1060,13 @@ pub const Lock = struct {
     }

     pub async fn acquire(self: *Lock) Held {
-        var my_tick_node: Loop.NextTickNode = undefined;
-
         s: suspend |handle| {
-            my_tick_node.data = handle;
+            // TODO explicitly put this memory in the coroutine frame #1194
+            var my_tick_node = Loop.NextTickNode{
+                .data = handle,
+                .next = undefined,
+            };
             self.queue.put(&my_tick_node);

             // At this point, we are in the queue, so we might have already been resumed and this coroutine
@@ -1107,10 +1108,6 @@ pub const Lock = struct {
             }
         }

-        // TODO this workaround to force my_tick_node to be in the coroutine frame should
-        // not be necessary
-        var trash1 = &my_tick_node;
-
         return Held{ .lock = self };
     }
 };
@@ -1176,6 +1173,10 @@ test "std.event.Lock" {
 }

 async fn testLock(loop: *Loop, lock: *Lock) void {
+    // TODO explicitly put next tick node memory in the coroutine frame #1194
+    suspend |p| {
+        resume p;
+    }
     const handle1 = async lockRunner(lock) catch @panic("out of memory");
     var tick_node1 = Loop.NextTickNode{
         .next = undefined,
@@ -1200,12 +1201,6 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
     await handle1;
     await handle2;
     await handle3;
-
-    // TODO this is to force tick node memory to be in the coro frame
-    // there should be a way to make it explicit where the memory is
-    var a = &tick_node1;
-    var b = &tick_node2;
-    var c = &tick_node3;
 }

 var shared_test_data = [1]i32{0} ** 10;
@@ -1216,7 +1211,8 @@ async fn lockRunner(lock: *Lock) void {
     var i: usize = 0;
     while (i < shared_test_data.len) : (i += 1) {
-        const handle = await (async lock.acquire() catch @panic("out of memory"));
+        const lock_promise = async lock.acquire() catch @panic("out of memory");
+        const handle = await lock_promise;
         defer handle.release();

         shared_test_index = 0;