// lib/std/event/group.zig
const std = @import("../std.zig");
const builtin = @import("builtin");
const Lock = std.event.Lock;
const testing = std.testing;
const Allocator = std.mem.Allocator;
/// ReturnType must be `void` or `E!void`
/// TODO This API was created back with the old design of async/await, when calling any
/// async function required an allocator. There is an ongoing experiment to transition
/// all uses of this API to the simpler and more resource-aware `std.event.Batch` API.
/// If the transition goes well, all usages of `Group` will be gone, and this API
/// will be deleted.
/// Returns a struct type that tracks a set of async frames and lets a
/// caller await them all at once. Frames may be caller-owned (`add` /
/// `addNode`) or allocated by the group itself (`call`); group-allocated
/// frame memory is freed by `wait`.
pub fn Group(comptime ReturnType: type) type {
    return struct {
        frame_stack: Stack,
        alloc_stack: AllocStack,
        lock: Lock,
        allocator: *Allocator,

        const Self = @This();

        // The error set of `ReturnType` when it is an error union,
        // otherwise `void` (meaning awaited frames cannot fail).
        const Error = switch (@typeInfo(ReturnType)) {
            .ErrorUnion => |payload| payload.error_set,
            else => void,
        };
        const Stack = std.atomic.Stack(anyframe->ReturnType);
        const AllocStack = std.atomic.Stack(Node);

        pub const Node = struct {
            // Memory to free with `allocator` after the frame completes.
            // Empty when the caller owns the frame's storage.
            bytes: []const u8 = &[0]u8{},
            handle: anyframe->ReturnType,
        };

        pub fn init(allocator: *Allocator) Self {
            return Self{
                .frame_stack = Stack.init(),
                .alloc_stack = AllocStack.init(),
                .lock = Lock.init(),
                .allocator = allocator,
            };
        }

        /// Add a frame to the group. Thread-safe.
        /// The caller owns the frame's memory; it must stay alive until
        /// `wait` completes. Fails only on allocation of the tracking node.
        pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
            const node = try self.allocator.create(AllocStack.Node);
            node.* = AllocStack.Node{
                .next = undefined,
                .data = Node{
                    .handle = handle,
                },
            };
            self.alloc_stack.push(node);
        }

        /// Add a node to the group. Thread-safe. Cannot fail.
        /// `node.data` should be the frame handle to add to the group.
        /// The node's memory should be in the function frame of
        /// the handle that is in the node, or somewhere guaranteed to live
        /// at least as long.
        pub fn addNode(self: *Self, node: *Stack.Node) void {
            self.frame_stack.push(node);
        }

        /// This is equivalent to adding a frame to the group but the memory of its frame is
        /// allocated by the group and freed by `wait`.
        /// `func` must be async and have return type `ReturnType`.
        /// Thread-safe.
        pub fn call(self: *Self, comptime func: var, args: var) error{OutOfMemory}!void {
            // Allocate storage for the async frame before starting it, so
            // a failed node allocation can unwind without a live frame.
            var frame = try self.allocator.create(@TypeOf(@call(.{ .modifier = .async_kw }, func, args)));
            errdefer self.allocator.destroy(frame);
            const node = try self.allocator.create(AllocStack.Node);
            errdefer self.allocator.destroy(node);
            node.* = AllocStack.Node{
                .next = undefined,
                .data = Node{
                    .handle = frame,
                    // Record the frame's bytes so `wait` can free them.
                    .bytes = std.mem.asBytes(frame),
                },
            };
            frame.* = @call(.{ .modifier = .async_kw }, func, args);
            self.alloc_stack.push(node);
        }

        /// Wait for all the calls and promises of the group to complete.
        /// Thread-safe.
        /// Safe to call any number of times.
        /// If any frame fails, the last error observed is returned, but all
        /// frames are still awaited (none are abandoned).
        pub fn wait(self: *Self) callconv(.Async) ReturnType {
            const held = self.lock.acquire();
            defer held.release();

            var result: ReturnType = {};
            // First drain caller-owned frames.
            while (self.frame_stack.pop()) |node| {
                if (Error == void) {
                    await node.data;
                } else {
                    (await node.data) catch |err| {
                        result = err;
                    };
                }
            }
            // Then drain group-allocated frames, freeing their memory.
            while (self.alloc_stack.pop()) |node| {
                const handle = node.data.handle;
                if (Error == void) {
                    await handle;
                } else {
                    (await handle) catch |err| {
                        result = err;
                    };
                }
                self.allocator.free(node.data.bytes);
                self.allocator.destroy(node);
            }
            return result;
        }
    };
}
test "std.event.Group" {
    // Requires threads and evented I/O; neither is available here.
    // https://github.com/ziglang/zig/issues/1908
    if (builtin.single_threaded) return error.SkipZigTest;
    if (!std.io.is_async) return error.SkipZigTest;
    // TODO this file has bit-rotted. repair it
    if (true) return error.SkipZigTest;
    // Unreachable while the skip above is in place; kicks off the async test body.
    const handle = async testGroup(std.heap.page_allocator);
}
/// Async test body: exercises a `void` group (two frames incrementing a
/// shared counter) and an `anyerror!void` group (one failing frame).
fn testGroup(allocator: *Allocator) callconv(.Async) void {
    var count: usize = 0;

    var group = Group(void).init(allocator);
    var sleep_a_little_frame = async sleepALittle(&count);
    group.add(&sleep_a_little_frame) catch @panic("memory");
    var increase_by_ten_frame = async increaseByTen(&count);
    group.add(&increase_by_ten_frame) catch @panic("memory");
    group.wait();
    // 1 from sleepALittle + 10 from increaseByTen.
    testing.expect(count == 11);

    var another = Group(anyerror!void).init(allocator);
    var something_else_frame = async somethingElse();
    another.add(&something_else_frame) catch @panic("memory");
    var something_that_fails_frame = async doSomethingThatFails();
    another.add(&something_that_fails_frame) catch @panic("memory");
    // somethingElse returns error.ItBroke; wait reports the error.
    testing.expectError(error.ItBroke, another.wait());
}
/// Sleeps ~1ms, then atomically increments `count` by one.
fn sleepALittle(count: *usize) callconv(.Async) void {
    std.time.sleep(1 * std.time.ns_per_ms);
    _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
}
/// Atomically increments `count` ten times.
fn increaseByTen(count: *usize) callconv(.Async) void {
    var i: usize = 0;
    while (i < 10) : (i += 1) {
        _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
    }
}
// NOTE(review): the names below are swapped relative to their behavior —
// `doSomethingThatFails` succeeds and `somethingElse` fails. The test in
// this bit-rotted file relies on `somethingElse` producing error.ItBroke,
// so behavior is kept as-is.

/// Completes successfully; despite the name it never fails.
fn doSomethingThatFails() callconv(.Async) anyerror!void {}

/// Always returns `error.ItBroke`.
fn somethingElse() callconv(.Async) anyerror!void {
    return error.ItBroke;
}