remove cancel

Andrew Kelley 2019-08-15 14:05:12 -04:00
parent 64c293f8a4
commit 13b5a4bf8c
24 changed files with 251 additions and 801 deletions


@ -750,7 +750,6 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
.Keyword_async,
.Keyword_await,
.Keyword_break,
.Keyword_cancel,
.Keyword_catch,
.Keyword_comptime,
.Keyword_const,


@ -5971,7 +5971,7 @@ test "global assembly" {
{#header_open|Async Functions#}
<p>
An async function is a function whose callsite is split into an {#syntax#}async{#endsyntax#} initiation,
followed by an {#syntax#}await{#endsyntax#} completion. They can also be canceled.
followed by an {#syntax#}await{#endsyntax#} completion.
</p>
<p>
When you call a function, it creates a stack frame,
@ -6013,11 +6013,11 @@ test "global assembly" {
<p>
The result of an async function call is a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#}
is the return type of the async function. Once a promise has been created, it must be
consumed, either with {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}:
consumed with {#syntax#}await{#endsyntax#}:
</p>
<p>
Async functions start executing when created, so in the following example, the entire
async function completes before it is canceled:
TODO
</p>
{#code_begin|test#}
const std = @import("std");
@ -6048,7 +6048,7 @@ fn simpleAsyncFn() void {
</p>
<p>
When an async function suspends itself, it must be sure that it will be
resumed or canceled somehow, for example by registering its promise handle
resumed somehow, for example by registering its promise handle
in an event loop. Use a suspend capture block to gain access to the
promise (TODO this is outdated):
</p>
@ -6134,7 +6134,7 @@ async fn testResumeFromSuspend(my_result: *i32) void {
resumes the awaiter.
</p>
<p>
A promise handle must be consumed exactly once after it is created, either by {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}.
A frame handle must be consumed exactly once after it is created, by {#syntax#}await{#endsyntax#}.
</p>
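<p>
A minimal sketch of the initiation/completion pair, assuming the async call
syntax of this era:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

var x: i32 = 1;

test "consume the frame with await" {
    // async initiation: creates the frame and starts the function
    var frame = async simpleAsyncFn();
    // await completion: the one and only consumption of the frame
    await frame;
    assert(x == 2);
}

fn simpleAsyncFn() void {
    x += 1;
}
{#code_end#}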
<p>
{#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#},
@ -9764,7 +9764,6 @@ PrimaryExpr
&lt;- AsmExpr
/ IfExpr
/ KEYWORD_break BreakLabel? Expr?
/ KEYWORD_cancel Expr
/ KEYWORD_comptime Expr
/ KEYWORD_continue BreakLabel?
/ KEYWORD_resume Expr
@ -10120,7 +10119,6 @@ KEYWORD_asm &lt;- 'asm' end_of_word
KEYWORD_async &lt;- 'async' end_of_word
KEYWORD_await &lt;- 'await' end_of_word
KEYWORD_break &lt;- 'break' end_of_word
KEYWORD_cancel &lt;- 'cancel' end_of_word
KEYWORD_catch &lt;- 'catch' end_of_word
KEYWORD_comptime &lt;- 'comptime' end_of_word
KEYWORD_const &lt;- 'const' end_of_word
@ -10165,7 +10163,7 @@ KEYWORD_volatile &lt;- 'volatile' end_of_word
KEYWORD_while &lt;- 'while' end_of_word
keyword &lt;- KEYWORD_align / KEYWORD_and / KEYWORD_allowzero / KEYWORD_asm
/ KEYWORD_async / KEYWORD_await / KEYWORD_break / KEYWORD_cancel
/ KEYWORD_async / KEYWORD_await / KEYWORD_break
/ KEYWORD_catch / KEYWORD_comptime / KEYWORD_const / KEYWORD_continue
/ KEYWORD_defer / KEYWORD_else / KEYWORD_enum / KEYWORD_errdefer
/ KEYWORD_error / KEYWORD_export / KEYWORD_extern / KEYWORD_false


@ -467,7 +467,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.start();
// TODO const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color);
defer cancel process_build_events_handle;
loop.run();
}
@ -579,7 +578,6 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
defer zig_compiler.deinit();
// TODO const handle = try async<loop.allocator> findLibCAsync(&zig_compiler);
defer cancel handle;
loop.run();
}
@ -669,7 +667,6 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
// TODO &flags,
// TODO color,
// TODO );
defer cancel main_handle;
loop.run();
return result;
}


@ -476,7 +476,6 @@ enum NodeType {
NodeTypeIfErrorExpr,
NodeTypeIfOptional,
NodeTypeErrorSetDecl,
NodeTypeCancel,
NodeTypeResume,
NodeTypeAwaitExpr,
NodeTypeSuspend,
@ -911,10 +910,6 @@ struct AstNodeBreakExpr {
AstNode *expr; // may be null
};
struct AstNodeCancelExpr {
AstNode *expr;
};
struct AstNodeResumeExpr {
AstNode *expr;
};
@ -1003,7 +998,6 @@ struct AstNode {
AstNodeInferredArrayType inferred_array_type;
AstNodeErrorType error_type;
AstNodeErrorSetDecl err_set_decl;
AstNodeCancelExpr cancel_expr;
AstNodeResumeExpr resume_expr;
AstNodeAwaitExpr await_expr;
AstNodeSuspend suspend;
@ -1561,7 +1555,6 @@ enum PanicMsgId {
PanicMsgIdBadAwait,
PanicMsgIdBadReturn,
PanicMsgIdResumedAnAwaitingFn,
PanicMsgIdResumedACancelingFn,
PanicMsgIdFrameTooSmall,
PanicMsgIdResumedFnPendingAwait,
@ -1729,8 +1722,6 @@ struct CodeGen {
LLVMValueRef cur_async_switch_instr;
LLVMValueRef cur_async_resume_index_ptr;
LLVMValueRef cur_async_awaiter_ptr;
LLVMValueRef cur_async_prev_val;
LLVMValueRef cur_async_prev_val_field_ptr;
LLVMBasicBlockRef cur_preamble_llvm_block;
size_t cur_resume_block_count;
LLVMValueRef cur_err_ret_trace_val_arg;
@ -1822,7 +1813,6 @@ struct CodeGen {
ZigType *align_amt_type;
ZigType *stack_trace_type;
ZigType *ptr_to_stack_trace_type;
ZigType *err_tag_type;
ZigType *test_fn_type;
@ -1892,7 +1882,6 @@ struct CodeGen {
bool system_linker_hack;
bool reported_bad_link_libc_error;
bool is_dynamic; // shared library rather than static library. dynamic musl rather than static musl.
bool cur_is_after_return;
//////////////////////////// Participates in Input Parameter Cache Hash
/////// Note: there is a separate cache hash for builtin.zig, when adding fields,
@ -2235,7 +2224,6 @@ enum IrInstructionId {
IrInstructionIdCallGen,
IrInstructionIdConst,
IrInstructionIdReturn,
IrInstructionIdReturnBegin,
IrInstructionIdCast,
IrInstructionIdResizeSlice,
IrInstructionIdContainerInitList,
@ -2345,7 +2333,6 @@ enum IrInstructionId {
IrInstructionIdExport,
IrInstructionIdErrorReturnTrace,
IrInstructionIdErrorUnion,
IrInstructionIdCancel,
IrInstructionIdAtomicRmw,
IrInstructionIdAtomicLoad,
IrInstructionIdSaveErrRetAddr,
@ -2370,7 +2357,6 @@ enum IrInstructionId {
IrInstructionIdAwaitSrc,
IrInstructionIdAwaitGen,
IrInstructionIdResume,
IrInstructionIdTestCancelRequested,
IrInstructionIdSpillBegin,
IrInstructionIdSpillEnd,
};
@ -2649,12 +2635,6 @@ struct IrInstructionReturn {
IrInstruction *operand;
};
struct IrInstructionReturnBegin {
IrInstruction base;
IrInstruction *operand;
};
enum CastOp {
CastOpNoCast, // signifies the function call expression is not a cast
CastOpNoop, // fn call expr is a cast, but does nothing
@ -3440,12 +3420,6 @@ struct IrInstructionErrorUnion {
IrInstruction *payload;
};
struct IrInstructionCancel {
IrInstruction base;
IrInstruction *frame;
};
struct IrInstructionAtomicRmw {
IrInstruction base;
@ -3647,10 +3621,6 @@ struct IrInstructionResume {
IrInstruction *frame;
};
struct IrInstructionTestCancelRequested {
IrInstruction base;
};
enum SpillId {
SpillIdInvalid,
SpillIdRetErrCode,
@ -3756,8 +3726,7 @@ static const size_t err_union_err_index = 1;
static const size_t frame_fn_ptr_index = 0;
static const size_t frame_resume_index = 1;
static const size_t frame_awaiter_index = 2;
static const size_t frame_prev_val_index = 3;
static const size_t frame_ret_start = 4;
static const size_t frame_ret_start = 3;
// TODO https://github.com/ziglang/zig/issues/3056
// We require this to be a power of 2 so that we can use shifting rather than
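For orientation, the header implied by these constants (grep label
[fn_frame_struct_layout]) can be pictured as the following hypothetical Zig
struct, assuming the field order that resolve_async_frame appends below:

// Hypothetical model, not code from this commit. The @prev_val word that
// used to sit at index 3 is gone, so the return data moves up by one slot.
const FrameHeader = extern struct {
    fn_ptr: usize, // frame_fn_ptr_index = 0
    resume_index: usize, // frame_resume_index = 1
    awaiter: usize, // frame_awaiter_index = 2
    // frame_ret_start = 3: @result_ptr_callee, @result_ptr_awaiter, @result,
    // then, when error return tracing applies, the callee and awaiter trace
    // pointers followed by the trace storage itself.
};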


@ -828,17 +828,15 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
zig_unreachable();
}
ZigType *get_ptr_to_stack_trace_type(CodeGen *g) {
ZigType *get_stack_trace_type(CodeGen *g) {
if (g->stack_trace_type == nullptr) {
ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace");
assert(stack_trace_type_val->type->id == ZigTypeIdMetaType);
g->stack_trace_type = stack_trace_type_val->data.x_type;
assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown));
g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false);
}
return g->ptr_to_stack_trace_type;
return g->stack_trace_type;
}
bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) {
@ -3035,7 +3033,6 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeIfErrorExpr:
case NodeTypeIfOptional:
case NodeTypeErrorSetDecl:
case NodeTypeCancel:
case NodeTypeResume:
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
@ -3822,11 +3819,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
} else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("await is a suspend point"));
} else if (fn->inferred_async_node->type == NodeTypeCancel) {
add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("cancel is a suspend point"));
} else {
zig_unreachable();
add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("suspends here"));
}
}
@ -5231,12 +5226,21 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0});
fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
fields.append({"@prev_val", g->builtin_types.entry_usize, 0});
fields.append({"@result_ptr_callee", ptr_return_type, 0});
fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
fields.append({"@result", fn_type_id->return_type, 0});
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
ZigType *ptr_to_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
fields.append({"@ptr_stack_trace_callee", ptr_to_stack_trace_type, 0});
fields.append({"@ptr_stack_trace_awaiter", ptr_to_stack_trace_type, 0});
fields.append({"@stack_trace", get_stack_trace_type(g), 0});
fields.append({"@instruction_addresses",
get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
}
frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
fields.items, fields.length, target_fn_align(g->zig_target));
frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
@ -5311,14 +5315,15 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
fields.append({"@fn_ptr", fn_type, 0});
fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
fields.append({"@prev_val", g->builtin_types.entry_usize, 0});
fields.append({"@result_ptr_callee", ptr_return_type, 0});
fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
fields.append({"@result", fn_type_id->return_type, 0});
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
fields.append({"@ptr_stack_trace", get_ptr_to_stack_trace_type(g), 0});
ZigType *ptr_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
fields.append({"@ptr_stack_trace_callee", ptr_stack_trace_type, 0});
fields.append({"@ptr_stack_trace_awaiter", ptr_stack_trace_type, 0});
}
for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
@ -5337,9 +5342,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
}
if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) {
(void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type
fields.append({"@stack_trace", g->stack_trace_type, 0});
fields.append({"@stack_trace", get_stack_trace_type(g), 0});
fields.append({"@instruction_addresses",
get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
}
@ -7553,7 +7556,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
fn_type->data.fn.gen_return_type = gen_return_type;
if (prefix_arg_error_return_trace && !is_async) {
ZigType *gen_type = get_ptr_to_stack_trace_type(g);
ZigType *gen_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
}
@ -7727,7 +7730,6 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
field_types.append(ptr_fn_llvm_type); // fn_ptr
field_types.append(usize_type_ref); // resume_index
field_types.append(usize_type_ref); // awaiter
field_types.append(usize_type_ref); // prev_val
bool have_result_type = result_type != nullptr && type_has_bits(result_type);
if (have_result_type) {
@ -7735,7 +7737,9 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter
field_types.append(get_llvm_type(g, result_type)); // result
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace
ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_callee
field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_awaiter
}
}
LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false);
@ -7792,14 +7796,23 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)));
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace",
ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_callee",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g))));
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_awaiter",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
}
};


@ -195,7 +195,7 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, Buf *symbol_name, Global
ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name);
ZigType *get_ptr_to_stack_trace_type(CodeGen *g);
ZigType *get_stack_trace_type(CodeGen *g);
bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node);
ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry);


@ -249,8 +249,6 @@ static const char *node_type_str(NodeType node_type) {
return "IfOptional";
case NodeTypeErrorSetDecl:
return "ErrorSetDecl";
case NodeTypeCancel:
return "Cancel";
case NodeTypeResume:
return "Resume";
case NodeTypeAwaitExpr:
@ -1136,12 +1134,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "}");
break;
}
case NodeTypeCancel:
{
fprintf(ar->f, "cancel ");
render_node_grouped(ar, node->data.cancel_expr.expr);
break;
}
case NodeTypeResume:
{
fprintf(ar->f, "resume ");


@ -28,8 +28,6 @@ enum ResumeId {
ResumeIdManual,
ResumeIdReturn,
ResumeIdCall,
ResumeIdAwaitEarlyReturn // must be last
};
static void init_darwin_native(CodeGen *g) {
@ -317,8 +315,9 @@ static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) {
// label (grep this): [fn_frame_struct_layout]
static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) {
bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type);
// [0] *StackTrace
uint32_t trace_field_count = have_stack_trace ? 1 : 0;
// [0] *StackTrace (callee's)
// [1] *StackTrace (awaiter's)
uint32_t trace_field_count = have_stack_trace ? 2 : 0;
return frame_index_trace_arg(g, return_type) + trace_field_count;
}
@ -916,8 +915,6 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("async function returned twice");
case PanicMsgIdResumedAnAwaitingFn:
return buf_create_from_str("awaiting function resumed");
case PanicMsgIdResumedACancelingFn:
return buf_create_from_str("canceling function resumed");
case PanicMsgIdFrameTooSmall:
return buf_create_from_str("frame too small");
case PanicMsgIdResumedFnPendingAwait:
@ -946,13 +943,16 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) {
return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(get_llvm_type(g, str_type), 0));
}
static ZigType *ptr_to_stack_trace_type(CodeGen *g) {
return get_pointer_to_type(g, get_stack_trace_type(g), false);
}
static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace_arg) {
assert(g->panic_fn != nullptr);
LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn);
LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc);
if (stack_trace_arg == nullptr) {
ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
msg_arg,
@ -1046,7 +1046,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
return g->add_error_return_trace_addr_fn_val;
LLVMTypeRef arg_types[] = {
get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
get_llvm_type(g, ptr_to_stack_trace_type(g)),
g->builtin_types.entry_usize->llvm_type,
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@ -1127,7 +1127,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMTypeRef arg_types[] = {
// error return trace pointer
get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
get_llvm_type(g, ptr_to_stack_trace_type(g)),
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 1, false);
@ -1205,7 +1205,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
LLVMTypeRef fn_type_ref;
if (g->have_err_ret_tracing) {
LLVMTypeRef arg_types[] = {
get_llvm_type(g, g->ptr_to_stack_trace_type),
get_llvm_type(g, get_pointer_to_type(g, get_stack_trace_type(g), false)),
get_llvm_type(g, g->err_tag_type),
};
fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@ -1321,14 +1321,7 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
}
if (g->cur_err_ret_trace_val_arg != nullptr) {
if (fn_is_async(g->cur_fn)) {
return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, "");
} else {
return g->cur_err_ret_trace_val_arg;
}
}
return nullptr;
return g->cur_err_ret_trace_val_arg;
}
static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *scope) {
@ -1337,8 +1330,7 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc
if (g->have_err_ret_tracing) {
LLVMValueRef err_ret_trace_val = get_cur_err_ret_trace_val(g, scope);
if (err_ret_trace_val == nullptr) {
ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
err_ret_trace_val,
@ -2044,8 +2036,8 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
assert(g->stack_trace_type != nullptr);
LLVMTypeRef param_types[] = {
get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
get_llvm_type(g, ptr_to_stack_trace_type(g)),
get_llvm_type(g, ptr_to_stack_trace_type(g)),
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
@ -2058,7 +2050,6 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
addLLVMArgAttr(fn_val, (unsigned)0, "noalias");
addLLVMArgAttr(fn_val, (unsigned)0, "writeonly");
addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
addLLVMArgAttr(fn_val, (unsigned)1, "noalias");
addLLVMArgAttr(fn_val, (unsigned)1, "readonly");
if (g->build_mode == BuildModeDebug) {
@ -2075,7 +2066,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
LLVMPositionBuilderAtEnd(g->builder, entry_block);
ZigLLVMClearCurrentDebugLocation(g->builder);
// if (dest_stack_trace == null) return;
// if (dest_stack_trace == null or src_stack_trace == null) return;
// var frame_index: usize = undefined;
// var frames_left: usize = undefined;
// if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
@ -2093,7 +2084,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
// frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
// }
LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull");
LLVMBasicBlockRef non_null_block = LLVMAppendBasicBlock(fn_val, "NonNull");
LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
@ -2103,9 +2094,12 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr,
LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), "");
LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block);
LLVMValueRef null_src_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_stack_trace_ptr,
LLVMConstNull(LLVMTypeOf(src_stack_trace_ptr)), "");
LLVMValueRef null_bit = LLVMBuildOr(g->builder, null_dest_bit, null_src_bit, "");
LLVMBuildCondBr(g->builder, null_bit, return_block, non_null_block);
LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block);
LLVMPositionBuilderAtEnd(g->builder, non_null_block);
size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
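Read as ordinary code, the pseudocode comment above amounts to the following
hypothetical Zig, where StackTrace and addErrRetTraceAddr are minimal
stand-ins for the generated ring-buffer helpers:

const StackTrace = struct {
    index: usize,
    instruction_addresses: []usize,
};

// Stand-in for the generated append helper: store into the ring buffer at
// index % len, then bump index (so index may exceed len once wrapped).
fn addErrRetTraceAddr(t: *StackTrace, addr: usize) void {
    t.instruction_addresses[t.index % t.instruction_addresses.len] = addr;
    t.index += 1;
}

fn mergeErrRetTraces(dest: ?*StackTrace, src: ?*StackTrace) void {
    const d = dest orelse return; // new: both pointers are now null-checked
    const s = src orelse return;
    var frame_index: usize = undefined;
    var frames_left: usize = undefined;
    if (s.index < s.instruction_addresses.len) {
        frame_index = 0;
        frames_left = s.index;
        if (frames_left == 0) return;
    } else {
        frame_index = (s.index + 1) % s.instruction_addresses.len;
        frames_left = s.instruction_addresses.len;
    }
    while (frames_left != 0) : (frames_left -= 1) {
        addErrRetTraceAddr(d, s.instruction_addresses[frame_index]);
        frame_index = (frame_index + 1) % s.instruction_addresses.len;
    }
}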
@ -2183,13 +2177,11 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
if (fn_is_async(g->cur_fn) && g->cur_fn->calls_or_awaits_errorable_fn &&
codegen_fn_has_err_ret_tracing_arg(g, g->cur_fn->type_entry->data.fn.fn_type_id.return_type))
{
LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, "");
LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
frame_index_trace_arg(g, ret_type), "");
LLVMBuildStore(g->builder, my_err_trace_val, trace_ptr_ptr);
}
return nullptr;
@ -2201,16 +2193,9 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
LLVMValueRef ok_bit;
if (resume_id == ResumeIdAwaitEarlyReturn) {
LLVMValueRef last_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false));
ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, "");
} else {
LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
LLVMConstInt(usize_type_ref, resume_id, false));
ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
}
LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
LLVMConstInt(usize_type_ref, resume_id, false));
LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block);
LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
@ -2219,36 +2204,19 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume
LLVMPositionBuilderAtEnd(g->builder, end_bb);
}
static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr,
ResumeId resume_id, LLVMValueRef arg_val)
{
static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, ResumeId resume_id) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
if (fn_val == nullptr) {
LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, "");
fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
}
if (arg_val == nullptr) {
arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
LLVMConstInt(usize_type_ref, resume_id, false), "");
} else {
assert(resume_id == ResumeIdAwaitEarlyReturn);
}
LLVMValueRef arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
LLVMConstInt(usize_type_ref, resume_id, false), "");
LLVMValueRef args[] = {target_frame_ptr, arg_val};
return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
}
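The resume id passed in the second argument is encoded by counting down from
the all-ones usize, as the LLVMConstSub calls above show. A hypothetical
restatement, assuming ResumeIdManual is the enum's first member:

const std = @import("std");

fn encodeResumeId(id: usize) usize {
    return std.math.maxInt(usize) - id;
}

test "resume ids count down from all ones" {
    std.debug.assert(encodeResumeId(0) == std.math.maxInt(usize)); // ResumeIdManual
    std.debug.assert(encodeResumeId(1) == std.math.maxInt(usize) - 1); // ResumeIdReturn
    std.debug.assert(encodeResumeId(2) == std.math.maxInt(usize) - 2); // ResumeIdCall
}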
static LLVMValueRef get_cur_async_prev_val(CodeGen *g) {
if (g->cur_async_prev_val != nullptr) {
return g->cur_async_prev_val;
}
g->cur_async_prev_val = LLVMBuildLoad(g->builder, g->cur_async_prev_val_field_ptr, "");
return g->cur_async_prev_val;
}
static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
// This becomes invalid when a suspend happens.
g->cur_async_prev_val = nullptr;
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint);
size_t new_block_index = g->cur_resume_block_count;
@ -2259,6 +2227,10 @@ static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
return resume_bb;
}
static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) {
LLVMSetTailCall(call_inst, true);
}
static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMValueRef ptr, LLVMValueRef val,
LLVMAtomicOrdering order)
{
@ -2282,32 +2254,32 @@ static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMV
}
}
static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
IrInstructionReturnBegin *instruction)
{
static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr;
bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type);
if (!fn_is_async(g->cur_fn)) {
return operand_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr;
}
ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
bool ret_type_has_bits = type_has_bits(ret_type);
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
if (operand_has_bits && instruction->operand != nullptr) {
ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
bool need_store = instruction->operand->value.special != ConstValSpecialRuntime || !handle_is_ptr(ret_type);
if (need_store) {
// It didn't get written to the result ptr. We do that now so that we do not have to spill
// the return operand.
// It didn't get written to the result ptr. We do that now.
ZigType *ret_ptr_type = get_pointer_to_type(g, ret_type, true);
gen_assign_raw(g, g->cur_ret_ptr, ret_ptr_type, ir_llvm_value(g, instruction->operand));
}
}
// Prepare to be suspended. We might end up not having to suspend though.
LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "ReturnResume");
// Whether we tail resume the awaiter, or do an early return, we are done and will not be resumed.
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
}
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr,
all_ones, LLVMAtomicOrderingAcquire);
@ -2316,7 +2288,6 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem");
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2);
LLVMBasicBlockRef switch_bb = LLVMGetInsertBlock(g->builder);
LLVMAddCase(switch_instr, zero, early_return_block);
LLVMAddCase(switch_instr, all_ones, bad_return_block);
@ -2325,90 +2296,63 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
LLVMPositionBuilderAtEnd(g->builder, bad_return_block);
gen_assertion(g, PanicMsgIdBadReturn, &instruction->base);
// The caller has not done an await yet. So we suspend at the return instruction, until a
// cancel or await is performed.
// There is no awaiter yet, but we're completely done.
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
LLVMBuildRetVoid(g->builder);
// Add a safety check for when getting resumed by the awaiter.
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
LLVMBasicBlockRef after_resume_block = LLVMGetInsertBlock(g->builder);
gen_assert_resume_id(g, &instruction->base, ResumeIdAwaitEarlyReturn, PanicMsgIdResumedFnPendingAwait,
resume_them_block);
// We need to resume the caller by tail calling them,
// but first write through the result pointer and possibly
// error return trace pointer.
LLVMPositionBuilderAtEnd(g->builder, resume_them_block);
// We need to resume the caller by tail calling them.
// That will happen when rendering IrInstructionReturn after running the defers/errdefers.
// We either got here from Entry (function call) or from the switch above
g->cur_async_prev_val = LLVMBuildPhi(g->builder, usize_type_ref, "");
LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), prev_val };
LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb };
LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2);
if (ret_type_has_bits) {
// If the awaiter result pointer is non-null, we need to copy the result to there.
LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, "");
LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
g->cur_is_after_return = true;
LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr);
LLVMPositionBuilderAtEnd(g->builder, copy_block);
LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, "");
bool is_volatile = false;
uint32_t abi_align = get_abi_alignment(g, ret_type);
LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
ZigLLVMBuildMemCpy(g->builder,
dest_ptr_casted, abi_align,
src_ptr_casted, abi_align, byte_count_val, is_volatile);
LLVMBuildBr(g->builder, copy_end_block);
if (!operand_has_bits) {
return nullptr;
LLVMPositionBuilderAtEnd(g->builder, copy_end_block);
if (codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
LLVMValueRef awaiter_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
frame_index_trace_arg(g, ret_type) + 1, "");
LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, awaiter_trace_ptr_ptr, "");
LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
}
}
return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true));
}
static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) {
LLVMSetTailCall(call_inst, true);
// Resume the caller by tail calling them.
ZigType *any_frame_type = get_any_frame_type(g, ret_type);
LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, get_llvm_type(g, any_frame_type), "");
LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn);
set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
}
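The heart of gen_async_return is an atomic handshake on the frame's awaiter
word. A hypothetical Zig sketch of the protocol, with suspension flattened
into plain returns since it cannot be expressed directly here:

const std = @import("std");

// Sketch only, not compiler source: the callee's side of the handshake.
fn asyncReturnHandshake(awaiter_word: *usize) void {
    const all_ones: usize = std.math.maxInt(usize);
    // Flip every bit of the awaiter word; whichever side gets there second
    // sees the other side's value and takes over responsibility.
    const prev = @atomicRmw(usize, awaiter_word, .Xor, all_ones, .Acquire);
    if (prev == 0) {
        // No awaiter yet, but we are completely done: plain return.
        // The eventual awaiter copies the result out itself.
        return;
    }
    if (prev == all_ones) @panic("async function returned twice");
    // prev holds the awaiter's frame address: copy the result through
    // @result_ptr_awaiter, merge the error return trace if applicable,
    // then tail-resume the awaiter with ResumeIdReturn.
}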
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
if (fn_is_async(g->cur_fn)) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
bool ret_type_has_bits = type_has_bits(ret_type);
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
}
if (ret_type_has_bits) {
// If the awaiter result pointer is non-null, we need to copy the result to there.
LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, "");
LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
LLVMPositionBuilderAtEnd(g->builder, copy_block);
LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, "");
bool is_volatile = false;
uint32_t abi_align = get_abi_alignment(g, ret_type);
LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
ZigLLVMBuildMemCpy(g->builder,
dest_ptr_casted, abi_align,
src_ptr_casted, abi_align, byte_count_val, is_volatile);
LLVMBuildBr(g->builder, copy_end_block);
LLVMPositionBuilderAtEnd(g->builder, copy_end_block);
}
// We need to resume the caller by tail calling them.
ZigType *any_frame_type = get_any_frame_type(g, ret_type);
LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
LLVMValueRef mask_val = LLVMConstNot(one);
LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, get_cur_async_prev_val(g), mask_val, "");
LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val,
get_llvm_type(g, any_frame_type), "");
LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr);
set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
g->cur_is_after_return = false;
gen_async_return(g, instruction);
return nullptr;
}
if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) {
if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
@ -3893,6 +3837,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
// even if prefix_arg_err_ret_stack is true, let the async function do its own
// initialization.
} else {
// async function called as a normal function
frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
if (ret_has_bits) {
@ -3912,7 +3858,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (prefix_arg_err_ret_stack) {
LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
frame_index_trace_arg(g, src_return_type), "");
frame_index_trace_arg(g, src_return_type) + 1, "");
LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
}
@ -4018,7 +3964,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
if (instruction->is_async) {
gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
if (instruction->new_stack != nullptr) {
return frame_result_loc;
}
@ -4028,7 +3974,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume");
LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
@ -4744,8 +4690,7 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu
{
LLVMValueRef cur_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
if (cur_err_ret_trace_val == nullptr) {
ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
return cur_err_ret_trace_val;
}
@ -5505,60 +5450,6 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl
return nullptr;
}
static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
src_assert(instruction->frame->value.type->id == ZigTypeIdAnyFrame, instruction->base.source_node);
ZigType *result_type = instruction->frame->value.type->data.any_frame.result_type;
LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume");
// supply null for the awaiter return pointer (no copy needed)
if (type_has_bits(result_type)) {
LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, "");
LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))),
awaiter_ret_ptr_ptr);
}
// supply null for the error return trace pointer
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
frame_index_trace_arg(g, result_type), "");
LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(err_ret_trace_ptr_ptr))),
err_ret_trace_ptr_ptr);
}
LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, "");
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, "");
LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val,
LLVMAtomicOrderingRelease);
LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CancelSuspend");
LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_bb, 2);
LLVMAddCase(switch_instr, zero, complete_suspend_block);
LLVMAddCase(switch_instr, all_ones, early_return_block);
LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
LLVMBuildRetVoid(g->builder);
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val);
set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedACancelingFn, nullptr);
return nullptr;
}
static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
@ -5568,8 +5459,9 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// Prepare to be suspended
LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume");
LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd");
// At this point resuming the function will do the correct thing.
// At this point resuming the function will continue from resume_bb.
// This code is as if it is running inside the suspend block.
// supply the awaiter return pointer
@ -5591,15 +5483,15 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
assert(my_err_ret_trace_val != nullptr);
LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
frame_index_trace_arg(g, result_type), "");
frame_index_trace_arg(g, result_type) + 1, "");
LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
}
// caller's own frame pointer
LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, "");
LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
LLVMAtomicOrderingRelease, g->is_single_threaded);
LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
LLVMAtomicOrderingRelease);
LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait");
LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend");
@ -5615,20 +5507,42 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
LLVMPositionBuilderAtEnd(g->builder, bad_await_block);
gen_assertion(g, PanicMsgIdBadAwait, &instruction->base);
// Early return: The async function has already completed, but it is suspending before setting the result,
// populating the error return trace if applicable, and running the defers.
// Tail resume it now, so that it can complete.
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val);
set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
// Rely on the target to resume us from suspension.
LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
LLVMBuildRetVoid(g->builder);
// Early return: The async function has already completed. We must copy the result and
// the error return trace if applicable.
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
if (type_has_bits(result_type) && result_loc != nullptr) {
LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
bool is_volatile = false;
uint32_t abi_align = get_abi_alignment(g, result_type);
LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
ZigLLVMBuildMemCpy(g->builder,
dest_ptr_casted, abi_align,
src_ptr_casted, abi_align, byte_count_val, is_volatile);
}
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
frame_index_trace_arg(g, result_type), "");
LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope);
LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
}
LLVMBuildBr(g->builder, end_bb);
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
LLVMBuildBr(g->builder, end_bb);
LLVMPositionBuilderAtEnd(g->builder, end_bb);
if (type_has_bits(result_type) && result_loc != nullptr) {
return get_handle_value(g, result_loc, result_type, ptr_result_type);
}
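ir_render_await is the mirror image of the return handshake: publish our own
frame address and branch on what was there before. Again a hypothetical Zig
sketch, not compiler source:

const std = @import("std");

fn awaitHandshake(awaiter_word: *usize, my_frame_addr: usize) void {
    const all_ones: usize = std.math.maxInt(usize);
    // Before this exchange, the awaiter has already stored its result
    // pointer (and error trace pointer, if applicable) into the target frame.
    const prev = @atomicRmw(usize, awaiter_word, .Xchg, my_frame_addr, .Release);
    if (prev == 0) {
        // Callee still running; it will see our address when it returns and
        // tail-resume us. Modeled here as a plain return (CompleteSuspend).
        return;
    }
    if (prev != all_ones) @panic("async function awaited twice");
    // Early return: the callee already finished, so copy the result out of
    // its frame and merge its error return trace into ours without ever
    // suspending (the new EarlyReturn block above).
}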
@ -5640,7 +5554,7 @@ static LLVMValueRef ir_render_resume(CodeGen *g, IrExecutable *executable, IrIns
ZigType *frame_type = instruction->frame->value.type;
assert(frame_type->id == ZigTypeIdAnyFrame);
gen_resume(g, nullptr, frame, ResumeIdManual, nullptr);
gen_resume(g, nullptr, frame, ResumeIdManual);
return nullptr;
}
@ -5651,18 +5565,6 @@ static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
return gen_frame_size(g, fn_val);
}
static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *executable,
IrInstructionTestCancelRequested *instruction)
{
if (!fn_is_async(g->cur_fn))
return LLVMConstInt(LLVMInt1Type(), 0, false);
if (g->cur_is_after_return) {
return LLVMBuildTrunc(g->builder, get_cur_async_prev_val(g), LLVMInt1Type(), "");
} else {
zig_panic("TODO");
}
}
static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable,
IrInstructionSpillBegin *instruction)
{
@ -5798,8 +5700,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdDeclVarGen:
return ir_render_decl_var(g, executable, (IrInstructionDeclVarGen *)instruction);
case IrInstructionIdReturnBegin:
return ir_render_return_begin(g, executable, (IrInstructionReturnBegin *)instruction);
case IrInstructionIdReturn:
return ir_render_return(g, executable, (IrInstructionReturn *)instruction);
case IrInstructionIdBinOp:
@ -5918,8 +5818,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction);
case IrInstructionIdErrorReturnTrace:
return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction);
case IrInstructionIdCancel:
return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction);
case IrInstructionIdAtomicRmw:
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
@ -5952,8 +5850,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
case IrInstructionIdAwaitGen:
return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
case IrInstructionIdTestCancelRequested:
return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction);
case IrInstructionIdSpillBegin:
return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction);
case IrInstructionIdSpillEnd:
@ -7060,9 +6956,9 @@ static void do_code_gen(CodeGen *g) {
ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count);
err_ret_array_val = build_alloca(g, array_type, "error_return_trace_addresses", get_abi_alignment(g, array_type));
// populate g->stack_trace_type
(void)get_ptr_to_stack_trace_type(g);
g->cur_err_ret_trace_val_stack = build_alloca(g, g->stack_trace_type, "error_return_trace", get_abi_alignment(g, g->stack_trace_type));
(void)get_llvm_type(g, get_stack_trace_type(g));
g->cur_err_ret_trace_val_stack = build_alloca(g, get_stack_trace_type(g), "error_return_trace",
get_abi_alignment(g, g->stack_trace_type));
} else {
g->cur_err_ret_trace_val_stack = nullptr;
}
@ -7204,18 +7100,12 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start, "");
g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, "");
}
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type);
g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index, "");
}
uint32_t trace_field_index_stack = UINT32_MAX;
if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
}
g->cur_async_prev_val_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
frame_prev_val_index, "");
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
@ -7227,6 +7117,13 @@ static void do_code_gen(CodeGen *g) {
g->cur_resume_block_count += 1;
LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
if (trace_field_index_stack != UINT32_MAX) {
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
frame_index_trace_arg(g, fn_type_id->return_type), "");
LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(trace_ptr_ptr)));
LLVMBuildStore(g->builder, zero_ptr, trace_ptr_ptr);
}
LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
@ -7273,8 +7170,6 @@ static void do_code_gen(CodeGen *g) {
LLVMDumpModule(g->module);
}
// in release mode, we're sooooo confident that we've generated correct ir,
// that we skip the verify module step in order to get better performance.
#ifndef NDEBUG
char *error = nullptr;
LLVMVerifyModule(g->module, LLVMAbortProcessAction, &error);
@ -10157,6 +10052,11 @@ bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) {
}
bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) {
return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
(is_async || !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
if (is_async) {
return g->have_err_ret_tracing && (fn->calls_or_awaits_errorable_fn ||
codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
} else {
return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
!codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
}
}
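Restated as a hypothetical Zig predicate, the new behavior is:

fn hasErrRetTracingStack(
    have_err_ret_tracing: bool,
    calls_or_awaits_errorable_fn: bool,
    has_trace_arg: bool, // codegen_fn_has_err_ret_tracing_arg(g, return_type)
    is_async: bool,
) bool {
    if (is_async) {
        return have_err_ret_tracing and
            (calls_or_awaits_errorable_fn or has_trace_arg);
    }
    return have_err_ret_tracing and
        calls_or_awaits_errorable_fn and !has_trace_arg;
}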


@ -526,10 +526,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionReturn *) {
return IrInstructionIdReturn;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionReturnBegin *) {
return IrInstructionIdReturnBegin;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionCast *) {
return IrInstructionIdCast;
}
@ -974,10 +970,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorUnion *) {
return IrInstructionIdErrorUnion;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) {
return IrInstructionIdCancel;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
return IrInstructionIdAtomicRmw;
}
@ -1062,10 +1054,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResume *) {
return IrInstructionIdResume;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelRequested *) {
return IrInstructionIdTestCancelRequested;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) {
return IrInstructionIdSpillBegin;
}
@ -1138,18 +1126,6 @@ static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *sou
return &return_instruction->base;
}
static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *operand)
{
IrInstructionReturnBegin *return_instruction = ir_build_instruction<IrInstructionReturnBegin>(irb, scope, source_node);
return_instruction->operand = operand;
ir_ref_instruction(operand, irb->current_basic_block);
return &return_instruction->base;
}
static IrInstruction *ir_build_const_void(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, source_node);
const_instruction->base.value.type = irb->codegen->builtin_types.entry_void;
@ -3284,16 +3260,6 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN
return &instruction->base;
}
static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
IrInstructionCancel *instruction = ir_build_instruction<IrInstructionCancel>(irb, scope, source_node);
instruction->base.value.type = irb->codegen->builtin_types.entry_void;
instruction->frame = frame;
ir_ref_instruction(frame, irb->current_basic_block);
return &instruction->base;
}
static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *frame, ResultLoc *result_loc)
{
@ -3331,13 +3297,6 @@ static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *sou
return &instruction->base;
}
static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionTestCancelRequested *instruction = ir_build_instruction<IrInstructionTestCancelRequested>(irb, scope, source_node);
instruction->base.value.type = irb->codegen->builtin_types.entry_bool;
return &instruction->base;
}
static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *operand, SpillId spill_id)
{
@ -3532,7 +3491,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
}
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
return_value = ir_build_return_begin(irb, scope, node, return_value);
size_t defer_counts[2];
ir_count_defers(irb, scope, outer_scope, defer_counts);
@ -3545,49 +3503,40 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
return result;
}
bool should_inline = ir_should_inline(irb->exec, scope);
bool need_test_cancel = !should_inline && have_err_defers;
IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, scope, "Defers");
IrBasicBlock *ok_block = need_test_cancel ?
ir_create_basic_block(irb, scope, "ErrRetOk") : normal_defers_block;
IrBasicBlock *all_defers_block = have_err_defers ? ir_create_basic_block(irb, scope, "ErrDefers") : normal_defers_block;
IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
if (!have_err_defers) {
ir_gen_defers_for_block(irb, scope, outer_scope, false);
}
IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
IrInstruction *force_comptime = ir_build_const_bool(irb, scope, node, should_inline);
IrInstruction *err_is_comptime;
IrInstruction *is_comptime;
if (should_inline) {
err_is_comptime = force_comptime;
is_comptime = ir_build_const_bool(irb, scope, node, should_inline);
} else {
err_is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
}
ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, err_is_comptime));
ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
ir_set_cursor_at_end_and_append_block(irb, err_block);
if (have_err_defers) {
ir_gen_defers_for_block(irb, scope, outer_scope, true);
}
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
ir_build_br(irb, scope, node, all_defers_block, err_is_comptime);
ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
if (need_test_cancel) {
ir_set_cursor_at_end_and_append_block(irb, ok_block);
IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node);
ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled,
all_defers_block, normal_defers_block, force_comptime));
ir_set_cursor_at_end_and_append_block(irb, ok_block);
if (have_err_defers) {
ir_gen_defers_for_block(irb, scope, outer_scope, false);
}
if (all_defers_block != normal_defers_block) {
ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
ir_gen_defers_for_block(irb, scope, outer_scope, true);
ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
}
ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
ir_gen_defers_for_block(irb, scope, outer_scope, false);
ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
IrInstruction *result = ir_build_return(irb, scope, node, return_value);
@ -3619,8 +3568,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val,
SpillIdRetErrCode);
ir_build_return_begin(irb, scope, node, err_val);
err_val = ir_build_spill_end(irb, scope, node, spill_begin);
ResultLocReturn *result_loc_ret = allocate<ResultLocReturn>(1);
result_loc_ret->base.id = ResultLocIdReturn;
ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
@ -3629,6 +3576,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
err_val = ir_build_spill_end(irb, scope, node, spill_begin);
IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val);
result_loc_ret->base.source_instruction = ret_inst;
}
@ -3847,38 +3795,10 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
return result;
// no need for save_err_ret_addr because this cannot return error
// but if it is a canceled async function we do need to run the errdefers
// only generate unconditional defers
ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result));
result = ir_mark_gen(ir_build_return_begin(irb, child_scope, block_node, result));
size_t defer_counts[2];
ir_count_defers(irb, child_scope, outer_block_scope, defer_counts);
bool have_err_defers = defer_counts[ReturnKindError] > 0;
if (!have_err_defers) {
// only generate unconditional defers
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
}
IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node);
IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers");
IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers");
IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt");
bool should_inline = ir_should_inline(irb->exec, child_scope);
IrInstruction *errdefers_is_comptime = ir_build_const_bool(irb, child_scope, block_node,
should_inline || !have_err_defers);
ir_mark_gen(ir_build_cond_br(irb, child_scope, block_node, is_canceled,
all_defers_block, normal_defers_block, errdefers_is_comptime));
ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, true);
ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
}
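
Note on the return lowering above: with cancellation gone, ir_gen_return no longer needs the TestCancelRequested branch, and the user-visible rule simplifies to "errdefer runs only when the function returns an error". A minimal sketch of that rule (all names hypothetical):

    const std = @import("std");

    var cleanup_count: u32 = 0;

    fn mayFail(fail: bool) !void {
        defer cleanup_count += 1; // unconditional defers always run
        errdefer cleanup_count += 10; // runs only on the error return path
        if (fail) return error.Bad;
    }

    test "errdefer runs only on the error path" {
        mayFail(false) catch unreachable;
        std.testing.expect(cleanup_count == 1);
        mayFail(true) catch {};
        std.testing.expect(cleanup_count == 12);
    }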
@ -7930,31 +7850,6 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args);
}
static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeCancel);
ZigFn *fn_entry = exec_fn_entry(irb->exec);
if (!fn_entry) {
add_node_error(irb->codegen, node, buf_sprintf("cancel outside function definition"));
return irb->codegen->invalid_instruction;
}
ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
if (existing_suspend_scope) {
if (!existing_suspend_scope->reported_err) {
ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot cancel inside suspend block"));
add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
existing_suspend_scope->reported_err = true;
}
return irb->codegen->invalid_instruction;
}
IrInstruction *operand = ir_gen_node_extra(irb, node->data.cancel_expr.expr, scope, LValPtr, nullptr);
if (operand == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
return ir_build_cancel(irb, scope, node, operand);
}
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
@ -8149,8 +8044,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
case NodeTypeErrorSetDecl:
return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
case NodeTypeCancel:
return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval, result_loc);
case NodeTypeResume:
return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
case NodeTypeAwaitExpr:
@ -8228,7 +8121,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
if (!instr_is_unreachable(result)) {
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result));
result = ir_mark_gen(ir_build_return_begin(irb, scope, node, result));
// no need for save_err_ret_addr because this cannot return error
ir_mark_gen(ir_build_return(irb, scope, result->source_node, result));
}
@ -8340,7 +8232,6 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec
switch (instruction->id) {
case IrInstructionIdUnwrapErrPayload:
case IrInstructionIdUnionFieldPtr:
case IrInstructionIdReturnBegin:
continue;
default:
break;
@ -12745,17 +12636,17 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze
return ir_const_void(ira, &instruction->base);
}
static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInstructionReturnBegin *instruction) {
static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
IrInstruction *operand = instruction->operand->child;
if (type_is_invalid(operand->value.type))
return ira->codegen->invalid_instruction;
return ir_unreach_error(ira);
if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
// result location mechanism took care of it.
IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, operand);
copy_const_val(&result->value, &operand->value, true);
return result;
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, nullptr);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
@ -12777,38 +12668,6 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst
return ir_unreach_error(ira);
}
IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, casted_operand);
copy_const_val(&result->value, &casted_operand->value, true);
return result;
}
static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
IrInstruction *operand = instruction->operand->child;
if (type_is_invalid(operand->value.type))
return ir_unreach_error(ira);
if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
// result location mechanism took care of it.
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, nullptr);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
// This cast might have been already done from IrInstructionReturnBegin but it also
// might not have, in the case of `try`.
IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
if (type_is_invalid(casted_operand->value.type)) {
AstNode *source_node = ira->explicit_return_type_source_node;
if (source_node != nullptr) {
ErrorMsg *msg = ira->codegen->errors.last();
add_error_note(ira->codegen, msg, source_node,
buf_sprintf("return type declared here"));
}
return ir_unreach_error(ira);
}
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, casted_operand);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
@ -14540,8 +14399,8 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
IrInstructionErrorReturnTrace *instruction)
{
ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false);
if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
IrInstruction *result = ir_const(ira, &instruction->base, optional_type);
@ -14559,7 +14418,7 @@ static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
assert(ira->codegen->have_err_ret_tracing);
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, instruction->optional);
new_instruction->value.type = get_ptr_to_stack_trace_type(ira->codegen);
new_instruction->value.type = ptr_to_stack_trace_type;
return new_instruction;
}
}
@ -15800,6 +15659,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
parent_fn_entry->inferred_async_node = fn_ref->source_node;
parent_fn_entry->inferred_async_fn = impl_fn;
}
IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
@ -15923,6 +15783,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
parent_fn_entry->inferred_async_node = fn_ref->source_node;
parent_fn_entry->inferred_async_fn = fn_entry;
}
IrInstruction *result_loc;
@ -24702,21 +24563,6 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct
return casted_frame;
}
static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
if (type_is_invalid(frame->value.type))
return ira->codegen->invalid_instruction;
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
if (fn_entry->inferred_async_node == nullptr) {
fn_entry->inferred_async_node = instruction->base.source_node;
}
return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame);
}
static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
if (type_is_invalid(frame->value.type))
@ -24772,15 +24618,6 @@ static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructio
return ir_build_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
}
static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ira,
IrInstructionTestCancelRequested *instruction)
{
if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
return ir_const_bool(ira, &instruction->base, false);
}
return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
}
static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) {
if (ir_should_inline(ira->new_irb.exec, instruction->base.scope))
return ir_const_void(ira, &instruction->base);
@ -24848,8 +24685,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
case IrInstructionIdAwaitGen:
zig_unreachable();
case IrInstructionIdReturnBegin:
return ir_analyze_instruction_return_begin(ira, (IrInstructionReturnBegin *)instruction);
case IrInstructionIdReturn:
return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction);
case IrInstructionIdConst:
@ -25070,8 +24905,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction);
case IrInstructionIdErrorUnion:
return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction);
case IrInstructionIdCancel:
return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction);
case IrInstructionIdAtomicRmw:
return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
@ -25114,8 +24947,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_resume(ira, (IrInstructionResume *)instruction);
case IrInstructionIdAwaitSrc:
return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
case IrInstructionIdTestCancelRequested:
return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction);
case IrInstructionIdSpillBegin:
return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction);
case IrInstructionIdSpillEnd:
@ -25209,7 +25040,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdStorePtr:
case IrInstructionIdCallSrc:
case IrInstructionIdCallGen:
case IrInstructionIdReturnBegin:
case IrInstructionIdReturn:
case IrInstructionIdUnreachable:
case IrInstructionIdSetCold:
@ -25235,7 +25065,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
case IrInstructionIdCancel:
case IrInstructionIdSaveErrRetAddr:
case IrInstructionIdAddImplicitReturnType:
case IrInstructionIdAtomicRmw:
@ -25355,7 +25184,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdHasDecl:
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
case IrInstructionIdTestCancelRequested:
case IrInstructionIdSpillEnd:
return false;
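
Taken together, the ir.cpp removals above (IrInstructionCancel, IrInstructionTestCancelRequested, and the ReturnBegin bookkeeping) implement one surface-level rule: await is now the only way to consume an async frame. A minimal sketch of that rule, with func as a stand-in:

    fn func() void {}

    test "await is the only consumer of a frame" {
        var f = async func();
        // previously: `cancel f;` — the keyword no longer exists
        await f;
    }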

View File

@ -64,12 +64,6 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) {
}
}
static void ir_print_return_begin(IrPrint *irp, IrInstructionReturnBegin *instruction) {
fprintf(irp->f, "@returnBegin(");
ir_print_other_instruction(irp, instruction->operand);
fprintf(irp->f, ")");
}
static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) {
fprintf(irp->f, "return ");
ir_print_other_instruction(irp, instruction->operand);
@ -1394,11 +1388,6 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct
ir_print_other_instruction(irp, instruction->payload);
}
static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) {
fprintf(irp->f, "cancel ");
ir_print_other_instruction(irp, instruction->frame);
}
static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
fprintf(irp->f, "@atomicRmw(");
if (instruction->operand_type != nullptr) {
@ -1549,10 +1538,6 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction)
fprintf(irp->f, ")");
}
static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) {
fprintf(irp->f, "@testCancelRequested()");
}
static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) {
fprintf(irp->f, "@spillBegin(");
ir_print_other_instruction(irp, instruction->operand);
@ -1570,9 +1555,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
zig_unreachable();
case IrInstructionIdReturnBegin:
ir_print_return_begin(irp, (IrInstructionReturnBegin *)instruction);
break;
case IrInstructionIdReturn:
ir_print_return(irp, (IrInstructionReturn *)instruction);
break;
@ -1966,9 +1948,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdErrorUnion:
ir_print_error_union(irp, (IrInstructionErrorUnion *)instruction);
break;
case IrInstructionIdCancel:
ir_print_cancel(irp, (IrInstructionCancel *)instruction);
break;
case IrInstructionIdAtomicRmw:
ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction);
break;
@ -2047,9 +2026,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdAwaitGen:
ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction);
break;
case IrInstructionIdTestCancelRequested:
ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction);
break;
case IrInstructionIdSpillBegin:
ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction);
break;

View File

@ -1167,7 +1167,6 @@ static AstNode *ast_parse_prefix_expr(ParseContext *pc) {
// <- AsmExpr
// / IfExpr
// / KEYWORD_break BreakLabel? Expr?
// / KEYWORD_cancel Expr
// / KEYWORD_comptime Expr
// / KEYWORD_continue BreakLabel?
// / KEYWORD_resume Expr
@ -1195,14 +1194,6 @@ static AstNode *ast_parse_primary_expr(ParseContext *pc) {
return res;
}
Token *cancel = eat_token_if(pc, TokenIdKeywordCancel);
if (cancel != nullptr) {
AstNode *expr = ast_expect(pc, ast_parse_expr);
AstNode *res = ast_create_node(pc, NodeTypeCancel, cancel);
res->data.cancel_expr.expr = expr;
return res;
}
Token *comptime = eat_token_if(pc, TokenIdKeywordCompTime);
if (comptime != nullptr) {
AstNode *expr = ast_expect(pc, ast_parse_expr);
@ -3035,9 +3026,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);
break;
case NodeTypeCancel:
visit_field(&node->data.cancel_expr.expr, visit, context);
break;
case NodeTypeResume:
visit_field(&node->data.resume_expr.expr, visit, context);
break;

View File

@ -114,7 +114,6 @@ static const struct ZigKeyword zig_keywords[] = {
{"async", TokenIdKeywordAsync},
{"await", TokenIdKeywordAwait},
{"break", TokenIdKeywordBreak},
{"cancel", TokenIdKeywordCancel},
{"catch", TokenIdKeywordCatch},
{"comptime", TokenIdKeywordCompTime},
{"const", TokenIdKeywordConst},
@ -1531,7 +1530,6 @@ const char * token_name(TokenId id) {
case TokenIdKeywordAwait: return "await";
case TokenIdKeywordResume: return "resume";
case TokenIdKeywordSuspend: return "suspend";
case TokenIdKeywordCancel: return "cancel";
case TokenIdKeywordAlign: return "align";
case TokenIdKeywordAnd: return "and";
case TokenIdKeywordAnyFrame: return "anyframe";

View File

@ -58,7 +58,6 @@ enum TokenId {
TokenIdKeywordAsync,
TokenIdKeywordAwait,
TokenIdKeywordBreak,
TokenIdKeywordCancel,
TokenIdKeywordCatch,
TokenIdKeywordCompTime,
TokenIdKeywordConst,

View File

@ -1301,7 +1301,7 @@ async fn testFsWatch(loop: *Loop) !void {
const ev = try async watch.channel.get();
var ev_consumed = false;
defer if (!ev_consumed) cancel ev;
defer if (!ev_consumed) await ev;
// overwrite line 2
const fd = try await try async openReadWrite(loop, file_path, File.default_mode);
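
The watch test above trades cancel for a consumed-flag defer. The shape of that pattern in isolation, assuming a stand-in async function with a void result (the real test drains a channel event):

    var done: bool = false;

    fn fetch() void {
        done = true;
    }

    fn useOnce(fail: bool) !void {
        var f = async fetch();
        var f_consumed = false;
        defer if (!f_consumed) await f; // the frame is still drained on early returns
        if (fail) return error.Bad; // leaves before the manual await below
        f_consumed = true;
        await f;
    }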

View File

@ -110,7 +110,7 @@ async fn testFuture(loop: *Loop) void {
const b_result = await b;
const result = a_result + b_result;
cancel c;
await c;
testing.expect(result == 12);
}

View File

@ -27,17 +27,6 @@ pub fn Group(comptime ReturnType: type) type {
};
}
/// Cancel all the outstanding frames. Can be called even if wait was already called.
pub fn deinit(self: *Self) void {
while (self.frame_stack.pop()) |node| {
cancel node.data;
}
while (self.alloc_stack.pop()) |node| {
cancel node.data;
self.lock.loop.allocator.destroy(node);
}
}
/// Add a frame to the group. Thread-safe.
pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node);
@ -64,13 +53,14 @@ pub fn Group(comptime ReturnType: type) type {
const held = self.lock.acquire();
defer held.release();
var result: ReturnType = {};
while (self.frame_stack.pop()) |node| {
if (Error == void) {
await node.data;
} else {
(await node.data) catch |err| {
self.deinit();
return err;
result = err;
};
}
}
@ -81,11 +71,11 @@ pub fn Group(comptime ReturnType: type) type {
await handle;
} else {
(await handle) catch |err| {
self.deinit();
return err;
result = err;
};
}
}
return result;
}
};
}
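
With deinit() removed, wait() above becomes the Group's single cleanup path: every outstanding frame is awaited, and for error-returning groups a failure is recorded while the remaining frames keep getting drained, with the last error reported at the end. The aggregation shape, reduced to a sketch (a slice of frames stands in for the two stacks):

    fn waitAll(frames: []anyframe->anyerror!void) anyerror!void {
        var result: anyerror!void = {};
        for (frames) |f| {
            (await f) catch |err| {
                result = err; // remember the failure, keep consuming frames
            };
        }
        return result;
    }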

View File

@ -54,7 +54,7 @@ pub const Server = struct {
self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd));
self.accept_frame = async Server.handler(self);
errdefer cancel self.accept_frame.?;
errdefer await self.accept_frame.?;
self.listen_resume_node.handle = self.accept_frame.?;
try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
@ -71,7 +71,7 @@ pub const Server = struct {
}
pub fn deinit(self: *Server) void {
if (self.accept_frame) |accept_frame| cancel accept_frame;
if (self.accept_frame) |accept_frame| await accept_frame;
if (self.sockfd) |sockfd| os.close(sockfd);
}
@ -274,13 +274,9 @@ test "listen on a port, send bytes, receive bytes" {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592
defer socket.close();
// TODO guarantee elision of this allocation
const next_handler = errorableHandler(self, _addr, socket) catch |err| {
std.debug.panic("unable to handle connection: {}\n", err);
};
suspend {
cancel @frame();
}
}
async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void {
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/1592

View File

@ -814,7 +814,6 @@ fn parsePrefixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// <- AsmExpr
/// / IfExpr
/// / KEYWORD_break BreakLabel? Expr?
/// / KEYWORD_cancel Expr
/// / KEYWORD_comptime Expr
/// / KEYWORD_continue BreakLabel?
/// / KEYWORD_resume Expr
@ -839,20 +838,6 @@ fn parsePrimaryExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
if (eatToken(it, .Keyword_cancel)) |token| {
const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
.ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
});
const node = try arena.create(Node.PrefixOp);
node.* = Node.PrefixOp{
.base = Node{ .id = .PrefixOp },
.op_token = token,
.op = Node.PrefixOp.Op.Cancel,
.rhs = expr_node,
};
return &node.base;
}
if (eatToken(it, .Keyword_comptime)) |token| {
const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
.ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },

View File

@ -2115,10 +2115,10 @@ test "zig fmt: async functions" {
\\ await p;
\\}
\\
\\test "suspend, resume, cancel" {
\\test "suspend, resume, await" {
\\ const p: anyframe = async testAsyncSeq();
\\ resume p;
\\ cancel p;
\\ await p;
\\}
\\
);

View File

@ -21,7 +21,6 @@ pub const Token = struct {
Keyword{ .bytes = "await", .id = Id.Keyword_await },
Keyword{ .bytes = "break", .id = Id.Keyword_break },
Keyword{ .bytes = "catch", .id = Id.Keyword_catch },
Keyword{ .bytes = "cancel", .id = Id.Keyword_cancel },
Keyword{ .bytes = "comptime", .id = Id.Keyword_comptime },
Keyword{ .bytes = "const", .id = Id.Keyword_const },
Keyword{ .bytes = "continue", .id = Id.Keyword_continue },
@ -151,7 +150,6 @@ pub const Token = struct {
Keyword_async,
Keyword_await,
Keyword_break,
Keyword_cancel,
Keyword_catch,
Keyword_comptime,
Keyword_const,

View File

@ -61,13 +61,15 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"runtime-known async function called",
\\export fn entry() void {
\\ _ = async amain();
\\}
\\fn amain() void {
\\ var ptr = afunc;
\\ _ = ptr();
\\}
\\
\\async fn afunc() void {}
,
"tmp.zig:3:12: error: function is not comptime-known; @asyncCall required",
"tmp.zig:6:12: error: function is not comptime-known; @asyncCall required",
);
cases.add(
@ -3388,7 +3390,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(Foo)); }
,
"tmp.zig:5:18: error: unable to evaluate constant expression",
"tmp.zig:5:25: error: unable to evaluate constant expression",
"tmp.zig:2:12: note: called from here",
"tmp.zig:2:8: note: called from here",
);

View File

@ -41,7 +41,6 @@ comptime {
_ = @import("behavior/bugs/920.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/cancel.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/const_slice_child.zig");
_ = @import("behavior/defer.zig");

View File

@ -150,7 +150,7 @@ test "coroutine suspend, resume" {
seq('a');
var f = async testAsyncSeq();
seq('c');
cancel f;
await f;
seq('g');
}
@ -271,7 +271,6 @@ test "async function with dot syntax" {
}
};
const p = async S.foo();
// can't cancel in tests because they are non-async functions
expect(S.y == 2);
}
@ -286,7 +285,7 @@ test "async fn pointer in a struct field" {
comptime expect(@typeOf(f) == anyframe->void);
expect(data == 2);
resume f;
expect(data == 2);
expect(data == 4);
_ = async doTheAwait(f);
expect(data == 4);
}
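
The doTheAwait helper above is the replacement idiom for tests: a test block is not an async function, so the await moves into a small async wrapper whose own frame is deliberately discarded. Sketched on its own (someAsyncFn is a stand-in; the frame is passed by address to coerce to anyframe->void):

    fn someAsyncFn() void {
        suspend;
    }

    fn doTheAwait(f: anyframe->void) void {
        await f;
    }

    test "await a frame from a non-async test" {
        var f = async someAsyncFn();
        resume f; // drive the function past its suspend point first
        _ = async doTheAwait(&f);
    }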
@ -394,7 +393,6 @@ async fn printTrace(p: anyframe->(anyerror!void)) void {
test "break from suspend" {
var my_result: i32 = 1;
const p = async testBreakFromSuspend(&my_result);
// can't cancel here
std.testing.expect(my_result == 2);
}
async fn testBreakFromSuspend(my_result: *i32) void {
@ -530,45 +528,6 @@ test "call async function which has struct return type" {
S.doTheTest();
}
test "errdefers in scope get run when canceling async fn call" {
const S = struct {
var frame: anyframe = undefined;
var x: u32 = 0;
fn doTheTest() void {
x = 9;
_ = async cancelIt();
resume frame;
expect(x == 6);
x = 9;
_ = async awaitIt();
resume frame;
expect(x == 11);
}
fn cancelIt() void {
var f = async func();
cancel f;
}
fn awaitIt() void {
var f = async func();
await f;
}
fn func() void {
defer x += 1;
errdefer x /= 2;
defer x += 1;
suspend {
frame = @frame();
}
}
};
S.doTheTest();
}
test "pass string literal to async function" {
const S = struct {
var frame: anyframe = undefined;
@ -590,7 +549,7 @@ test "pass string literal to async function" {
S.doTheTest();
}
test "cancel inside an errdefer" {
test "await inside an errdefer" {
const S = struct {
var frame: anyframe = undefined;
@ -601,7 +560,7 @@ test "cancel inside an errdefer" {
fn amainWrap() !void {
var foo = async func();
errdefer cancel foo;
errdefer await foo;
return error.Bad;
}
@ -614,35 +573,6 @@ test "cancel inside an errdefer" {
S.doTheTest();
}
test "combining try with errdefer cancel" {
const S = struct {
var frame: anyframe = undefined;
var ok = false;
fn doTheTest() void {
_ = async amain();
resume frame;
expect(ok);
}
fn amain() !void {
var f = async func("https://example.com/");
errdefer cancel f;
_ = try await f;
}
fn func(url: []const u8) ![]u8 {
errdefer ok = true;
frame = @frame();
suspend;
return error.Bad;
}
};
S.doTheTest();
}
test "try in an async function with error union and non-zero-bit payload" {
const S = struct {
var frame: anyframe = undefined;
@ -730,14 +660,22 @@ fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime si
fn amain() !void {
const allocator = std.heap.direct_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks
var download_frame = async fetchUrl(allocator, "https://example.com/");
errdefer cancel download_frame;
var download_awaited = false;
errdefer if (!download_awaited) {
if (await download_frame) |x| allocator.free(x) else |_| {}
};
var file_frame = async readFile(allocator, "something.txt");
errdefer cancel file_frame;
var file_awaited = false;
errdefer if (!file_awaited) {
if (await file_frame) |x| allocator.free(x) else |_| {}
};
download_awaited = true;
const download_text = try await download_frame;
defer allocator.free(download_text);
file_awaited = true;
const file_text = try await file_frame;
defer allocator.free(file_text);
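
The awaited flags above are the direct replacement for errdefer cancel: on an error path each frame is still consumed exactly once, and a payload that did arrive is freed instead of leaked. Reduced to a single frame for clarity (fetchUrl and the allocator usage come from the test above):

    const std = @import("std");

    fn amainSketch(allocator: *std.mem.Allocator) !void {
        var download_frame = async fetchUrl(allocator, "https://example.com/");
        var download_awaited = false;
        errdefer if (!download_awaited) {
            // consume the frame even on failure; free the payload if it succeeded
            if (await download_frame) |x| allocator.free(x) else |_| {}
        };

        download_awaited = true;
        const download_text = try await download_frame;
        defer allocator.free(download_text);
    }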

View File

@ -1,115 +0,0 @@
const std = @import("std");
const expect = std.testing.expect;
var defer_f1: bool = false;
var defer_f2: bool = false;
var defer_f3: bool = false;
var f3_frame: anyframe = undefined;
test "cancel forwards" {
_ = async atest1();
resume f3_frame;
}
fn atest1() void {
const p = async f1();
cancel &p;
expect(defer_f1);
expect(defer_f2);
expect(defer_f3);
}
async fn f1() void {
defer {
defer_f1 = true;
}
var f2_frame = async f2();
await f2_frame;
}
async fn f2() void {
defer {
defer_f2 = true;
}
f3();
}
async fn f3() void {
f3_frame = @frame();
defer {
defer_f3 = true;
}
suspend;
}
var defer_b1: bool = false;
var defer_b2: bool = false;
var defer_b3: bool = false;
var defer_b4: bool = false;
test "cancel backwards" {
var b1_frame = async b1();
resume b4_handle;
_ = async awaitAFrame(&b1_frame);
expect(defer_b1);
expect(defer_b2);
expect(defer_b3);
expect(defer_b4);
}
async fn b1() void {
defer {
defer_b1 = true;
}
b2();
}
var b4_handle: anyframe->void = undefined;
async fn b2() void {
const b3_handle = async b3();
resume b4_handle;
defer {
defer_b2 = true;
}
const value = await b3_handle;
expect(value == 1234);
}
async fn b3() i32 {
defer {
defer_b3 = true;
}
b4();
return 1234;
}
async fn b4() void {
defer {
defer_b4 = true;
}
suspend {
b4_handle = @frame();
}
suspend;
}
fn awaitAFrame(f: anyframe->void) void {
await f;
}
test "cancel on a non-pointer" {
const S = struct {
fn doTheTest() void {
_ = async atest();
}
fn atest() void {
var f = async func();
cancel f;
}
fn func() void {
suspend;
}
};
S.doTheTest();
}