diff --git a/doc/docgen.zig b/doc/docgen.zig
index 6ce5902dc..0ef38dc77 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -786,9 +786,10 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
             .Keyword_for,
             .Keyword_if,
             .Keyword_inline,
-            .Keyword_noinline,
             .Keyword_nakedcc,
             .Keyword_noalias,
+            .Keyword_noasync,
+            .Keyword_noinline,
             .Keyword_or,
             .Keyword_orelse,
             .Keyword_packed,
diff --git a/src/all_types.hpp b/src/all_types.hpp
index bc6ab4e82..ef159986a 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -758,11 +758,17 @@ struct AstNodeUnwrapOptional {
     AstNode *expr;
 };
 
+enum CallModifier {
+    CallModifierNone,
+    CallModifierAsync,
+    CallModifierNoAsync,
+    CallModifierBuiltin,
+};
+
 struct AstNodeFnCallExpr {
     AstNode *fn_ref_expr;
     ZigList<AstNode *> params;
-    bool is_builtin;
-    bool is_async;
+    CallModifier modifier;
     bool seen; // used by @compileLog
 };
 
@@ -2730,8 +2736,10 @@ struct IrInstructionCallSrc {
     ResultLoc *result_loc;
 
     IrInstruction *new_stack;
+
     FnInline fn_inline;
-    bool is_async;
+    CallModifier modifier;
+
     bool is_async_call_builtin;
     bool is_comptime;
 };
@@ -2745,10 +2753,11 @@
     IrInstruction **args;
     IrInstruction *result_loc;
     IrInstruction *frame_result_loc;
-
     IrInstruction *new_stack;
+
     FnInline fn_inline;
-    bool is_async;
+    CallModifier modifier;
+
     bool is_async_call_builtin;
 };
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 386ee4ec4..fa93a9764 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -4214,7 +4214,7 @@ void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
         add_error_note(g, msg, fn->inferred_async_node,
             buf_sprintf("await here is a suspend point"));
     } else if (fn->inferred_async_node->type == NodeTypeFnCallExpr &&
-        fn->inferred_async_node->data.fn_call_expr.is_builtin)
+        fn->inferred_async_node->data.fn_call_expr.modifier == CallModifierBuiltin)
     {
         add_error_note(g, msg, fn->inferred_async_node,
             buf_sprintf("@frame() causes function to be async"));
@@ -4228,8 +4228,10 @@
 // ErrorIsAsync - yes async
 // ErrorSemanticAnalyzeFail - compile error emitted result is invalid
 static Error analyze_callee_async(CodeGen *g, ZigFn *fn, ZigFn *callee, AstNode *call_node,
-        bool must_not_be_async)
+        bool must_not_be_async, CallModifier modifier)
 {
+    if (modifier == CallModifierNoAsync)
+        return ErrorNone;
     if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
         return ErrorNone;
     if (callee->anal_state == FnAnalStateReady) {
@@ -4312,7 +4314,9 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
             // TODO function pointer call here, could be anything
             continue;
         }
-        switch (analyze_callee_async(g, fn, call->fn_entry, call->base.source_node, must_not_be_async)) {
+        switch (analyze_callee_async(g, fn, call->fn_entry, call->base.source_node, must_not_be_async,
+                    call->modifier))
+        {
             case ErrorSemanticAnalyzeFail:
                 fn->anal_state = FnAnalStateInvalid;
                 return;
@@ -4329,7 +4333,9 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
     }
     for (size_t i = 0; i < fn->await_list.length; i += 1) {
         IrInstructionAwaitGen *await = fn->await_list.at(i);
-        switch (analyze_callee_async(g, fn, await->target_fn, await->base.source_node, must_not_be_async)) {
+        switch (analyze_callee_async(g, fn, await->target_fn, await->base.source_node, must_not_be_async,
+                    CallModifierNone))
+        {
             case ErrorSemanticAnalyzeFail:
                 fn->anal_state = FnAnalStateInvalid;
                 return;
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index fedd46a48..537a74d7b 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -698,11 +698,18 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
             }
         case NodeTypeFnCallExpr:
             {
-                if (node->data.fn_call_expr.is_builtin) {
-                    fprintf(ar->f, "@");
-                }
-                if (node->data.fn_call_expr.is_async) {
-                    fprintf(ar->f, "async ");
+                switch (node->data.fn_call_expr.modifier) {
+                    case CallModifierNone:
+                        break;
+                    case CallModifierBuiltin:
+                        fprintf(ar->f, "@");
+                        break;
+                    case CallModifierAsync:
+                        fprintf(ar->f, "async ");
+                        break;
+                    case CallModifierNoAsync:
+                        fprintf(ar->f, "noasync ");
+                        break;
                 }
                 AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
                 bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 9c8ccd704..03c253ad4 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -186,6 +186,9 @@ static void generate_error_name_table(CodeGen *g);
 static bool value_is_all_undef(CodeGen *g, ConstExprValue *const_val);
 static void gen_undef_init(CodeGen *g, uint32_t ptr_align_bytes, ZigType *value_type, LLVMValueRef ptr);
 static LLVMValueRef build_alloca(CodeGen *g, ZigType *type_entry, const char *name, uint32_t alignment);
+static LLVMValueRef gen_await_early_return(CodeGen *g, IrInstruction *source_instr,
+        LLVMValueRef target_frame_ptr, ZigType *result_type, ZigType *ptr_result_type,
+        LLVMValueRef result_loc, bool non_async);
 
 static void addLLVMAttr(LLVMValueRef val, LLVMAttributeIndex attr_index, const char *attr_name) {
     unsigned kind_id = LLVMGetEnumAttributeKindForName(attr_name, strlen(attr_name));
@@ -3842,7 +3845,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
     LLVMValueRef ret_ptr;
     if (callee_is_async) {
         if (instruction->new_stack == nullptr) {
-            if (instruction->is_async) {
+            if (instruction->modifier == CallModifierAsync) {
                 frame_result_loc = result_loc;
             } else {
                 frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
@@ -3883,7 +3886,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
             }
         }
     }
-    if (instruction->is_async) {
+    if (instruction->modifier == CallModifierAsync) {
         if (instruction->new_stack == nullptr) {
             awaiter_init_val = zero;
 
@@ -3908,9 +3911,15 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
             // even if prefix_arg_err_ret_stack is true, let the async function do its own
             // initialization.
     } else {
-        // async function called as a normal function
-
-        awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
+        if (instruction->modifier == CallModifierNoAsync && !fn_is_async(g->cur_fn)) {
+            // Async function called as a normal function, and calling function is not async.
+            // This is allowed because it was called with `noasync` which asserts that it will
+            // never suspend.
+            awaiter_init_val = zero;
+        } else {
+            // async function called as a normal function
+            awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
+        }
         if (ret_has_bits) {
             if (result_loc == nullptr) {
                 // return type is a scalar, but we still need a pointer to it. Use the async fn frame.
@@ -3951,7 +3960,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
             LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, "");
             LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
         }
-    } else if (instruction->is_async) {
+    } else if (instruction->modifier == CallModifierAsync) {
         // Async call of blocking function
         if (instruction->new_stack != nullptr) {
             zig_panic("TODO @asyncCall of non-async function");
@@ -4048,13 +4057,20 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
                 gen_param_values.at(arg_i));
     }
 
-    if (instruction->is_async) {
+    if (instruction->modifier == CallModifierAsync) {
         gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
         if (instruction->new_stack != nullptr) {
             return LLVMBuildBitCast(g->builder, frame_result_loc,
                     get_llvm_type(g, instruction->base.value.type), "");
         }
         return nullptr;
+    } else if (instruction->modifier == CallModifierNoAsync && !fn_is_async(g->cur_fn)) {
+        gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
+
+        ZigType *result_type = instruction->base.value.type;
+        ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
+        return gen_await_early_return(g, &instruction->base, frame_result_loc,
+                result_type, ptr_result_type, result_loc, true);
     } else {
         ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
@@ -4082,7 +4098,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
     if (instruction->new_stack == nullptr || instruction->is_async_call_builtin) {
         result = ZigLLVMBuildCall(g->builder, fn_val,
                 gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
-    } else if (instruction->is_async) {
+    } else if (instruction->modifier == CallModifierAsync) {
         zig_panic("TODO @asyncCall of non-async function");
     } else {
         LLVMValueRef stacksave_fn_val = get_stacksave_fn_val(g);
@@ -4107,7 +4123,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
         LLVMValueRef store_instr = LLVMBuildStore(g->builder, result, result_loc);
         LLVMSetAlignment(store_instr, get_ptr_align(g, instruction->result_loc->value.type));
         return result_loc;
-    } else if (!callee_is_async && instruction->is_async) {
+    } else if (!callee_is_async && instruction->modifier == CallModifierAsync) {
         LLVMBuildStore(g->builder, result, ret_ptr);
         return result_loc;
     } else {
@@ -7104,6 +7120,28 @@ static void do_code_gen(CodeGen *g) {
         }
 
         if (!is_async) {
+            // allocate async frames for noasync calls & awaits to async functions
+            for (size_t i = 0; i < fn_table_entry->call_list.length; i += 1) {
+                IrInstructionCallGen *call = fn_table_entry->call_list.at(i);
+                if (call->fn_entry == nullptr)
+                    continue;
+                if (!fn_is_async(call->fn_entry))
+                    continue;
+                if (call->modifier != CallModifierNoAsync)
+                    continue;
+                if (call->frame_result_loc != nullptr)
+                    continue;
+                ZigType *callee_frame_type = get_fn_frame_type(g, call->fn_entry);
+                IrInstructionAllocaGen *alloca_gen = allocate<IrInstructionAllocaGen>(1);
+                alloca_gen->base.id = IrInstructionIdAllocaGen;
+                alloca_gen->base.source_node = call->base.source_node;
+                alloca_gen->base.scope = call->base.scope;
+                alloca_gen->base.value.type = get_pointer_to_type(g, callee_frame_type, false);
+                alloca_gen->base.ref_count = 1;
+                alloca_gen->name_hint = "";
+                fn_table_entry->alloca_gen_list.append(alloca_gen);
+                call->frame_result_loc = &alloca_gen->base;
+            }
             // allocate temporary stack data
             for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
                 IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
diff --git a/src/ir.cpp b/src/ir.cpp
index 2da8dea67..53ce2d89e 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1389,7 +1389,7 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast
 
 static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
         ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
-        bool is_comptime, FnInline fn_inline, bool is_async, bool is_async_call_builtin,
+        bool is_comptime, FnInline fn_inline, CallModifier modifier, bool is_async_call_builtin,
         IrInstruction *new_stack, ResultLoc *result_loc)
 {
     IrInstructionCallSrc *call_instruction = ir_build_instruction<IrInstructionCallSrc>(irb, scope, source_node);
@@ -1399,7 +1399,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
     call_instruction->fn_inline = fn_inline;
     call_instruction->args = args;
     call_instruction->arg_count = arg_count;
-    call_instruction->is_async = is_async;
+    call_instruction->modifier = modifier;
     call_instruction->is_async_call_builtin = is_async_call_builtin;
     call_instruction->new_stack = new_stack;
     call_instruction->result_loc = result_loc;
@@ -1407,7 +1407,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
     if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block);
     for (size_t i = 0; i < arg_count; i += 1)
         ir_ref_instruction(args[i], irb->current_basic_block);
-    if (is_async && new_stack != nullptr) {
+    if (modifier == CallModifierAsync && new_stack != nullptr) {
         // in this case the arg at the end is the return pointer
         ir_ref_instruction(args[arg_count], irb->current_basic_block);
     }
@@ -1418,7 +1418,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
 
 static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
         ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
-        FnInline fn_inline, bool is_async, IrInstruction *new_stack, bool is_async_call_builtin,
+        FnInline fn_inline, CallModifier modifier, IrInstruction *new_stack, bool is_async_call_builtin,
         IrInstruction *result_loc, ZigType *return_type)
 {
     IrInstructionCallGen *call_instruction = ir_build_instruction<IrInstructionCallGen>(&ira->new_irb,
@@ -1429,7 +1429,7 @@ static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *so
     call_instruction->fn_inline = fn_inline;
     call_instruction->args = args;
     call_instruction->arg_count = arg_count;
-    call_instruction->is_async = is_async;
+    call_instruction->modifier = modifier;
     call_instruction->is_async_call_builtin = is_async_call_builtin;
     call_instruction->new_stack = new_stack;
     call_instruction->result_loc = result_loc;
@@ -4412,10 +4412,10 @@ static IrInstruction *ir_gen_async_call(IrBuilder *irb, Scope *scope, AstNode *a
 
     args[arg_count] = ret_ptr;
 
-    bool is_async = await_node == nullptr;
+    CallModifier modifier = (await_node == nullptr) ? CallModifierAsync : CallModifierNone;
     bool is_async_call_builtin = true;
     IrInstruction *call = ir_build_call_src(irb, scope, call_node, nullptr, fn_ref, arg_count, args, false,
-            FnInlineAuto, is_async, is_async_call_builtin, bytes, result_loc);
+            FnInlineAuto, modifier, is_async_call_builtin, bytes, result_loc);
     return ir_lval_wrap(irb, scope, call, lval, result_loc);
 }
 
@@ -5302,7 +5302,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
                 FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ?
                     FnInlineAlways : FnInlineNever;
                 IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
-                        fn_inline, false, false, nullptr, result_loc);
+                        fn_inline, CallModifierNone, false, nullptr, result_loc);
                 return ir_lval_wrap(irb, scope, call, lval, result_loc);
             }
         case BuiltinFnIdNewStackCall:
@@ -5335,7 +5335,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
                 }
 
                 IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
-                        FnInlineAuto, false, false, new_stack, result_loc);
+                        FnInlineAuto, CallModifierNone, false, new_stack, result_loc);
                 return ir_lval_wrap(irb, scope, call, lval, result_loc);
             }
         case BuiltinFnIdAsyncCall:
@@ -5624,7 +5624,7 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
 {
     assert(node->type == NodeTypeFnCallExpr);
 
-    if (node->data.fn_call_expr.is_builtin)
+    if (node->data.fn_call_expr.modifier == CallModifierBuiltin)
         return ir_gen_builtin_fn_call(irb, scope, node, lval, result_loc);
 
     AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
@@ -5641,9 +5641,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
             return args[i];
     }
 
-    bool is_async = node->data.fn_call_expr.is_async;
     IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
-            FnInlineAuto, is_async, false, nullptr, result_loc);
+            FnInlineAuto, node->data.fn_call_expr.modifier, false, nullptr, result_loc);
     return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
 }
 
@@ -7937,7 +7936,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
     assert(node->type == NodeTypeAwaitExpr);
 
     AstNode *expr_node = node->data.await_expr.expr;
-    if (expr_node->type == NodeTypeFnCallExpr && expr_node->data.fn_call_expr.is_builtin) {
+    if (expr_node->type == NodeTypeFnCallExpr && expr_node->data.fn_call_expr.modifier == CallModifierBuiltin) {
         AstNode *fn_ref_expr = expr_node->data.fn_call_expr.fn_ref_expr;
         Buf *name = fn_ref_expr->data.symbol_expr.symbol;
         auto entry = irb->codegen->builtin_fn_table.maybe_get(name);
@@ -15408,7 +15407,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
         ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_ret_type);
 
         IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
-                arg_count, casted_args, FnInlineAuto, true, casted_new_stack,
+                arg_count, casted_args, FnInlineAuto, CallModifierAsync, casted_new_stack,
                 call_instruction->is_async_call_builtin, ret_ptr, anyframe_type);
         return &call_gen->base;
     } else {
@@ -15422,8 +15421,8 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
         if (type_is_invalid(result_loc->value.type))
             return ira->codegen->invalid_instruction;
         return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
-                casted_args, FnInlineAuto, true, casted_new_stack, call_instruction->is_async_call_builtin,
-                result_loc, frame_type)->base;
+                casted_args, FnInlineAuto, CallModifierAsync, casted_new_stack,
+                call_instruction->is_async_call_builtin, result_loc, frame_type)->base;
     }
 }
 static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
@@ -16174,7 +16173,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
             return ira->codegen->invalid_instruction;
 
         size_t impl_param_count = impl_fn_type_id->param_count;
-        if (call_instruction->is_async) {
+        if (call_instruction->modifier == CallModifierAsync) {
            IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
                    nullptr, casted_args, impl_param_count, casted_new_stack);
            return ir_finish_anal(ira, result);
@@ -16201,14 +16200,17 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
             result_loc = nullptr;
         }
 
-        if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+        if (impl_fn_type_id->cc == CallingConventionAsync &&
+            parent_fn_entry->inferred_async_node == nullptr &&
+            call_instruction->modifier != CallModifierNoAsync)
+        {
             parent_fn_entry->inferred_async_node = fn_ref->source_node;
             parent_fn_entry->inferred_async_fn = impl_fn;
         }
 
         IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
                 impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
-                false, casted_new_stack, call_instruction->is_async_call_builtin, result_loc,
+                call_instruction->modifier, casted_new_stack, call_instruction->is_async_call_builtin, result_loc,
                 impl_fn_type_id->return_type);
 
         if (get_scope_typeof(call_instruction->base.scope) == nullptr) {
@@ -16325,13 +16327,16 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
     if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value.type))
         return ira->codegen->invalid_instruction;
 
-    if (call_instruction->is_async) {
+    if (call_instruction->modifier == CallModifierAsync) {
         IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
                 casted_args, call_param_count, casted_new_stack);
         return ir_finish_anal(ira, result);
     }
 
-    if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+    if (fn_type_id->cc == CallingConventionAsync &&
+        parent_fn_entry->inferred_async_node == nullptr &&
+        call_instruction->modifier != CallModifierNoAsync)
+    {
         parent_fn_entry->inferred_async_node = fn_ref->source_node;
         parent_fn_entry->inferred_async_fn = fn_entry;
     }
@@ -16358,7 +16363,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
     }
 
     IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
-            call_param_count, casted_args, fn_inline, false, casted_new_stack,
+            call_param_count, casted_args, fn_inline, call_instruction->modifier, casted_new_stack,
             call_instruction->is_async_call_builtin, result_loc, return_type);
     if (get_scope_typeof(call_instruction->base.scope) == nullptr) {
         parent_fn_entry->call_list.append(new_call_instruction);
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 85d89cdb8..30b873524 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -608,8 +608,17 @@ static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) {
 }
 
 static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) {
-    if (call_instruction->is_async) {
-        fprintf(irp->f, "async ");
+    switch (call_instruction->modifier) {
+        case CallModifierNone:
+            break;
+        case CallModifierAsync:
+            fprintf(irp->f, "async ");
+            break;
+        case CallModifierNoAsync:
+            fprintf(irp->f, "noasync ");
+            break;
+        case CallModifierBuiltin:
+            zig_unreachable();
     }
     if (call_instruction->fn_entry) {
         fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
@@ -629,8 +638,17 @@ static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instructi
 }
 
 static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instruction) {
-    if (call_instruction->is_async) {
-        fprintf(irp->f, "async ");
+    switch (call_instruction->modifier) {
+        case CallModifierNone:
+            break;
+        case CallModifierAsync:
+            fprintf(irp->f, "async ");
+            break;
+        case CallModifierNoAsync:
+            fprintf(irp->f, "noasync ");
+            break;
+        case CallModifierBuiltin:
+            zig_unreachable();
     }
     if (call_instruction->fn_entry) {
         fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
diff --git a/src/parser.cpp b/src/parser.cpp
index ba8757e4a..96071daa0 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -113,7 +113,7 @@ static AstNode *ast_parse_multiply_op(ParseContext *pc);
 static AstNode *ast_parse_prefix_op(ParseContext *pc);
 static AstNode *ast_parse_prefix_type_op(ParseContext *pc);
 static AstNode *ast_parse_suffix_op(ParseContext *pc);
-static AstNode *ast_parse_fn_call_argumnets(ParseContext *pc);
+static AstNode *ast_parse_fn_call_arguments(ParseContext *pc);
 static AstNode *ast_parse_array_type_start(ParseContext *pc);
 static AstNode *ast_parse_ptr_type_start(ParseContext *pc);
 static AstNode *ast_parse_container_decl_auto(ParseContext *pc);
@@ -1403,12 +1403,14 @@ static AstNode *ast_parse_error_union_expr(ParseContext *pc) {
 }
 
 // SuffixExpr
-//     <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
+//     <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
+//      / KEYWORD_noasync PrimaryTypeExpr SuffixOp* FnCallArguments
 //      / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
 static AstNode *ast_parse_suffix_expr(ParseContext *pc) {
-    Token *async_token = eat_token_if(pc, TokenIdKeywordAsync);
-    if (async_token != nullptr) {
-        if (eat_token_if(pc, TokenIdKeywordFn) != nullptr) {
+    Token *async_token = eat_token(pc);
+    bool is_async = async_token->id == TokenIdKeywordAsync;
+    if (is_async || async_token->id == TokenIdKeywordNoAsync) {
+        if (is_async && eat_token_if(pc, TokenIdKeywordFn) != nullptr) {
            // HACK: If we see the keyword `fn`, then we assume that
            //       we are parsing an async fn proto, and not a call.
            //       We therefore put back all tokens consumed by the async
@@ -1447,24 +1449,24 @@ static AstNode *ast_parse_suffix_expr(ParseContext *pc) {
             child = suffix;
         }
 
-        // TODO: Both *_async_prefix and *_fn_call_argumnets returns an
+        // TODO: Both *_async_prefix and *_fn_call_arguments returns an
         //       AstNode *. All we really want here is the arguments of
         //       the call we parse. We therefor "leak" the node for now.
         //       Wait till we get async rework to fix this.
-        AstNode *args = ast_parse_fn_call_argumnets(pc);
+        AstNode *args = ast_parse_fn_call_arguments(pc);
         if (args == nullptr)
             ast_invalid_token_error(pc, peek_token(pc));
 
         assert(args->type == NodeTypeFnCallExpr);
         AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async_token);
-        res->data.fn_call_expr.is_async = true;
+        res->data.fn_call_expr.modifier = is_async ? CallModifierAsync : CallModifierNoAsync;
         res->data.fn_call_expr.seen = false;
         res->data.fn_call_expr.fn_ref_expr = child;
         res->data.fn_call_expr.params = args->data.fn_call_expr.params;
-        res->data.fn_call_expr.is_builtin = false;
         return res;
     }
 
+    put_back_token(pc);
     AstNode *res = ast_parse_primary_type_expr(pc);
     if (res == nullptr)
@@ -1496,7 +1498,7 @@ static AstNode *ast_parse_suffix_expr(ParseContext *pc) {
             continue;
         }
 
-        AstNode * call = ast_parse_fn_call_argumnets(pc);
+        AstNode * call = ast_parse_fn_call_arguments(pc);
         if (call != nullptr) {
             assert(call->type == NodeTypeFnCallExpr);
             call->data.fn_call_expr.fn_ref_expr = res;
@@ -1552,7 +1554,7 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
             name = buf_create_from_str("export");
         }
 
-        AstNode *res = ast_expect(pc, ast_parse_fn_call_argumnets);
+        AstNode *res = ast_expect(pc, ast_parse_fn_call_arguments);
         AstNode *name_sym = ast_create_node(pc, NodeTypeSymbol, token);
         name_sym->data.symbol_expr.symbol = name;
 
@@ -1560,7 +1562,7 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
         res->line = at_sign->start_line;
         res->column = at_sign->start_column;
         res->data.fn_call_expr.fn_ref_expr = name_sym;
-        res->data.fn_call_expr.is_builtin = true;
+        res->data.fn_call_expr.modifier = CallModifierBuiltin;
         return res;
     }
 
@@ -2672,7 +2674,7 @@ static AstNode *ast_parse_suffix_op(ParseContext *pc) {
 }
 
 // FnCallArguments <- LPAREN ExprList RPAREN
-static AstNode *ast_parse_fn_call_argumnets(ParseContext *pc) {
+static AstNode *ast_parse_fn_call_arguments(ParseContext *pc) {
     Token *paren = eat_token_if(pc, TokenIdLParen);
     if (paren == nullptr)
         return nullptr;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 8ef320331..465f65228 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -130,9 +130,10 @@ static const struct ZigKeyword zig_keywords[] = {
     {"for", TokenIdKeywordFor},
     {"if", TokenIdKeywordIf},
     {"inline", TokenIdKeywordInline},
-    {"noinline", TokenIdKeywordNoInline},
     {"nakedcc", TokenIdKeywordNakedCC},
     {"noalias", TokenIdKeywordNoAlias},
+    {"noasync", TokenIdKeywordNoAsync},
+    {"noinline", TokenIdKeywordNoInline},
     {"null", TokenIdKeywordNull},
     {"or", TokenIdKeywordOr},
     {"orelse", TokenIdKeywordOrElse},
@@ -1552,9 +1553,10 @@ const char * token_name(TokenId id) {
         case TokenIdKeywordFor: return "for";
         case TokenIdKeywordIf: return "if";
         case TokenIdKeywordInline: return "inline";
-        case TokenIdKeywordNoInline: return "noinline";
         case TokenIdKeywordNakedCC: return "nakedcc";
         case TokenIdKeywordNoAlias: return "noalias";
+        case TokenIdKeywordNoAsync: return "noasync";
+        case TokenIdKeywordNoInline: return "noinline";
         case TokenIdKeywordNull: return "null";
         case TokenIdKeywordOr: return "or";
         case TokenIdKeywordOrElse: return "orelse";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 70d828b39..a3d1a6000 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -78,6 +78,7 @@ enum TokenId {
     TokenIdKeywordLinkSection,
     TokenIdKeywordNakedCC,
     TokenIdKeywordNoAlias,
+    TokenIdKeywordNoAsync,
     TokenIdKeywordNull,
     TokenIdKeywordOr,
     TokenIdKeywordOrElse,
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index eb5911078..7a4ad3f57 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -253,7 +253,7 @@ static AstNode *trans_create_node_symbol_str(Context *c, const char *name) {
 static AstNode *trans_create_node_builtin_fn_call(Context *c, Buf *name) {
     AstNode *node = trans_create_node(c, NodeTypeFnCallExpr);
     node->data.fn_call_expr.fn_ref_expr = trans_create_node_symbol(c, name);
-    node->data.fn_call_expr.is_builtin = true;
+    node->data.fn_call_expr.modifier = CallModifierBuiltin;
     return node;
 }
 
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 204121f64..19fb23356 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -36,9 +36,10 @@ pub const Token = struct {
         Keyword{ .bytes = "for", .id = Id.Keyword_for },
         Keyword{ .bytes = "if", .id = Id.Keyword_if },
         Keyword{ .bytes = "inline", .id = Id.Keyword_inline },
-        Keyword{ .bytes = "noinline", .id = Id.Keyword_noinline },
         Keyword{ .bytes = "nakedcc", .id = Id.Keyword_nakedcc },
         Keyword{ .bytes = "noalias", .id = Id.Keyword_noalias },
+        Keyword{ .bytes = "noasync", .id = Id.Keyword_noasync },
+        Keyword{ .bytes = "noinline", .id = Id.Keyword_noinline },
         Keyword{ .bytes = "null", .id = Id.Keyword_null },
         Keyword{ .bytes = "or", .id = Id.Keyword_or },
         Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse },
@@ -167,9 +168,10 @@ pub const Token = struct {
         Keyword_for,
         Keyword_if,
         Keyword_inline,
-        Keyword_noinline,
         Keyword_nakedcc,
         Keyword_noalias,
+        Keyword_noasync,
+        Keyword_noinline,
         Keyword_null,
         Keyword_or,
         Keyword_orelse,
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index ad8e949f8..a898889f5 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -1092,3 +1092,19 @@ test "recursive call of await @asyncCall with struct return type" {
     expect(res.y == 2);
     expect(res.z == 3);
 }
+
+test "noasync function call" {
+    const S = struct {
+        fn doTheTest() void {
+            const result = noasync add(50, 100);
+            expect(result == 150);
+        }
+        fn add(a: i32, b: i32) i32 {
+            if (a > 100) {
+                suspend;
+            }
+            return a + b;
+        }
+    };
+    S.doTheTest();
+}
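
Usage note (supplementary, not part of the diff): the behavior test added above is the authoritative example. The sketch below restates the same pattern with invented names (`double`, the test title), assuming the 0.5.0-era syntax this change introduces. A `noasync` call site asserts that the callee will not actually suspend, so the calling function is not inferred to be async; only the non-suspending branch is exercised here, matching the diff's own test.

// Hypothetical sketch mirroring the new behavior test; not taken from the diff.
const std = @import("std");
const expect = std.testing.expect;

fn double(x: i32) i32 {
    if (x > 100) {
        // The conditional suspend makes this function inferred-async.
        suspend;
    }
    return x * 2;
}

test "noasync call keeps the caller blocking" {
    // Without `noasync`, this call would mark the enclosing function as
    // inferred-async (see the inferred_async_node checks in ir.cpp).
    // With `noasync`, the call is lowered like a blocking call via the
    // CallModifierNoAsync branches, and the programmer asserts that the
    // suspending branch is never taken for these arguments.
    const result = noasync double(21);
    expect(result == 42);
}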