33385 lines
1.4 MiB
33385 lines
1.4 MiB
/*
|
|
* Copyright (c) 2016 Andrew Kelley
|
|
*
|
|
* This file is part of zig, which is MIT licensed.
|
|
* See http://opensource.org/licenses/MIT
|
|
*/
|
|
|
|
#include "analyze.hpp"
|
|
#include "ast_render.hpp"
|
|
#include "error.hpp"
|
|
#include "ir.hpp"
|
|
#include "ir_print.hpp"
|
|
#include "os.hpp"
|
|
#include "range_set.hpp"
|
|
#include "softfloat.hpp"
|
|
#include "softfloat_ext.hpp"
|
|
#include "util.hpp"
|
|
#include "mem_list.hpp"
|
|
#include "all_types.hpp"
|
|
|
|
#include <errno.h>
|
|
#include <math.h>
|
|
|
|
// Builder state used while lowering an AST function body into source-level
// (pre-analysis) IR instructions.
struct IrBuilderSrc {
    CodeGen *codegen;                     // global compiler state
    IrExecutableSrc *exec;                // executable the instructions are emitted into
    IrBasicBlockSrc *current_basic_block; // presumably the block new instructions append to — confirm at use sites
    AstNode *main_block_node;
};
|
|
|
|
// Builder state used while emitting analyzed (gen) IR instructions.
struct IrBuilderGen {
    CodeGen *codegen;                     // global compiler state
    IrExecutableGen *exec;                // executable the instructions are emitted into
    IrBasicBlockGen *current_basic_block; // presumably the block new instructions append to — confirm at use sites

    // track for immediate post-analysis destruction
    mem::List<IrInstGenConst *> constants;
};
|
|
|
|
// State for one run of IR analysis: consumes the source IR held by
// `old_irb` and produces analyzed gen IR through `new_irb`.
struct IrAnalyze {
    CodeGen *codegen;
    IrBuilderSrc old_irb;      // the source IR being analyzed
    IrBuilderGen new_irb;      // the gen IR being produced
    size_t old_bb_index;       // cursor into the old executable's basic blocks (inferred from name)
    size_t instruction_index;  // cursor within the current old basic block (inferred from name)
    ZigType *explicit_return_type;
    AstNode *explicit_return_type_source_node; // where the explicit return type was written, for diagnostics
    ZigList<IrInstGen *> src_implicit_return_type_list;
    ZigList<IrSuspendPosition> resume_stack;
    IrBasicBlockSrc *const_predecessor_bb;
    size_t ref_count;
    size_t break_debug_id; // for debugging purposes
    IrInstGen *return_ptr;

    // For the purpose of using in a debugger
    void dump();
};
|
|
|
|
// Outcome of checking whether one type may be implicitly "const cast" to
// another. ConstCastResultIdOk is the success case; every other value names
// the first kind of mismatch found, and selects which member of the
// ConstCastOnly detail union (declared below) carries the specifics.
enum ConstCastResultId {
    ConstCastResultIdOk,      // cast is allowed
    ConstCastResultIdInvalid,
    ConstCastResultIdErrSet,
    ConstCastResultIdErrSetGlobal,
    ConstCastResultIdPointerChild,
    ConstCastResultIdSliceChild,
    ConstCastResultIdOptionalChild,
    ConstCastResultIdOptionalShape,
    ConstCastResultIdErrorUnionPayload,
    ConstCastResultIdErrorUnionErrorSet,
    ConstCastResultIdFnAlign,
    ConstCastResultIdFnCC,
    ConstCastResultIdFnVarArgs,
    ConstCastResultIdFnIsGeneric,
    ConstCastResultIdFnReturnType,
    ConstCastResultIdFnArgCount,
    ConstCastResultIdFnGenericArgCount,
    ConstCastResultIdFnArg,
    ConstCastResultIdFnArgNoAlias,
    ConstCastResultIdType,
    ConstCastResultIdUnresolvedInferredErrSet,
    ConstCastResultIdAsyncAllocatorType,
    ConstCastResultIdBadAllowsZero,
    ConstCastResultIdArrayChild,
    ConstCastResultIdSentinelArrays,
    ConstCastResultIdPtrLens,
    ConstCastResultIdCV,
    ConstCastResultIdPtrSentinel,
    ConstCastResultIdIntShorten,
    ConstCastResultIdVectorLength,
    ConstCastResultIdVectorChild,
};
|
|
|
|
struct ConstCastOnly;
// Detail payload for ConstCastResultIdFnArg: which function parameter
// mismatched, the two parameter types involved, and the nested result
// describing why that parameter pair failed.
struct ConstCastArg {
    size_t arg_index;              // 0-based parameter position
    ZigType *actual_param_type;
    ZigType *expected_param_type;
    ConstCastOnly *child;          // nested mismatch for this parameter
};
|
|
|
|
// Detail payload for ConstCastResultIdFnArgNoAlias: the parameter position
// whose noalias attribute differed between the two function types.
struct ConstCastArgNoAlias {
    size_t arg_index; // 0-based parameter position
};
|
|
|
|
// Forward declarations of the per-mismatch detail structs referenced by the
// ConstCastOnly union below; definitions follow it.
struct ConstCastOptionalMismatch;
struct ConstCastPointerMismatch;
struct ConstCastSliceMismatch;
struct ConstCastErrUnionErrSetMismatch;
struct ConstCastErrUnionPayloadMismatch;
struct ConstCastErrSetMismatch;
struct ConstCastTypeMismatch;
struct ConstCastArrayMismatch;
struct ConstCastBadAllowsZero;
struct ConstCastBadNullTermArrays;
struct ConstCastBadCV;
struct ConstCastPtrSentinel;
struct ConstCastIntShorten;
|
|
|
|
// Result of a const-cast compatibility check: a tagged union where `id`
// selects which `data` member (if any) is active. For ConstCastResultIdOk
// no union member is meaningful.
struct ConstCastOnly {
    ConstCastResultId id; // tag selecting the active union member
    union {
        ConstCastErrSetMismatch *error_set_mismatch;
        ConstCastPointerMismatch *pointer_mismatch;
        ConstCastSliceMismatch *slice_mismatch;
        ConstCastOptionalMismatch *optional;
        ConstCastErrUnionPayloadMismatch *error_union_payload;
        ConstCastErrUnionErrSetMismatch *error_union_error_set;
        ConstCastTypeMismatch *type_mismatch;
        ConstCastArrayMismatch *array_mismatch;
        ConstCastOnly *return_type;
        ConstCastOnly *null_wrap_ptr_child;
        ConstCastArg fn_arg;
        ConstCastArgNoAlias arg_no_alias;
        ConstCastBadAllowsZero *bad_allows_zero;
        ConstCastBadNullTermArrays *sentinel_arrays;
        ConstCastBadCV *bad_cv;
        ConstCastPtrSentinel *bad_ptr_sentinel;
        ConstCastIntShorten *int_shorten;
    } data;
};
|
|
|
|
// Detail payload for ConstCastResultIdType: the two whole types that differ.
struct ConstCastTypeMismatch {
    ZigType *wanted_type;
    ZigType *actual_type;
};
|
|
|
|
// Detail payload for optional-type mismatches: the two payload ("child")
// types plus the nested result explaining why they differ.
struct ConstCastOptionalMismatch {
    ConstCastOnly child; // nested mismatch for the payload types
    ZigType *wanted_child;
    ZigType *actual_child;
};
|
|
|
|
// Detail payload for pointer-child mismatches: the two pointee types plus
// the nested result explaining why they differ.
struct ConstCastPointerMismatch {
    ConstCastOnly child; // nested mismatch for the pointee types
    ZigType *wanted_child;
    ZigType *actual_child;
};
|
|
|
|
// Detail payload for slice-element mismatches: the two element types plus
// the nested result explaining why they differ.
struct ConstCastSliceMismatch {
    ConstCastOnly child; // nested mismatch for the element types
    ZigType *wanted_child;
    ZigType *actual_child;
};
|
|
|
|
// Detail payload for array-element mismatches: the two element types plus
// the nested result explaining why they differ.
struct ConstCastArrayMismatch {
    ConstCastOnly child; // nested mismatch for the element types
    ZigType *wanted_child;
    ZigType *actual_child;
};
|
|
|
|
// Detail payload for error-union error-set mismatches: the two error set
// types plus the nested result explaining why they differ.
struct ConstCastErrUnionErrSetMismatch {
    ConstCastOnly child; // nested mismatch for the error set types
    ZigType *wanted_err_set;
    ZigType *actual_err_set;
};
|
|
|
|
// Detail payload for error-union payload mismatches: the two payload types
// plus the nested result explaining why they differ.
struct ConstCastErrUnionPayloadMismatch {
    ConstCastOnly child; // nested mismatch for the payload types
    ZigType *wanted_payload;
    ZigType *actual_payload;
};
|
|
|
|
// Detail payload for error-set mismatches: the errors present in the actual
// set but missing from the wanted set.
struct ConstCastErrSetMismatch {
    ZigList<ErrorTableEntry *> missing_errors;
};
|
|
|
|
// Detail payload for ConstCastResultIdBadAllowsZero: the two pointer types
// whose allowzero attributes are incompatible.
struct ConstCastBadAllowsZero {
    ZigType *wanted_type;
    ZigType *actual_type;
};
|
|
|
|
// Detail payload for ConstCastResultIdSentinelArrays: the two array types
// whose sentinels differ, plus the nested element-type result.
struct ConstCastBadNullTermArrays {
    ConstCastOnly child; // nested mismatch for the element types
    ZigType *wanted_type;
    ZigType *actual_type;
};
|
|
|
|
// Detail payload for ConstCastResultIdCV: the two pointer types whose
// const/volatile qualifiers are incompatible.
struct ConstCastBadCV {
    ZigType *wanted_type;
    ZigType *actual_type;
};
|
|
|
|
// Detail payload for ConstCastResultIdPtrSentinel: the two pointer types
// whose sentinel values differ.
struct ConstCastPtrSentinel {
    ZigType *wanted_type;
    ZigType *actual_type;
};
|
|
|
|
// Detail payload for ConstCastResultIdIntShorten: the two integer types
// involved in a narrowing that the cast rules reject.
struct ConstCastIntShorten {
    ZigType *wanted_type;
    ZigType *actual_type;
};
|
|
|
|
// for debugging purposes
// A source-location breakpoint consulted during IR processing; the fixed
// buffer below holds up to 20 entries, `dbg_ir_breakpoints_count` of which
// are in use.
struct DbgIrBreakPoint {
    const char *src_file;
    uint32_t line;
};
DbgIrBreakPoint dbg_ir_breakpoints_buf[20];
size_t dbg_ir_breakpoints_count = 0;
|
|
|
|
// Forward declarations for routines defined later in this file. By the
// naming convention visible here: ir_gen_* lower AST nodes into source IR,
// ir_analyze_* / ir_resolve_* perform semantic analysis producing gen IR,
// and the remaining helpers adjust types, read/write comptime values, and
// build result locations.
static IrInstSrc *ir_gen_node(IrBuilderSrc *irb, AstNode *node, Scope *scope);
static IrInstSrc *ir_gen_node_extra(IrBuilderSrc *irb, AstNode *node, Scope *scope, LVal lval,
        ResultLoc *result_loc);
static IrInstGen *ir_implicit_cast(IrAnalyze *ira, IrInstGen *value, ZigType *expected_type);
static IrInstGen *ir_implicit_cast2(IrAnalyze *ira, IrInst *value_source_instr,
        IrInstGen *value, ZigType *expected_type);
static IrInstGen *ir_get_deref(IrAnalyze *ira, IrInst *source_instr, IrInstGen *ptr,
        ResultLoc *result_loc);
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutableSrc *exec, AstNode *source_node, Buf *msg);
static IrInstGen *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
        IrInst* source_instr, IrInstGen *container_ptr, IrInst *container_ptr_src,
        ZigType *container_type, bool initializing);
static void ir_assert_impl(bool ok, IrInst* source_instruction, const char *file, unsigned int line);
static void ir_assert_gen_impl(bool ok, IrInstGen *source_instruction, const char *file, unsigned int line);
static IrInstGen *ir_get_var_ptr(IrAnalyze *ira, IrInst *source_instr, ZigVar *var);
static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op);
static IrInstSrc *ir_lval_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *value, LVal lval, ResultLoc *result_loc);
static IrInstSrc *ir_expr_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *inst, ResultLoc *result_loc);
static ZigType *adjust_ptr_align(CodeGen *g, ZigType *ptr_type, uint32_t new_align);
static ZigType *adjust_ptr_const(CodeGen *g, ZigType *ptr_type, bool is_const);
static ZigType *adjust_slice_align(CodeGen *g, ZigType *slice_type, uint32_t new_align);
static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node, uint8_t *buf, ZigValue *val);
static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ZigValue *val);
static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node,
        ZigValue *out_val, ZigValue *ptr_val);
static IrInstGen *ir_analyze_ptr_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *ptr,
        IrInst *ptr_src, ZigType *dest_type, IrInst *dest_type_src, bool safety_check_on,
        bool keep_bigger_alignment);
static ZigValue *ir_resolve_const(IrAnalyze *ira, IrInstGen *value, UndefAllowed undef_allowed);
static Error resolve_ptr_align(IrAnalyze *ira, ZigType *ty, uint32_t *result_align);
static IrInstGen *ir_analyze_int_to_ptr(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *ptr_type);
static IrInstGen *ir_analyze_bit_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *dest_type);
static IrInstGen *ir_resolve_result_raw(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc, ZigType *value_type, IrInstGen *value, bool force_runtime, bool allow_discard);
static IrInstGen *ir_resolve_result(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc, ZigType *value_type, IrInstGen *value, bool force_runtime, bool allow_discard);
static IrInstGen *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *base_ptr, bool safety_check_on, bool initializing);
static IrInstGen *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *base_ptr, bool safety_check_on, bool initializing);
static IrInstGen *ir_analyze_unwrap_err_code(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *base_ptr, bool initializing);
static IrInstGen *ir_analyze_store_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *ptr, IrInstGen *uncasted_value, bool allow_write_through_const);
static IrInstSrc *ir_gen_union_init_expr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *union_type, IrInstSrc *field_name, AstNode *expr_node,
        LVal lval, ResultLoc *parent_result_loc);
static void ir_reset_result(ResultLoc *result_loc);
static Buf *get_anon_type_name(CodeGen *codegen, IrExecutableSrc *exec, const char *kind_name,
        Scope *scope, AstNode *source_node, Buf *out_bare_name);
static ResultLocCast *ir_build_cast_result_loc(IrBuilderSrc *irb, IrInstSrc *dest_type,
        ResultLoc *parent_result_loc);
static IrInstGen *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInst* source_instr,
        TypeStructField *field, IrInstGen *struct_ptr, ZigType *struct_type, bool initializing);
static IrInstGen *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_name,
        IrInst* source_instr, IrInstGen *container_ptr, ZigType *container_type);
static ResultLoc *no_result_loc(void);
static IrInstGen *ir_analyze_test_non_null(IrAnalyze *ira, IrInst *source_inst, IrInstGen *value);
static IrInstGen *ir_error_dependency_loop(IrAnalyze *ira, IrInst *source_instr);
static IrInstGen *ir_const_undef(IrAnalyze *ira, IrInst *source_instruction, ZigType *ty);
static ZigVar *ir_create_var(IrBuilderSrc *irb, AstNode *node, Scope *scope, Buf *name,
        bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstSrc *is_comptime);
static void build_decl_var_and_init(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var,
        IrInstSrc *init, const char *name_hint, IrInstSrc *is_comptime);
static IrInstGen *ir_analyze_union_init(IrAnalyze *ira, IrInst* source_instruction,
        AstNode *field_source_node, ZigType *union_type, Buf *field_name, IrInstGen *field_result_loc,
        IrInstGen *result_loc);
static IrInstGen *ir_analyze_struct_value_field_value(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *struct_operand, TypeStructField *field);
static bool value_cmp_numeric_val_any(ZigValue *left, Cmp predicate, ZigValue *right);
static bool value_cmp_numeric_val_all(ZigValue *left, Cmp predicate, ZigValue *right);
static void memoize_field_init_val(CodeGen *codegen, ZigType *container_type, TypeStructField *field);
static void value_to_bigfloat(BigFloat *out, ZigValue *val);
|
|
|
// Assertion wrappers that forward the C++ call site (__FILE__/__LINE__)
// along with the IR instruction being checked, so failures can report both.
#define ir_assert(OK, SOURCE_INSTRUCTION) ir_assert_impl((OK), (SOURCE_INSTRUCTION), __FILE__, __LINE__)
#define ir_assert_gen(OK, SOURCE_INSTRUCTION) ir_assert_gen_impl((OK), (SOURCE_INSTRUCTION), __FILE__, __LINE__)
|
|
|
|
// Frees a source IR instruction. Each case casts `inst` to its concrete
// subtype before handing it to heap::c_allocator.destroy — the assumption
// (NOTE: confirm against the allocator's declaration) being that destroy
// deallocates based on the static pointee type, so the cast is what makes
// the full derived object get freed rather than just the IrInstSrc prefix.
// The switch is exhaustive over IrInstSrcId with no default, so adding a
// new instruction id without a case here is a compile-time warning and the
// trailing zig_unreachable() guards against corrupt ids at runtime.
static void destroy_instruction_src(IrInstSrc *inst) {
    switch (inst->id) {
        case IrInstSrcIdInvalid:
            zig_unreachable();
        case IrInstSrcIdReturn:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcReturn *>(inst));
        case IrInstSrcIdConst:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcConst *>(inst));
        case IrInstSrcIdBinOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBinOp *>(inst));
        case IrInstSrcIdMergeErrSets:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMergeErrSets *>(inst));
        case IrInstSrcIdDeclVar:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcDeclVar *>(inst));
        case IrInstSrcIdCall:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCall *>(inst));
        case IrInstSrcIdCallExtra:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCallExtra *>(inst));
        case IrInstSrcIdAsyncCallExtra:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAsyncCallExtra *>(inst));
        case IrInstSrcIdUnOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnOp *>(inst));
        case IrInstSrcIdCondBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCondBr *>(inst));
        case IrInstSrcIdBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBr *>(inst));
        case IrInstSrcIdPhi:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPhi *>(inst));
        case IrInstSrcIdContainerInitList:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcContainerInitList *>(inst));
        case IrInstSrcIdContainerInitFields:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcContainerInitFields *>(inst));
        case IrInstSrcIdUnreachable:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnreachable *>(inst));
        case IrInstSrcIdElemPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcElemPtr *>(inst));
        case IrInstSrcIdVarPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcVarPtr *>(inst));
        case IrInstSrcIdLoadPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcLoadPtr *>(inst));
        case IrInstSrcIdStorePtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcStorePtr *>(inst));
        case IrInstSrcIdTypeOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTypeOf *>(inst));
        case IrInstSrcIdFieldPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFieldPtr *>(inst));
        case IrInstSrcIdSetCold:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetCold *>(inst));
        case IrInstSrcIdSetRuntimeSafety:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetRuntimeSafety *>(inst));
        case IrInstSrcIdSetFloatMode:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetFloatMode *>(inst));
        case IrInstSrcIdArrayType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcArrayType *>(inst));
        case IrInstSrcIdSliceType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSliceType *>(inst));
        case IrInstSrcIdAnyFrameType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAnyFrameType *>(inst));
        case IrInstSrcIdAsm:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAsm *>(inst));
        case IrInstSrcIdSizeOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSizeOf *>(inst));
        case IrInstSrcIdTestNonNull:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTestNonNull *>(inst));
        case IrInstSrcIdOptionalUnwrapPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcOptionalUnwrapPtr *>(inst));
        case IrInstSrcIdPopCount:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPopCount *>(inst));
        case IrInstSrcIdClz:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcClz *>(inst));
        case IrInstSrcIdCtz:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCtz *>(inst));
        case IrInstSrcIdBswap:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBswap *>(inst));
        case IrInstSrcIdBitReverse:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBitReverse *>(inst));
        case IrInstSrcIdSwitchBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchBr *>(inst));
        case IrInstSrcIdSwitchVar:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchVar *>(inst));
        case IrInstSrcIdSwitchElseVar:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchElseVar *>(inst));
        case IrInstSrcIdSwitchTarget:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchTarget *>(inst));
        case IrInstSrcIdImport:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcImport *>(inst));
        case IrInstSrcIdRef:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcRef *>(inst));
        case IrInstSrcIdCompileErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCompileErr *>(inst));
        case IrInstSrcIdCompileLog:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCompileLog *>(inst));
        case IrInstSrcIdErrName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrName *>(inst));
        case IrInstSrcIdCImport:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCImport *>(inst));
        case IrInstSrcIdCInclude:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCInclude *>(inst));
        case IrInstSrcIdCDefine:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCDefine *>(inst));
        case IrInstSrcIdCUndef:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCUndef *>(inst));
        case IrInstSrcIdEmbedFile:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcEmbedFile *>(inst));
        case IrInstSrcIdCmpxchg:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCmpxchg *>(inst));
        case IrInstSrcIdFence:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFence *>(inst));
        case IrInstSrcIdReduce:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcReduce *>(inst));
        case IrInstSrcIdTruncate:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTruncate *>(inst));
        case IrInstSrcIdIntCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntCast *>(inst));
        case IrInstSrcIdFloatCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFloatCast *>(inst));
        case IrInstSrcIdErrSetCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrSetCast *>(inst));
        case IrInstSrcIdIntToFloat:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToFloat *>(inst));
        case IrInstSrcIdFloatToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFloatToInt *>(inst));
        case IrInstSrcIdBoolToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBoolToInt *>(inst));
        case IrInstSrcIdVectorType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcVectorType *>(inst));
        case IrInstSrcIdShuffleVector:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcShuffleVector *>(inst));
        case IrInstSrcIdSplat:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSplat *>(inst));
        case IrInstSrcIdBoolNot:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBoolNot *>(inst));
        case IrInstSrcIdMemset:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMemset *>(inst));
        case IrInstSrcIdMemcpy:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMemcpy *>(inst));
        case IrInstSrcIdSlice:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSlice *>(inst));
        case IrInstSrcIdBreakpoint:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBreakpoint *>(inst));
        case IrInstSrcIdReturnAddress:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcReturnAddress *>(inst));
        case IrInstSrcIdFrameAddress:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameAddress *>(inst));
        case IrInstSrcIdFrameHandle:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameHandle *>(inst));
        case IrInstSrcIdFrameType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameType *>(inst));
        case IrInstSrcIdFrameSize:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameSize *>(inst));
        case IrInstSrcIdAlignOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAlignOf *>(inst));
        case IrInstSrcIdOverflowOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcOverflowOp *>(inst));
        case IrInstSrcIdTestErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTestErr *>(inst));
        case IrInstSrcIdUnwrapErrCode:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnwrapErrCode *>(inst));
        case IrInstSrcIdUnwrapErrPayload:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnwrapErrPayload *>(inst));
        case IrInstSrcIdFnProto:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFnProto *>(inst));
        case IrInstSrcIdTestComptime:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTestComptime *>(inst));
        case IrInstSrcIdPtrCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrCast *>(inst));
        case IrInstSrcIdBitCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBitCast *>(inst));
        case IrInstSrcIdPtrToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrToInt *>(inst));
        case IrInstSrcIdIntToPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToPtr *>(inst));
        case IrInstSrcIdIntToEnum:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToEnum *>(inst));
        case IrInstSrcIdIntToErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToErr *>(inst));
        case IrInstSrcIdErrToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrToInt *>(inst));
        case IrInstSrcIdCheckSwitchProngs:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckSwitchProngs *>(inst));
        case IrInstSrcIdCheckStatementIsVoid:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckStatementIsVoid *>(inst));
        case IrInstSrcIdTypeName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTypeName *>(inst));
        case IrInstSrcIdTagName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTagName *>(inst));
        case IrInstSrcIdPtrType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrType *>(inst));
        case IrInstSrcIdDeclRef:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcDeclRef *>(inst));
        case IrInstSrcIdPanic:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPanic *>(inst));
        case IrInstSrcIdFieldParentPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFieldParentPtr *>(inst));
        case IrInstSrcIdByteOffsetOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcByteOffsetOf *>(inst));
        case IrInstSrcIdBitOffsetOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBitOffsetOf *>(inst));
        case IrInstSrcIdTypeInfo:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTypeInfo *>(inst));
        case IrInstSrcIdType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcType *>(inst));
        case IrInstSrcIdHasField:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcHasField *>(inst));
        case IrInstSrcIdSetEvalBranchQuota:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetEvalBranchQuota *>(inst));
        case IrInstSrcIdAlignCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAlignCast *>(inst));
        case IrInstSrcIdImplicitCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcImplicitCast *>(inst));
        case IrInstSrcIdResolveResult:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcResolveResult *>(inst));
        case IrInstSrcIdResetResult:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcResetResult *>(inst));
        case IrInstSrcIdSetAlignStack:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetAlignStack *>(inst));
        case IrInstSrcIdArgType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcArgType *>(inst));
        case IrInstSrcIdTagType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTagType *>(inst));
        case IrInstSrcIdExport:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcExport *>(inst));
        case IrInstSrcIdExtern:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcExtern *>(inst));
        case IrInstSrcIdErrorReturnTrace:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrorReturnTrace *>(inst));
        case IrInstSrcIdErrorUnion:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrorUnion *>(inst));
        case IrInstSrcIdAtomicRmw:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAtomicRmw *>(inst));
        case IrInstSrcIdSaveErrRetAddr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSaveErrRetAddr *>(inst));
        case IrInstSrcIdAddImplicitReturnType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAddImplicitReturnType *>(inst));
        case IrInstSrcIdFloatOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFloatOp *>(inst));
        case IrInstSrcIdMulAdd:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMulAdd *>(inst));
        case IrInstSrcIdAtomicLoad:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAtomicLoad *>(inst));
        case IrInstSrcIdAtomicStore:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAtomicStore *>(inst));
        case IrInstSrcIdEnumToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcEnumToInt *>(inst));
        case IrInstSrcIdCheckRuntimeScope:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckRuntimeScope *>(inst));
        case IrInstSrcIdHasDecl:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcHasDecl *>(inst));
        case IrInstSrcIdUndeclaredIdent:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUndeclaredIdent *>(inst));
        case IrInstSrcIdAlloca:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAlloca *>(inst));
        case IrInstSrcIdEndExpr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcEndExpr *>(inst));
        case IrInstSrcIdUnionInitNamedField:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnionInitNamedField *>(inst));
        case IrInstSrcIdSuspendBegin:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSuspendBegin *>(inst));
        case IrInstSrcIdSuspendFinish:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSuspendFinish *>(inst));
        case IrInstSrcIdResume:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcResume *>(inst));
        case IrInstSrcIdAwait:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAwait *>(inst));
        case IrInstSrcIdSpillBegin:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSpillBegin *>(inst));
        case IrInstSrcIdSpillEnd:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSpillEnd *>(inst));
        case IrInstSrcIdCallArgs:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCallArgs *>(inst));
        case IrInstSrcIdWasmMemorySize:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcWasmMemorySize *>(inst));
        case IrInstSrcIdWasmMemoryGrow:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcWasmMemoryGrow *>(inst));
        case IrInstSrcIdSrc:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSrc *>(inst));
    }
    zig_unreachable();
}
|
|
|
|
// Free a single pass-2 (gen) IR instruction. Each case casts `inst` to its
// concrete subtype before handing it to the allocator, so the full derived
// object — not just the IrInstGen base — is destroyed. IrInstGenIdInvalid is
// never a live instruction; the trailing zig_unreachable() guards against an
// id value outside the enum.
void destroy_instruction_gen(IrInstGen *inst) {
    switch (inst->id) {
        case IrInstGenIdInvalid:
            zig_unreachable();
        case IrInstGenIdReturn: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenReturn *>(inst));
        case IrInstGenIdConst: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenConst *>(inst));
        case IrInstGenIdBinOp: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBinOp *>(inst));
        case IrInstGenIdCast: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCast *>(inst));
        case IrInstGenIdCall: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCall *>(inst));
        case IrInstGenIdCondBr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCondBr *>(inst));
        case IrInstGenIdBr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBr *>(inst));
        case IrInstGenIdPhi: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPhi *>(inst));
        case IrInstGenIdUnreachable: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnreachable *>(inst));
        case IrInstGenIdElemPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenElemPtr *>(inst));
        case IrInstGenIdVarPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVarPtr *>(inst));
        case IrInstGenIdReturnPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenReturnPtr *>(inst));
        case IrInstGenIdLoadPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenLoadPtr *>(inst));
        case IrInstGenIdStorePtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenStorePtr *>(inst));
        case IrInstGenIdVectorStoreElem: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVectorStoreElem *>(inst));
        case IrInstGenIdStructFieldPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenStructFieldPtr *>(inst));
        case IrInstGenIdUnionFieldPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnionFieldPtr *>(inst));
        case IrInstGenIdAsm: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAsm *>(inst));
        case IrInstGenIdTestNonNull: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTestNonNull *>(inst));
        case IrInstGenIdOptionalUnwrapPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenOptionalUnwrapPtr *>(inst));
        case IrInstGenIdPopCount: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPopCount *>(inst));
        case IrInstGenIdClz: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenClz *>(inst));
        case IrInstGenIdCtz: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCtz *>(inst));
        case IrInstGenIdBswap: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBswap *>(inst));
        case IrInstGenIdBitReverse: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBitReverse *>(inst));
        case IrInstGenIdSwitchBr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSwitchBr *>(inst));
        case IrInstGenIdUnionTag: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnionTag *>(inst));
        case IrInstGenIdRef: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenRef *>(inst));
        case IrInstGenIdErrName: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrName *>(inst));
        case IrInstGenIdCmpxchg: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCmpxchg *>(inst));
        case IrInstGenIdFence: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFence *>(inst));
        case IrInstGenIdReduce: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenReduce *>(inst));
        case IrInstGenIdTruncate: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTruncate *>(inst));
        case IrInstGenIdShuffleVector: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenShuffleVector *>(inst));
        case IrInstGenIdSplat: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSplat *>(inst));
        case IrInstGenIdBoolNot: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBoolNot *>(inst));
        case IrInstGenIdMemset: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenMemset *>(inst));
        case IrInstGenIdMemcpy: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenMemcpy *>(inst));
        case IrInstGenIdSlice: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSlice *>(inst));
        case IrInstGenIdBreakpoint: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBreakpoint *>(inst));
        case IrInstGenIdReturnAddress: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenReturnAddress *>(inst));
        case IrInstGenIdFrameAddress: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFrameAddress *>(inst));
        case IrInstGenIdFrameHandle: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFrameHandle *>(inst));
        case IrInstGenIdFrameSize: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFrameSize *>(inst));
        case IrInstGenIdOverflowOp: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenOverflowOp *>(inst));
        case IrInstGenIdTestErr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTestErr *>(inst));
        case IrInstGenIdUnwrapErrCode: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnwrapErrCode *>(inst));
        case IrInstGenIdUnwrapErrPayload: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnwrapErrPayload *>(inst));
        case IrInstGenIdOptionalWrap: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenOptionalWrap *>(inst));
        case IrInstGenIdErrWrapCode: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrWrapCode *>(inst));
        case IrInstGenIdErrWrapPayload: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrWrapPayload *>(inst));
        case IrInstGenIdPtrCast: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPtrCast *>(inst));
        case IrInstGenIdBitCast: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBitCast *>(inst));
        case IrInstGenIdWidenOrShorten: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenWidenOrShorten *>(inst));
        case IrInstGenIdPtrToInt: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPtrToInt *>(inst));
        case IrInstGenIdIntToPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenIntToPtr *>(inst));
        case IrInstGenIdIntToEnum: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenIntToEnum *>(inst));
        case IrInstGenIdIntToErr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenIntToErr *>(inst));
        case IrInstGenIdErrToInt: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrToInt *>(inst));
        case IrInstGenIdTagName: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTagName *>(inst));
        case IrInstGenIdPanic: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPanic *>(inst));
        case IrInstGenIdFieldParentPtr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFieldParentPtr *>(inst));
        case IrInstGenIdAlignCast: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAlignCast *>(inst));
        case IrInstGenIdErrorReturnTrace: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrorReturnTrace *>(inst));
        case IrInstGenIdAtomicRmw: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAtomicRmw *>(inst));
        case IrInstGenIdSaveErrRetAddr: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSaveErrRetAddr *>(inst));
        case IrInstGenIdFloatOp: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFloatOp *>(inst));
        case IrInstGenIdMulAdd: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenMulAdd *>(inst));
        case IrInstGenIdAtomicLoad: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAtomicLoad *>(inst));
        case IrInstGenIdAtomicStore: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAtomicStore *>(inst));
        case IrInstGenIdDeclVar: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenDeclVar *>(inst));
        case IrInstGenIdArrayToVector: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenArrayToVector *>(inst));
        case IrInstGenIdVectorToArray: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVectorToArray *>(inst));
        case IrInstGenIdPtrOfArrayToSlice: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPtrOfArrayToSlice *>(inst));
        case IrInstGenIdAssertZero: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAssertZero *>(inst));
        case IrInstGenIdAssertNonNull: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAssertNonNull *>(inst));
        case IrInstGenIdAlloca: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAlloca *>(inst));
        case IrInstGenIdSuspendBegin: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSuspendBegin *>(inst));
        case IrInstGenIdSuspendFinish: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSuspendFinish *>(inst));
        case IrInstGenIdResume: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenResume *>(inst));
        case IrInstGenIdAwait: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAwait *>(inst));
        case IrInstGenIdSpillBegin: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSpillBegin *>(inst));
        case IrInstGenIdSpillEnd: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSpillEnd *>(inst));
        case IrInstGenIdVectorExtractElem: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVectorExtractElem *>(inst));
        case IrInstGenIdBinaryNot: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBinaryNot *>(inst));
        case IrInstGenIdNegation: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenNegation *>(inst));
        case IrInstGenIdWasmMemorySize: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenWasmMemorySize *>(inst));
        case IrInstGenIdWasmMemoryGrow: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenWasmMemoryGrow *>(inst));
        case IrInstGenIdExtern: return heap::c_allocator.destroy(reinterpret_cast<IrInstGenExtern *>(inst));
    }
    zig_unreachable();
}
|
|
|
|
// Take one reference on the analysis context; paired with ira_deref().
static void ira_ref(IrAnalyze *ira) {
    ++ira->ref_count;
}
|
|
// Release one reference on the analysis context. Dropping the last reference
// tears down the pass-1 IR (instructions, basic blocks, executable), the
// analysis bookkeeping lists, and any pass-2 constants that ended up unused,
// then frees the context itself.
static void ira_deref(IrAnalyze *ira) {
    if (ira->ref_count > 1) {
        ira->ref_count -= 1;

        // immediate destruction of dangling IrInstGenConst is not possible
        // free tracking memory because it will never be used
        ira->new_irb.constants.deinit(&heap::c_allocator);
        return;
    }
    assert(ira->ref_count != 0);

    // Last reference: free every pass-1 instruction, then its owning block.
    for (size_t bb_i = 0; bb_i < ira->old_irb.exec->basic_block_list.length; bb_i += 1) {
        IrBasicBlockSrc *pass1_bb = ira->old_irb.exec->basic_block_list.items[bb_i];
        for (size_t inst_i = 0; inst_i < pass1_bb->instruction_list.length; inst_i += 1) {
            IrInstSrc *pass1_inst = pass1_bb->instruction_list.items[inst_i];
            destroy_instruction_src(pass1_inst);
        }
        heap::c_allocator.destroy(pass1_bb);
    }
    ira->old_irb.exec->basic_block_list.deinit();
    ira->old_irb.exec->tld_list.deinit();
    heap::c_allocator.destroy(ira->old_irb.exec);
    ira->src_implicit_return_type_list.deinit();
    ira->resume_stack.deinit();

    // destroy dangling IrInstGenConst
    // (only those with no remaining users and no side effects)
    for (size_t i = 0; i < ira->new_irb.constants.length; i += 1) {
        auto constant = ira->new_irb.constants.items[i];
        if (constant->base.base.ref_count == 0 && !ir_inst_gen_has_side_effects(&constant->base))
            destroy_instruction_gen(&constant->base);
    }
    ira->new_irb.constants.deinit(&heap::c_allocator);

    heap::c_allocator.destroy(ira);
}
|
|
|
|
// Resolve a static comptime pointer value to the ZigValue it points at,
// without the inferred-struct-field handling that const_ptr_pointee_unchecked
// layers on top. "unchecked": validity of the resulting pointee is the
// caller's responsibility. Returns nullptr only when the child type turns out
// to be invalid.
static ZigValue *const_ptr_pointee_unchecked_no_isf(CodeGen *g, ZigValue *const_val) {
    assert(get_src_ptr_type(const_val->type) != nullptr);
    assert(const_val->special == ConstValSpecialStatic);

    // If the child type has exactly one possible value, the pointee is that
    // value no matter how the pointer itself is represented.
    switch (type_has_one_possible_value(g, const_val->type->data.pointer.child_type)) {
        case OnePossibleValueInvalid:
            return nullptr;
        case OnePossibleValueYes:
            return get_the_one_possible_value(g, const_val->type->data.pointer.child_type);
        case OnePossibleValueNo:
            break;
    }

    ZigValue *result;
    switch (const_val->data.x_ptr.special) {
        case ConstPtrSpecialInvalid:
            zig_unreachable();
        case ConstPtrSpecialRef:
            // Direct reference to a single value.
            result = const_val->data.x_ptr.data.ref.pointee;
            break;
        case ConstPtrSpecialBaseArray: {
            ZigValue *array_val = const_val->data.x_ptr.data.base_array.array_val;
            size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
            // An index equal to len addresses the array's sentinel element.
            if (elem_index == array_val->type->data.array.len) {
                result = array_val->type->data.array.sentinel;
            } else {
                expand_undef_array(g, array_val);
                result = &array_val->data.x_array.data.s_none.elements[elem_index];
            }
            break;
        }
        case ConstPtrSpecialSubArray: {
            // Sub-array pointers reuse the base_array payload; build a fresh
            // array value whose elements alias the tail of the parent array.
            ZigValue *array_val = const_val->data.x_ptr.data.base_array.array_val;
            size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;

            expand_undef_array(g, array_val);
            result = g->pass1_arena->create<ZigValue>();
            result->special = array_val->special;
            result->type = get_array_type(g, array_val->type->data.array.child_type,
                    array_val->type->data.array.len - elem_index, array_val->type->data.array.sentinel);
            result->data.x_array.special = ConstArraySpecialNone;
            result->data.x_array.data.s_none.elements = &array_val->data.x_array.data.s_none.elements[elem_index];
            // Record the parent so mutations can be traced back to the
            // original array value.
            result->parent.id = ConstParentIdArray;
            result->parent.data.p_array.array_val = array_val;
            result->parent.data.p_array.elem_index = elem_index;
            break;
        }
        case ConstPtrSpecialBaseStruct: {
            ZigValue *struct_val = const_val->data.x_ptr.data.base_struct.struct_val;
            expand_undef_struct(g, struct_val);
            result = struct_val->data.x_struct.fields[const_val->data.x_ptr.data.base_struct.field_index];
            break;
        }
        case ConstPtrSpecialBaseErrorUnionCode:
            result = const_val->data.x_ptr.data.base_err_union_code.err_union_val->data.x_err_union.error_set;
            break;
        case ConstPtrSpecialBaseErrorUnionPayload:
            result = const_val->data.x_ptr.data.base_err_union_payload.err_union_val->data.x_err_union.payload;
            break;
        case ConstPtrSpecialBaseOptionalPayload:
            result = const_val->data.x_ptr.data.base_optional_payload.optional_val->data.x_optional;
            break;
        case ConstPtrSpecialNull:
            // The null pointer value is its own pointee.
            result = const_val;
            break;
        case ConstPtrSpecialHardCodedAddr:
            zig_unreachable();
        case ConstPtrSpecialDiscard:
            zig_unreachable();
        case ConstPtrSpecialFunction:
            zig_unreachable();
    }
    assert(result != nullptr);
    return result;
}
|
|
|
|
// Resolve a static comptime pointer to its pointee. If the pointer type
// carries an inferred struct field, resolve through that field first:
// a comptime field yields its init value directly; otherwise the field is
// looked up inside the pointed-at struct value.
static ZigValue *const_ptr_pointee_unchecked(CodeGen *g, ZigValue *const_val) {
    assert(get_src_ptr_type(const_val->type) != nullptr);
    assert(const_val->special == ConstValSpecialStatic);

    InferredStructField *isf = const_val->type->data.pointer.inferred_struct_field;
    if (isf != nullptr) {
        TypeStructField *field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
        assert(field != nullptr);
        if (field->is_comptime) {
            // Comptime fields have a fixed value; no need to touch the
            // underlying struct value at all.
            assert(field->init_val != nullptr);
            return field->init_val;
        }
        ZigValue *struct_val = const_ptr_pointee_unchecked_no_isf(g, const_val);
        assert(struct_val->type->id == ZigTypeIdStruct);
        return struct_val->data.x_struct.fields[field->src_index];
    }

    return const_ptr_pointee_unchecked_no_isf(g, const_val);
}
|
|
|
|
// An inferred tuple is represented as a struct with the InferredTuple special.
static bool is_tuple(ZigType *type) {
    if (type->id != ZigTypeIdStruct)
        return false;
    return type->data.structure.special == StructSpecialInferredTuple;
}
|
|
|
|
// A slice is represented as a struct with the Slice special.
static bool is_slice(ZigType *type) {
    if (type->id != ZigTypeIdStruct)
        return false;
    return type->data.structure.special == StructSpecialSlice;
}
|
|
|
|
// This function returns true when you can change the type of a ZigValue and the
// value remains meaningful.
static bool types_have_same_zig_comptime_repr(CodeGen *codegen, ZigType *expected, ZigType *actual) {
    if (expected == actual)
        return true;

    // All source-level pointers share a representation at comptime.
    if (get_src_ptr_type(expected) != nullptr && get_src_ptr_type(actual) != nullptr)
        return true;

    if (is_opt_err_set(expected) && is_opt_err_set(actual))
        return true;

    // XXX: Vectors and arrays are interchangeable at comptime
    if (expected->id != actual->id)
        return false;

    switch (expected->id) {
        case ZigTypeIdInvalid:
        case ZigTypeIdUnreachable:
            zig_unreachable();
        // These kinds have a single comptime representation per kind.
        case ZigTypeIdMetaType:
        case ZigTypeIdVoid:
        case ZigTypeIdBool:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdBoundFn:
        case ZigTypeIdErrorSet:
        case ZigTypeIdOpaque:
        case ZigTypeIdAnyFrame:
        case ZigTypeIdFn:
            return true;
        case ZigTypeIdPointer:
            return expected->data.pointer.inferred_struct_field == actual->data.pointer.inferred_struct_field;
        case ZigTypeIdFloat:
            return expected->data.floating.bit_count == actual->data.floating.bit_count;
        case ZigTypeIdInt:
            // Only signedness is compared; bit width is not.
            return expected->data.integral.is_signed == actual->data.integral.is_signed;
        case ZigTypeIdStruct:
            return is_slice(expected) && is_slice(actual);
        // These kinds never share a representation across distinct types.
        case ZigTypeIdOptional:
        case ZigTypeIdErrorUnion:
        case ZigTypeIdEnum:
        case ZigTypeIdUnion:
        case ZigTypeIdFnFrame:
            return false;
        case ZigTypeIdVector:
            // Same length and element-wise compatible representation.
            return expected->data.vector.len == actual->data.vector.len &&
                types_have_same_zig_comptime_repr(codegen, expected->data.vector.elem_type, actual->data.vector.elem_type);
        case ZigTypeIdArray:
            // Same length/child; the expected sentinel (if any) must be
            // matched by an equal sentinel on the actual type.
            return expected->data.array.len == actual->data.array.len &&
                expected->data.array.child_type == actual->data.array.child_type &&
                (expected->data.array.sentinel == nullptr || (actual->data.array.sentinel != nullptr &&
                const_values_equal(codegen, expected->data.array.sentinel, actual->data.array.sentinel)));
    }
    zig_unreachable();
}
|
|
|
|
// Decide whether this code must be evaluated at compile time: either the
// whole executable is inline, or the scope chain reaches a comptime scope
// before hitting a @TypeOf scope or the function-definition boundary.
static bool ir_should_inline(IrExecutableSrc *exec, Scope *scope) {
    if (exec->is_inline)
        return true;

    for (Scope *s = scope; s != nullptr; s = s->parent) {
        switch (s->id) {
            case ScopeIdCompTime:
                return true;
            case ScopeIdTypeOf:
                return false;
            case ScopeIdFnDef:
                // Stop at the enclosing function definition.
                return false;
            default:
                break;
        }
    }
    return false;
}
|
|
|
|
// Append a pass-1 instruction to the given basic block.
static void ir_instruction_append(IrBasicBlockSrc *basic_block, IrInstSrc *instruction) {
    assert(basic_block != nullptr);
    assert(instruction != nullptr);

    basic_block->instruction_list.append(instruction);
}
|
|
|
|
// Append a pass-2 instruction to the given basic block.
static void ir_inst_gen_append(IrBasicBlockGen *basic_block, IrInstGen *instruction) {
    assert(basic_block != nullptr);
    assert(instruction != nullptr);

    basic_block->instruction_list.append(instruction);
}
|
|
|
|
// Hand out the next monotonically increasing debug id for pass-1 IR.
static size_t exec_next_debug_id(IrExecutableSrc *exec) {
    const size_t id = exec->next_debug_id;
    exec->next_debug_id = id + 1;
    return id;
}
|
|
|
|
// Hand out the next monotonically increasing debug id for pass-2 IR.
static size_t exec_next_debug_id_gen(IrExecutableGen *exec) {
    const size_t id = exec->next_debug_id;
    exec->next_debug_id = id + 1;
    return id;
}
|
|
|
|
// Accessor for the executable's function entry.
static ZigFn *exec_fn_entry(IrExecutableSrc *exec) {
    ZigFn *fn_entry = exec->fn_entry;
    return fn_entry;
}
|
|
|
|
// Accessor for the executable's @cImport buffer.
static Buf *exec_c_import_buf(IrExecutableSrc *exec) {
    Buf *buf = exec->c_import_buf;
    return buf;
}
|
|
|
|
// A value is comptime-known unless it is explicitly marked runtime.
static bool value_is_comptime(ZigValue *const_val) {
    return !(const_val->special == ConstValSpecialRuntime);
}
|
|
|
|
static bool instr_is_comptime(IrInstGen *instruction) {
|
|
return value_is_comptime(instruction->value);
|
|
}
|
|
|
|
static bool instr_is_unreachable(IrInstSrc *instruction) {
|
|
return instruction->is_noreturn;
|
|
}
|
|
|
|
// Take a reference on a pass-1 basic block.
static void ir_ref_bb(IrBasicBlockSrc *bb) {
    ++bb->ref_count;
}
|
|
|
|
// Record a use of a pass-1 instruction from `cur_bb`. A use from a different
// block also references the defining block, except for unreachable
// instructions and Const instructions.
static void ir_ref_instruction(IrInstSrc *instruction, IrBasicBlockSrc *cur_bb) {
    assert(instruction->id != IrInstSrcIdInvalid);
    instruction->base.ref_count += 1;

    const bool same_block = (instruction->owner_bb == cur_bb);
    const bool is_const = (instruction->id == IrInstSrcIdConst);
    if (!same_block && !is_const && !instr_is_unreachable(instruction))
        ir_ref_bb(instruction->owner_bb);
}
|
|
|
|
static void ir_ref_inst_gen(IrInstGen *instruction) {
|
|
assert(instruction->id != IrInstGenIdInvalid);
|
|
instruction->base.ref_count += 1;
|
|
}
|
|
|
|
// Take a reference on a variable.
static void ir_ref_var(ZigVar *var) {
    ++var->ref_count;
}
|
|
|
|
// Allocate a ZigValue (initially undef, typed `expected_type`) to receive a
// comptime evaluation result, plus a comptime-mutable pointer referring to
// it. Both objects come from the pass1 arena and are returned through the
// out-parameters.
static void create_result_ptr(CodeGen *codegen, ZigType *expected_type,
        ZigValue **out_result, ZigValue **out_result_ptr)
{
    ZigValue *pointee = codegen->pass1_arena->create<ZigValue>();
    pointee->special = ConstValSpecialUndef;
    pointee->type = expected_type;

    ZigValue *ptr = codegen->pass1_arena->create<ZigValue>();
    ptr->special = ConstValSpecialStatic;
    ptr->type = get_pointer_to_type(codegen, pointee->type, false);
    ptr->data.x_ptr.mut = ConstPtrMutComptimeVar;
    ptr->data.x_ptr.special = ConstPtrSpecialRef;
    ptr->data.x_ptr.data.ref.pointee = pointee;

    *out_result = pointee;
    *out_result_ptr = ptr;
}
|
|
|
|
// Evaluate `node` as a compile-time expression of type `type` and return the
// resulting ZigType. Returns builtin_types.entry_invalid if evaluation fails
// or produces an invalid type.
ZigType *ir_analyze_type_expr(IrAnalyze *ira, Scope *scope, AstNode *node) {
    Error err;

    // Result slot typed as `type`, plus the pointer ir_eval_const_value
    // writes through.
    ZigValue *result;
    ZigValue *result_ptr;
    create_result_ptr(ira->codegen, ira->codegen->builtin_types.entry_type, &result, &result_ptr);

    // Undefined is not an acceptable value for a type expression (UndefBad).
    if ((err = ir_eval_const_value(ira->codegen, scope, node, result_ptr,
            ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota,
            nullptr, nullptr, node, nullptr, ira->new_irb.exec, nullptr, UndefBad)))
    {
        return ira->codegen->builtin_types.entry_invalid;
    }
    if (type_is_invalid(result->type))
        return ira->codegen->builtin_types.entry_invalid;

    assert(result->special != ConstValSpecialRuntime);
    ZigType *res_type = result->data.x_type;

    return res_type;
}
|
|
|
|
// Construct a fresh pass-1 basic block with a new debug id. The block's
// index is not known yet and is filled in later.
static IrBasicBlockSrc *ir_create_basic_block(IrBuilderSrc *irb, Scope *scope, const char *name_hint) {
    IrBasicBlockSrc *bb = heap::c_allocator.create<IrBasicBlockSrc>();
    bb->scope = scope;
    bb->name_hint = name_hint;
    bb->debug_id = exec_next_debug_id(irb->exec);
    bb->index = UINT32_MAX; // set later
    return bb;
}
|
|
|
|
// Construct a fresh pass-2 basic block with a new debug id.
static IrBasicBlockGen *ir_create_basic_block_gen(IrAnalyze *ira, Scope *scope, const char *name_hint) {
    IrBasicBlockGen *bb = heap::c_allocator.create<IrBasicBlockGen>();
    bb->scope = scope;
    bb->name_hint = name_hint;
    bb->debug_id = exec_next_debug_id_gen(ira->new_irb.exec);
    return bb;
}
|
|
|
|
// Create the pass-2 counterpart of a pass-1 basic block and link it in as
// the source block's child.
static IrBasicBlockGen *ir_build_bb_from(IrAnalyze *ira, IrBasicBlockSrc *other_bb) {
    IrBasicBlockGen *child = ir_create_basic_block_gen(ira, other_bb->scope, other_bb->name_hint);
    other_bb->child = child;
    return child;
}
|
|
|
|
// One ir_inst_id overload per pass-1 instruction struct: overload resolution
// maps each concrete IrInstSrc* type to its IrInstSrcId enum tag.
static constexpr IrInstSrcId ir_inst_id(IrInstSrcDeclVar *) { return IrInstSrcIdDeclVar; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBr *) { return IrInstSrcIdBr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCondBr *) { return IrInstSrcIdCondBr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchBr *) { return IrInstSrcIdSwitchBr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchVar *) { return IrInstSrcIdSwitchVar; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchElseVar *) { return IrInstSrcIdSwitchElseVar; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchTarget *) { return IrInstSrcIdSwitchTarget; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcPhi *) { return IrInstSrcIdPhi; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnOp *) { return IrInstSrcIdUnOp; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBinOp *) { return IrInstSrcIdBinOp; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcMergeErrSets *) { return IrInstSrcIdMergeErrSets; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcLoadPtr *) { return IrInstSrcIdLoadPtr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcStorePtr *) { return IrInstSrcIdStorePtr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFieldPtr *) { return IrInstSrcIdFieldPtr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcElemPtr *) { return IrInstSrcIdElemPtr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcVarPtr *) { return IrInstSrcIdVarPtr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCall *) { return IrInstSrcIdCall; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCallArgs *) { return IrInstSrcIdCallArgs; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCallExtra *) { return IrInstSrcIdCallExtra; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAsyncCallExtra *) { return IrInstSrcIdAsyncCallExtra; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcConst *) { return IrInstSrcIdConst; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcReturn *) { return IrInstSrcIdReturn; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcContainerInitList *) { return IrInstSrcIdContainerInitList; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcContainerInitFields *) { return IrInstSrcIdContainerInitFields; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnreachable *) { return IrInstSrcIdUnreachable; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTypeOf *) { return IrInstSrcIdTypeOf; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetCold *) { return IrInstSrcIdSetCold; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetRuntimeSafety *) { return IrInstSrcIdSetRuntimeSafety; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetFloatMode *) { return IrInstSrcIdSetFloatMode; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcArrayType *) { return IrInstSrcIdArrayType; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAnyFrameType *) { return IrInstSrcIdAnyFrameType; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSliceType *) { return IrInstSrcIdSliceType; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAsm *) { return IrInstSrcIdAsm; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSizeOf *) { return IrInstSrcIdSizeOf; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTestNonNull *) { return IrInstSrcIdTestNonNull; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcOptionalUnwrapPtr *) { return IrInstSrcIdOptionalUnwrapPtr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcClz *) { return IrInstSrcIdClz; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCtz *) { return IrInstSrcIdCtz; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcPopCount *) { return IrInstSrcIdPopCount; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBswap *) { return IrInstSrcIdBswap; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBitReverse *) { return IrInstSrcIdBitReverse; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcImport *) { return IrInstSrcIdImport; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCImport *) { return IrInstSrcIdCImport; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCInclude *) { return IrInstSrcIdCInclude; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCDefine *) { return IrInstSrcIdCDefine; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCUndef *) { return IrInstSrcIdCUndef; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcRef *) { return IrInstSrcIdRef; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCompileErr *) { return IrInstSrcIdCompileErr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCompileLog *) { return IrInstSrcIdCompileLog; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrName *) { return IrInstSrcIdErrName; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcEmbedFile *) { return IrInstSrcIdEmbedFile; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCmpxchg *) { return IrInstSrcIdCmpxchg; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFence *) { return IrInstSrcIdFence; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcReduce *) { return IrInstSrcIdReduce; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTruncate *) { return IrInstSrcIdTruncate; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntCast *) { return IrInstSrcIdIntCast; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFloatCast *) { return IrInstSrcIdFloatCast; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToFloat *) { return IrInstSrcIdIntToFloat; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFloatToInt *) { return IrInstSrcIdFloatToInt; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBoolToInt *) { return IrInstSrcIdBoolToInt; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcVectorType *) { return IrInstSrcIdVectorType; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcShuffleVector *) { return IrInstSrcIdShuffleVector; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSplat *) { return IrInstSrcIdSplat; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBoolNot *) { return IrInstSrcIdBoolNot; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcMemset *) { return IrInstSrcIdMemset; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcMemcpy *) { return IrInstSrcIdMemcpy; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSlice *) { return IrInstSrcIdSlice; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBreakpoint *) { return IrInstSrcIdBreakpoint; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcReturnAddress *) { return IrInstSrcIdReturnAddress; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameAddress *) { return IrInstSrcIdFrameAddress; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameHandle *) { return IrInstSrcIdFrameHandle; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameType *) { return IrInstSrcIdFrameType; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameSize *) { return IrInstSrcIdFrameSize; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAlignOf *) { return IrInstSrcIdAlignOf; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcOverflowOp *) { return IrInstSrcIdOverflowOp; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTestErr *) { return IrInstSrcIdTestErr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcMulAdd *) { return IrInstSrcIdMulAdd; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFloatOp *) { return IrInstSrcIdFloatOp; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnwrapErrCode *) { return IrInstSrcIdUnwrapErrCode; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnwrapErrPayload *) { return IrInstSrcIdUnwrapErrPayload; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFnProto *) { return IrInstSrcIdFnProto; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTestComptime *) { return IrInstSrcIdTestComptime; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcPtrCast *) { return IrInstSrcIdPtrCast; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBitCast *) { return IrInstSrcIdBitCast; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToPtr *) { return IrInstSrcIdIntToPtr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcPtrToInt *) { return IrInstSrcIdPtrToInt; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToEnum *) { return IrInstSrcIdIntToEnum; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcEnumToInt *) { return IrInstSrcIdEnumToInt; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToErr *) { return IrInstSrcIdIntToErr; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrToInt *) { return IrInstSrcIdErrToInt; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckSwitchProngs *) { return IrInstSrcIdCheckSwitchProngs; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckStatementIsVoid *) { return IrInstSrcIdCheckStatementIsVoid; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTypeName *) { return IrInstSrcIdTypeName; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcDeclRef *) { return IrInstSrcIdDeclRef; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcPanic *) { return IrInstSrcIdPanic; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTagName *) { return IrInstSrcIdTagName; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTagType *) { return IrInstSrcIdTagType; }
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFieldParentPtr *) { return IrInstSrcIdFieldParentPtr; }
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcByteOffsetOf *) {
|
|
return IrInstSrcIdByteOffsetOf;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcBitOffsetOf *) {
|
|
return IrInstSrcIdBitOffsetOf;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcTypeInfo *) {
|
|
return IrInstSrcIdTypeInfo;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcType *) {
|
|
return IrInstSrcIdType;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcHasField *) {
|
|
return IrInstSrcIdHasField;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetEvalBranchQuota *) {
|
|
return IrInstSrcIdSetEvalBranchQuota;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcPtrType *) {
|
|
return IrInstSrcIdPtrType;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAlignCast *) {
|
|
return IrInstSrcIdAlignCast;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcImplicitCast *) {
|
|
return IrInstSrcIdImplicitCast;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcResolveResult *) {
|
|
return IrInstSrcIdResolveResult;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcResetResult *) {
|
|
return IrInstSrcIdResetResult;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetAlignStack *) {
|
|
return IrInstSrcIdSetAlignStack;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcArgType *) {
|
|
return IrInstSrcIdArgType;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcExport *) {
|
|
return IrInstSrcIdExport;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcExtern *) {
|
|
return IrInstSrcIdExtern;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrorReturnTrace *) {
|
|
return IrInstSrcIdErrorReturnTrace;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrorUnion *) {
|
|
return IrInstSrcIdErrorUnion;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAtomicRmw *) {
|
|
return IrInstSrcIdAtomicRmw;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAtomicLoad *) {
|
|
return IrInstSrcIdAtomicLoad;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAtomicStore *) {
|
|
return IrInstSrcIdAtomicStore;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSaveErrRetAddr *) {
|
|
return IrInstSrcIdSaveErrRetAddr;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAddImplicitReturnType *) {
|
|
return IrInstSrcIdAddImplicitReturnType;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrSetCast *) {
|
|
return IrInstSrcIdErrSetCast;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckRuntimeScope *) {
|
|
return IrInstSrcIdCheckRuntimeScope;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcHasDecl *) {
|
|
return IrInstSrcIdHasDecl;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcUndeclaredIdent *) {
|
|
return IrInstSrcIdUndeclaredIdent;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAlloca *) {
|
|
return IrInstSrcIdAlloca;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcEndExpr *) {
|
|
return IrInstSrcIdEndExpr;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnionInitNamedField *) {
|
|
return IrInstSrcIdUnionInitNamedField;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSuspendBegin *) {
|
|
return IrInstSrcIdSuspendBegin;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSuspendFinish *) {
|
|
return IrInstSrcIdSuspendFinish;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcAwait *) {
|
|
return IrInstSrcIdAwait;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcResume *) {
|
|
return IrInstSrcIdResume;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSpillBegin *) {
|
|
return IrInstSrcIdSpillBegin;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSpillEnd *) {
|
|
return IrInstSrcIdSpillEnd;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcWasmMemorySize *) {
|
|
return IrInstSrcIdWasmMemorySize;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcWasmMemoryGrow *) {
|
|
return IrInstSrcIdWasmMemoryGrow;
|
|
}
|
|
|
|
static constexpr IrInstSrcId ir_inst_id(IrInstSrcSrc *) {
|
|
return IrInstSrcIdSrc;
|
|
}
|
|
|
|
// Overload set mapping each analyzed/codegen (pass-2) instruction struct to
// its IrInstGenId enum tag. Mirrors the IrInstSrc overload set above; the
// ir_create_inst_gen<T>/ir_create_inst_noval<T> templates rely on these to
// fill in base.id for the concrete instruction type being created.
static constexpr IrInstGenId ir_inst_id(IrInstGenDeclVar *) { return IrInstGenIdDeclVar; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBr *) { return IrInstGenIdBr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenCondBr *) { return IrInstGenIdCondBr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSwitchBr *) { return IrInstGenIdSwitchBr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenPhi *) { return IrInstGenIdPhi; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBinaryNot *) { return IrInstGenIdBinaryNot; }
static constexpr IrInstGenId ir_inst_id(IrInstGenNegation *) { return IrInstGenIdNegation; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBinOp *) { return IrInstGenIdBinOp; }
static constexpr IrInstGenId ir_inst_id(IrInstGenLoadPtr *) { return IrInstGenIdLoadPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenStorePtr *) { return IrInstGenIdStorePtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenVectorStoreElem *) { return IrInstGenIdVectorStoreElem; }
static constexpr IrInstGenId ir_inst_id(IrInstGenStructFieldPtr *) { return IrInstGenIdStructFieldPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenUnionFieldPtr *) { return IrInstGenIdUnionFieldPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenElemPtr *) { return IrInstGenIdElemPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenVarPtr *) { return IrInstGenIdVarPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenReturnPtr *) { return IrInstGenIdReturnPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenCall *) { return IrInstGenIdCall; }
static constexpr IrInstGenId ir_inst_id(IrInstGenReturn *) { return IrInstGenIdReturn; }
static constexpr IrInstGenId ir_inst_id(IrInstGenCast *) { return IrInstGenIdCast; }
static constexpr IrInstGenId ir_inst_id(IrInstGenUnreachable *) { return IrInstGenIdUnreachable; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAsm *) { return IrInstGenIdAsm; }
static constexpr IrInstGenId ir_inst_id(IrInstGenTestNonNull *) { return IrInstGenIdTestNonNull; }
static constexpr IrInstGenId ir_inst_id(IrInstGenOptionalUnwrapPtr *) { return IrInstGenIdOptionalUnwrapPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenOptionalWrap *) { return IrInstGenIdOptionalWrap; }
static constexpr IrInstGenId ir_inst_id(IrInstGenUnionTag *) { return IrInstGenIdUnionTag; }
static constexpr IrInstGenId ir_inst_id(IrInstGenClz *) { return IrInstGenIdClz; }
static constexpr IrInstGenId ir_inst_id(IrInstGenCtz *) { return IrInstGenIdCtz; }
static constexpr IrInstGenId ir_inst_id(IrInstGenPopCount *) { return IrInstGenIdPopCount; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBswap *) { return IrInstGenIdBswap; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBitReverse *) { return IrInstGenIdBitReverse; }
static constexpr IrInstGenId ir_inst_id(IrInstGenRef *) { return IrInstGenIdRef; }
static constexpr IrInstGenId ir_inst_id(IrInstGenErrName *) { return IrInstGenIdErrName; }
static constexpr IrInstGenId ir_inst_id(IrInstGenCmpxchg *) { return IrInstGenIdCmpxchg; }
static constexpr IrInstGenId ir_inst_id(IrInstGenFence *) { return IrInstGenIdFence; }
static constexpr IrInstGenId ir_inst_id(IrInstGenReduce *) { return IrInstGenIdReduce; }
static constexpr IrInstGenId ir_inst_id(IrInstGenTruncate *) { return IrInstGenIdTruncate; }
static constexpr IrInstGenId ir_inst_id(IrInstGenShuffleVector *) { return IrInstGenIdShuffleVector; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSplat *) { return IrInstGenIdSplat; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBoolNot *) { return IrInstGenIdBoolNot; }
static constexpr IrInstGenId ir_inst_id(IrInstGenMemset *) { return IrInstGenIdMemset; }
static constexpr IrInstGenId ir_inst_id(IrInstGenMemcpy *) { return IrInstGenIdMemcpy; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSlice *) { return IrInstGenIdSlice; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBreakpoint *) { return IrInstGenIdBreakpoint; }
static constexpr IrInstGenId ir_inst_id(IrInstGenReturnAddress *) { return IrInstGenIdReturnAddress; }
static constexpr IrInstGenId ir_inst_id(IrInstGenFrameAddress *) { return IrInstGenIdFrameAddress; }
static constexpr IrInstGenId ir_inst_id(IrInstGenFrameHandle *) { return IrInstGenIdFrameHandle; }
static constexpr IrInstGenId ir_inst_id(IrInstGenFrameSize *) { return IrInstGenIdFrameSize; }
static constexpr IrInstGenId ir_inst_id(IrInstGenOverflowOp *) { return IrInstGenIdOverflowOp; }
static constexpr IrInstGenId ir_inst_id(IrInstGenTestErr *) { return IrInstGenIdTestErr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenMulAdd *) { return IrInstGenIdMulAdd; }
static constexpr IrInstGenId ir_inst_id(IrInstGenFloatOp *) { return IrInstGenIdFloatOp; }
static constexpr IrInstGenId ir_inst_id(IrInstGenUnwrapErrCode *) { return IrInstGenIdUnwrapErrCode; }
static constexpr IrInstGenId ir_inst_id(IrInstGenUnwrapErrPayload *) { return IrInstGenIdUnwrapErrPayload; }
static constexpr IrInstGenId ir_inst_id(IrInstGenErrWrapCode *) { return IrInstGenIdErrWrapCode; }
static constexpr IrInstGenId ir_inst_id(IrInstGenErrWrapPayload *) { return IrInstGenIdErrWrapPayload; }
static constexpr IrInstGenId ir_inst_id(IrInstGenPtrCast *) { return IrInstGenIdPtrCast; }
static constexpr IrInstGenId ir_inst_id(IrInstGenBitCast *) { return IrInstGenIdBitCast; }
static constexpr IrInstGenId ir_inst_id(IrInstGenWidenOrShorten *) { return IrInstGenIdWidenOrShorten; }
static constexpr IrInstGenId ir_inst_id(IrInstGenIntToPtr *) { return IrInstGenIdIntToPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenPtrToInt *) { return IrInstGenIdPtrToInt; }
static constexpr IrInstGenId ir_inst_id(IrInstGenIntToEnum *) { return IrInstGenIdIntToEnum; }
static constexpr IrInstGenId ir_inst_id(IrInstGenIntToErr *) { return IrInstGenIdIntToErr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenErrToInt *) { return IrInstGenIdErrToInt; }
static constexpr IrInstGenId ir_inst_id(IrInstGenPanic *) { return IrInstGenIdPanic; }
static constexpr IrInstGenId ir_inst_id(IrInstGenTagName *) { return IrInstGenIdTagName; }
static constexpr IrInstGenId ir_inst_id(IrInstGenFieldParentPtr *) { return IrInstGenIdFieldParentPtr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAlignCast *) { return IrInstGenIdAlignCast; }
static constexpr IrInstGenId ir_inst_id(IrInstGenErrorReturnTrace *) { return IrInstGenIdErrorReturnTrace; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAtomicRmw *) { return IrInstGenIdAtomicRmw; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAtomicLoad *) { return IrInstGenIdAtomicLoad; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAtomicStore *) { return IrInstGenIdAtomicStore; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSaveErrRetAddr *) { return IrInstGenIdSaveErrRetAddr; }
static constexpr IrInstGenId ir_inst_id(IrInstGenVectorToArray *) { return IrInstGenIdVectorToArray; }
static constexpr IrInstGenId ir_inst_id(IrInstGenArrayToVector *) { return IrInstGenIdArrayToVector; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAssertZero *) { return IrInstGenIdAssertZero; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAssertNonNull *) { return IrInstGenIdAssertNonNull; }
static constexpr IrInstGenId ir_inst_id(IrInstGenPtrOfArrayToSlice *) { return IrInstGenIdPtrOfArrayToSlice; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSuspendBegin *) { return IrInstGenIdSuspendBegin; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSuspendFinish *) { return IrInstGenIdSuspendFinish; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAwait *) { return IrInstGenIdAwait; }
static constexpr IrInstGenId ir_inst_id(IrInstGenResume *) { return IrInstGenIdResume; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSpillBegin *) { return IrInstGenIdSpillBegin; }
static constexpr IrInstGenId ir_inst_id(IrInstGenSpillEnd *) { return IrInstGenIdSpillEnd; }
static constexpr IrInstGenId ir_inst_id(IrInstGenVectorExtractElem *) { return IrInstGenIdVectorExtractElem; }
static constexpr IrInstGenId ir_inst_id(IrInstGenAlloca *) { return IrInstGenIdAlloca; }
static constexpr IrInstGenId ir_inst_id(IrInstGenConst *) { return IrInstGenIdConst; }
static constexpr IrInstGenId ir_inst_id(IrInstGenWasmMemorySize *) { return IrInstGenIdWasmMemorySize; }
static constexpr IrInstGenId ir_inst_id(IrInstGenWasmMemoryGrow *) { return IrInstGenIdWasmMemoryGrow; }
static constexpr IrInstGenId ir_inst_id(IrInstGenExtern *) { return IrInstGenIdExtern; }
|
|
|
|
template<typename T>
|
|
static T *ir_create_instruction(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
|
|
T *special_instruction = heap::c_allocator.create<T>();
|
|
special_instruction->base.id = ir_inst_id(special_instruction);
|
|
special_instruction->base.base.scope = scope;
|
|
special_instruction->base.base.source_node = source_node;
|
|
special_instruction->base.base.debug_id = exec_next_debug_id(irb->exec);
|
|
special_instruction->base.owner_bb = irb->current_basic_block;
|
|
return special_instruction;
|
|
}
|
|
|
|
template<typename T>
|
|
static T *ir_create_inst_gen(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
|
|
T *special_instruction = heap::c_allocator.create<T>();
|
|
special_instruction->base.id = ir_inst_id(special_instruction);
|
|
special_instruction->base.base.scope = scope;
|
|
special_instruction->base.base.source_node = source_node;
|
|
special_instruction->base.base.debug_id = exec_next_debug_id_gen(irb->exec);
|
|
special_instruction->base.owner_bb = irb->current_basic_block;
|
|
special_instruction->base.value = irb->codegen->pass1_arena->create<ZigValue>();
|
|
return special_instruction;
|
|
}
|
|
|
|
template<typename T>
|
|
static T *ir_create_inst_noval(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
|
|
T *special_instruction = heap::c_allocator.create<T>();
|
|
special_instruction->base.id = ir_inst_id(special_instruction);
|
|
special_instruction->base.base.scope = scope;
|
|
special_instruction->base.base.source_node = source_node;
|
|
special_instruction->base.base.debug_id = exec_next_debug_id_gen(irb->exec);
|
|
special_instruction->base.owner_bb = irb->current_basic_block;
|
|
return special_instruction;
|
|
}
|
|
|
|
template<typename T>
|
|
static T *ir_build_instruction(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
|
|
T *special_instruction = ir_create_instruction<T>(irb, scope, source_node);
|
|
ir_instruction_append(irb->current_basic_block, &special_instruction->base);
|
|
return special_instruction;
|
|
}
|
|
|
|
template<typename T>
|
|
static T *ir_build_inst_gen(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
|
|
T *special_instruction = ir_create_inst_gen<T>(irb, scope, source_node);
|
|
ir_inst_gen_append(irb->current_basic_block, &special_instruction->base);
|
|
return special_instruction;
|
|
}
|
|
|
|
template<typename T>
|
|
static T *ir_build_inst_noreturn(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
|
|
T *special_instruction = ir_create_inst_noval<T>(irb, scope, source_node);
|
|
special_instruction->base.value = irb->codegen->intern.for_unreachable();
|
|
ir_inst_gen_append(irb->current_basic_block, &special_instruction->base);
|
|
return special_instruction;
|
|
}
|
|
|
|
template<typename T>
|
|
static T *ir_build_inst_void(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
|
|
T *special_instruction = ir_create_inst_noval<T>(irb, scope, source_node);
|
|
special_instruction->base.value = irb->codegen->intern.for_void();
|
|
ir_inst_gen_append(irb->current_basic_block, &special_instruction->base);
|
|
return special_instruction;
|
|
}
|
|
|
|
// Creates a stack-allocation instruction for `fn` without appending it to
// any basic block; instead it is registered on fn->alloca_gen_list so the
// backend can emit all allocas in the function prologue. The result value
// has type `*var_type` (non-const pointer). ref_count starts at 1 because
// the instruction lives outside the normal block/use bookkeeping.
IrInstGen *ir_create_alloca(CodeGen *g, Scope *scope, AstNode *source_node, ZigFn *fn,
        ZigType *var_type, const char *name_hint)
{
    IrInstGenAlloca *result = heap::c_allocator.create<IrInstGenAlloca>();
    result->base.id = IrInstGenIdAlloca;
    result->base.base.scope = scope;
    result->base.base.source_node = source_node;
    result->base.base.ref_count = 1;
    result->base.value = g->pass1_arena->create<ZigValue>();
    result->base.value->type = get_pointer_to_type(g, var_type, false);
    result->name_hint = name_hint;
    fn->alloca_gen_list.append(result);
    return &result->base;
}
|
|
|
|
// Emits a cast instruction converting `value` to `dest_type` using the
// given cast strategy, and records the use of `value`.
static IrInstGen *ir_build_cast(IrAnalyze *ira, IrInst *source_instr, ZigType *dest_type,
        IrInstGen *value, CastOp cast_op)
{
    IrInstGenCast *cast_inst = ir_build_inst_gen<IrInstGenCast>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    cast_inst->base.value->type = dest_type;
    cast_inst->cast_op = cast_op;
    cast_inst->value = value;
    ir_ref_inst_gen(value);
    return &cast_inst->base;
}
|
|
|
|
// Emits a source-IR conditional branch terminating the current block.
// `is_comptime` is optional; when present it forces comptime evaluation of
// the branch and is reference-counted like any other operand.
static IrInstSrc *ir_build_cond_br(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *condition,
        IrBasicBlockSrc *then_block, IrBasicBlockSrc *else_block, IrInstSrc *is_comptime)
{
    IrInstSrcCondBr *br = ir_build_instruction<IrInstSrcCondBr>(irb, scope, source_node);
    br->base.is_noreturn = true;  // terminator: control never falls through
    br->condition = condition;
    br->then_block = then_block;
    br->else_block = else_block;
    br->is_comptime = is_comptime;

    ir_ref_instruction(condition, irb->current_basic_block);
    ir_ref_bb(then_block);
    ir_ref_bb(else_block);
    if (is_comptime != nullptr) {
        ir_ref_instruction(is_comptime, irb->current_basic_block);
    }

    return &br->base;
}
|
|
|
|
// Emits a gen-IR conditional branch (a noreturn terminator) and records the
// use of its condition operand.
static IrInstGen *ir_build_cond_br_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *condition,
        IrBasicBlockGen *then_block, IrBasicBlockGen *else_block)
{
    IrInstGenCondBr *br = ir_build_inst_noreturn<IrInstGenCondBr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    br->condition = condition;
    br->then_block = then_block;
    br->else_block = else_block;
    ir_ref_inst_gen(condition);
    return &br->base;
}
|
|
|
|
// Emits a source-IR return terminator. `operand` may be null (e.g. a
// return with no result expression), in which case no use is recorded.
static IrInstSrc *ir_build_return_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *operand) {
    IrInstSrcReturn *ret = ir_build_instruction<IrInstSrcReturn>(irb, scope, source_node);
    ret->base.is_noreturn = true;  // terminator
    ret->operand = operand;
    if (operand != nullptr) {
        ir_ref_instruction(operand, irb->current_basic_block);
    }
    return &ret->base;
}
|
|
|
|
// Emits a gen-IR return terminator; the optional operand, when present, has
// its use recorded.
static IrInstGen *ir_build_return_gen(IrAnalyze *ira, IrInst *source_inst, IrInstGen *operand) {
    IrInstGenReturn *ret = ir_build_inst_noreturn<IrInstGenReturn>(&ira->new_irb,
            source_inst->scope, source_inst->source_node);
    ret->operand = operand;
    if (operand != nullptr) {
        ir_ref_inst_gen(operand);
    }
    return &ret->base;
}
|
|
|
|
// Emits a constant instruction carrying the shared interned void value.
static IrInstSrc *ir_build_const_void(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcConst *result = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    ir_instruction_append(irb->current_basic_block, &result->base);
    result->value = irb->codegen->intern.for_void();
    return &result->base;
}
|
|
|
|
// Emits a constant instruction carrying the shared interned undefined value.
static IrInstSrc *ir_build_const_undefined(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcConst *result = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    ir_instruction_append(irb->current_basic_block, &result->base);
    result->value = irb->codegen->intern.for_undefined();
    // NOTE(review): this writes through the interned value; presumably
    // for_undefined() already has special == ConstValSpecialUndef, making
    // this a no-op — confirm against the intern implementation.
    result->value->special = ConstValSpecialUndef;
    return &result->base;
}
|
|
|
|
// Emits a comptime integer literal constant with the given unsigned value.
static IrInstSrc *ir_build_const_uint(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, uint64_t value) {
    IrInstSrcConst *result = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_num_lit_int;
    val->special = ConstValSpecialStatic;
    bigint_init_unsigned(&val->data.x_bigint, value);
    result->value = val;
    return &result->base;
}
|
|
|
|
// Emits a comptime integer literal constant copied from `bigint`.
static IrInstSrc *ir_build_const_bigint(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, BigInt *bigint) {
    IrInstSrcConst *result = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_num_lit_int;
    val->special = ConstValSpecialStatic;
    bigint_init_bigint(&val->data.x_bigint, bigint);
    result->value = val;
    return &result->base;
}
|
|
|
|
// Emits a comptime float literal constant copied from `bigfloat`.
static IrInstSrc *ir_build_const_bigfloat(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, BigFloat *bigfloat) {
    IrInstSrcConst *result = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_num_lit_float;
    val->special = ConstValSpecialStatic;
    bigfloat_init_bigfloat(&val->data.x_bigfloat, bigfloat);
    result->value = val;
    return &result->base;
}
|
|
|
|
// Emits a constant instruction carrying the shared interned null value.
static IrInstSrc *ir_build_const_null(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcConst *result = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    ir_instruction_append(irb->current_basic_block, &result->base);
    result->value = irb->codegen->intern.for_null();
    return &result->base;
}
|
|
|
|
// Emits a constant of type usize with the given value (unlike
// ir_build_const_uint, which produces a comptime integer literal).
static IrInstSrc *ir_build_const_usize(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, uint64_t value) {
    IrInstSrcConst *result = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_usize;
    val->special = ConstValSpecialStatic;
    bigint_init_unsigned(&val->data.x_bigint, value);
    result->value = val;
    return &result->base;
}
|
|
|
|
// Creates (without appending) a constant instruction of type `type` whose
// value is the given type entry. See ir_build_const_type for the appending
// variant.
static IrInstSrc *ir_create_const_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigType *type_entry)
{
    IrInstSrcConst *result = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_type;
    val->special = ConstValSpecialStatic;
    val->data.x_type = type_entry;
    result->value = val;
    return &result->base;
}
|
|
|
|
// Creates a type-valued constant and appends it to the current block.
static IrInstSrc *ir_build_const_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigType *type_entry)
{
    IrInstSrc *result = ir_create_const_type(irb, scope, source_node, type_entry);
    ir_instruction_append(irb->current_basic_block, result);
    return result;
}
|
|
|
|
// Emits a type-valued constant referring to an imported namespace type.
static IrInstSrc *ir_build_const_import(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigType *import) {
    IrInstSrcConst *result = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_type;
    val->special = ConstValSpecialStatic;
    val->data.x_type = import;
    result->value = val;
    return &result->base;
}
|
|
|
|
// Emits a bool constant with the given value.
static IrInstSrc *ir_build_const_bool(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, bool value) {
    IrInstSrcConst *result = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_bool;
    val->special = ConstValSpecialStatic;
    val->data.x_bool = value;
    result->value = val;
    return &result->base;
}
|
|
|
|
// Emits an enum-literal constant (`.name`) carrying the literal's name.
static IrInstSrc *ir_build_const_enum_literal(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *name) {
    IrInstSrcConst *result = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    ZigValue *val = irb->codegen->pass1_arena->create<ZigValue>();
    val->type = irb->codegen->builtin_types.entry_enum_literal;
    val->special = ConstValSpecialStatic;
    val->data.x_enum_literal = name;
    result->value = val;
    return &result->base;
}
|
|
|
|
// Creates (without appending) a string-literal constant; the value layout
// is filled in by init_const_str_lit.
static IrInstSrc *ir_create_const_str_lit(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *str) {
    IrInstSrcConst *result = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    result->value = irb->codegen->pass1_arena->create<ZigValue>();
    init_const_str_lit(irb->codegen, result->value, str);
    return &result->base;
}
|
|
|
|
// Creates a string-literal constant and appends it to the current block.
static IrInstSrc *ir_build_const_str_lit(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *str) {
    IrInstSrc *result = ir_create_const_str_lit(irb, scope, source_node, str);
    ir_instruction_append(irb->current_basic_block, result);
    return result;
}
|
|
|
|
// Emits a source-IR binary operation. `safety_check_on` controls whether
// runtime safety checks (e.g. overflow) are requested for this op.
static IrInstSrc *ir_build_bin_op(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrBinOp op_id,
        IrInstSrc *op1, IrInstSrc *op2, bool safety_check_on)
{
    IrInstSrcBinOp *bin_op = ir_build_instruction<IrInstSrcBinOp>(irb, scope, source_node);
    bin_op->op_id = op_id;
    bin_op->safety_check_on = safety_check_on;
    bin_op->op1 = op1;
    bin_op->op2 = op2;

    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);

    return &bin_op->base;
}
|
|
|
|
// Emits a gen-IR binary operation with an already-resolved result type,
// recording uses of both operands.
static IrInstGen *ir_build_bin_op_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *res_type,
        IrBinOp op_id, IrInstGen *op1, IrInstGen *op2, bool safety_check_on)
{
    IrInstGenBinOp *bin_op = ir_build_inst_gen<IrInstGenBinOp>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    bin_op->base.value->type = res_type;
    bin_op->op_id = op_id;
    bin_op->safety_check_on = safety_check_on;
    bin_op->op1 = op1;
    bin_op->op2 = op2;

    ir_ref_inst_gen(op1);
    ir_ref_inst_gen(op2);

    return &bin_op->base;
}
|
|
|
|
|
|
// Emits the `||` error-set merge of two operands. `type_name` optionally
// names the resulting merged error set.
static IrInstSrc *ir_build_merge_err_sets(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *op1, IrInstSrc *op2, Buf *type_name)
{
    IrInstSrcMergeErrSets *merge = ir_build_instruction<IrInstSrcMergeErrSets>(irb, scope, source_node);
    merge->type_name = type_name;
    merge->op1 = op1;
    merge->op2 = op2;

    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);

    return &merge->base;
}
|
|
|
|
// Emits an instruction yielding a pointer to `var`, recording the variable
// use. `crossed_fndef_scope`, when non-null, marks that the reference
// crosses a function-definition boundary (used for later validation).
static IrInstSrc *ir_build_var_ptr_x(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var,
        ScopeFnDef *crossed_fndef_scope)
{
    IrInstSrcVarPtr *var_ptr = ir_build_instruction<IrInstSrcVarPtr>(irb, scope, source_node);
    var_ptr->crossed_fndef_scope = crossed_fndef_scope;
    var_ptr->var = var;
    ir_ref_var(var);
    return &var_ptr->base;
}
|
|
|
|
// Convenience wrapper: variable pointer with no crossed fn-def scope.
static IrInstSrc *ir_build_var_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var) {
    return ir_build_var_ptr_x(irb, scope, source_node, var, /*crossed_fndef_scope=*/nullptr);
}
|
|
|
|
// Emits a gen-IR variable-pointer instruction and records the variable use.
static IrInstGen *ir_build_var_ptr_gen(IrAnalyze *ira, IrInst *source_instr, ZigVar *var) {
    IrInstGenVarPtr *var_ptr = ir_build_inst_gen<IrInstGenVarPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    var_ptr->var = var;
    ir_ref_var(var);
    return &var_ptr->base;
}
|
|
|
|
// Emits an instruction yielding the function's return pointer, typed `ty`.
static IrInstGen *ir_build_return_ptr(IrAnalyze *ira, Scope *scope, AstNode *source_node, ZigType *ty) {
    IrInstGenReturnPtr *ret_ptr = ir_build_inst_gen<IrInstGenReturnPtr>(&ira->new_irb, scope, source_node);
    ret_ptr->base.value->type = ty;
    return &ret_ptr->base;
}
|
|
|
|
// Source-IR: pointer to an indexed element. `safety_check_on` selects whether
// the analyzed instruction carries a runtime safety check.
static IrInstSrc *ir_build_elem_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *array_ptr, IrInstSrc *elem_index, bool safety_check_on, PtrLen ptr_len,
        AstNode *init_array_type_source_node)
{
    IrInstSrcElemPtr *elem_ptr = ir_build_instruction<IrInstSrcElemPtr>(irb, scope, source_node);
    elem_ptr->init_array_type_source_node = init_array_type_source_node;
    elem_ptr->ptr_len = ptr_len;
    elem_ptr->safety_check_on = safety_check_on;
    elem_ptr->elem_index = elem_index;
    elem_ptr->array_ptr = array_ptr;

    ir_ref_instruction(array_ptr, irb->current_basic_block);
    ir_ref_instruction(elem_index, irb->current_basic_block);

    return &elem_ptr->base;
}
|
|
|
|
// Gen-IR counterpart of ir_build_elem_ptr; `return_type` is the resulting
// pointer type computed by analysis.
static IrInstGen *ir_build_elem_ptr_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstGen *array_ptr, IrInstGen *elem_index, bool safety_check_on, ZigType *return_type)
{
    IrInstGenElemPtr *elem_ptr = ir_build_inst_gen<IrInstGenElemPtr>(&ira->new_irb, scope, source_node);
    elem_ptr->base.value->type = return_type;
    elem_ptr->safety_check_on = safety_check_on;
    elem_ptr->elem_index = elem_index;
    elem_ptr->array_ptr = array_ptr;

    ir_ref_inst_gen(array_ptr);
    ir_ref_inst_gen(elem_index);

    return &elem_ptr->base;
}
|
|
|
|
// Source-IR: field pointer where the field name is itself an instruction
// (e.g. @field). The buffer variant is ir_build_field_ptr.
static IrInstSrc *ir_build_field_ptr_instruction(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *container_ptr, IrInstSrc *field_name_expr, bool initializing)
{
    IrInstSrcFieldPtr *field_ptr = ir_build_instruction<IrInstSrcFieldPtr>(irb, scope, source_node);
    field_ptr->initializing = initializing;
    field_ptr->field_name_expr = field_name_expr;
    field_ptr->field_name_buffer = nullptr; // name comes from the expression
    field_ptr->container_ptr = container_ptr;

    ir_ref_instruction(container_ptr, irb->current_basic_block);
    ir_ref_instruction(field_name_expr, irb->current_basic_block);

    return &field_ptr->base;
}
|
|
|
|
// Source-IR: field pointer where the field name is a compile-time buffer.
// The instruction variant is ir_build_field_ptr_instruction.
static IrInstSrc *ir_build_field_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *container_ptr, Buf *field_name, bool initializing)
{
    IrInstSrcFieldPtr *field_ptr = ir_build_instruction<IrInstSrcFieldPtr>(irb, scope, source_node);
    field_ptr->initializing = initializing;
    field_ptr->field_name_expr = nullptr; // name comes from the buffer
    field_ptr->field_name_buffer = field_name;
    field_ptr->container_ptr = container_ptr;

    ir_ref_instruction(container_ptr, irb->current_basic_block);

    return &field_ptr->base;
}
|
|
|
|
// Source-IR: @hasField builtin — tests whether a container type has a field.
static IrInstSrc *ir_build_has_field(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *container_type, IrInstSrc *field_name)
{
    IrInstSrcHasField *has_field = ir_build_instruction<IrInstSrcHasField>(irb, scope, source_node);
    has_field->field_name = field_name;
    has_field->container_type = container_type;

    ir_ref_instruction(container_type, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);

    return &has_field->base;
}
|
|
|
|
// Gen-IR: pointer to a resolved struct field; `ptr_type` is the analyzed
// pointer type of the result.
static IrInstGen *ir_build_struct_field_ptr(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *struct_ptr, TypeStructField *field, ZigType *ptr_type)
{
    IrInstGenStructFieldPtr *result = ir_build_inst_gen<IrInstGenStructFieldPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    result->base.value->type = ptr_type;
    result->field = field;
    result->struct_ptr = struct_ptr;

    ir_ref_inst_gen(struct_ptr);

    return &result->base;
}
|
|
|
|
// Gen-IR: pointer to a resolved union field. `safety_check_on` controls the
// active-tag check; `initializing` marks stores that activate the field.
static IrInstGen *ir_build_union_field_ptr(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *union_ptr, TypeUnionField *field, bool safety_check_on, bool initializing, ZigType *ptr_type)
{
    IrInstGenUnionFieldPtr *result = ir_build_inst_gen<IrInstGenUnionFieldPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    result->base.value->type = ptr_type;
    result->field = field;
    result->union_ptr = union_ptr;
    result->safety_check_on = safety_check_on;
    result->initializing = initializing;

    ir_ref_inst_gen(union_ptr);

    return &result->base;
}
|
|
|
|
// Source-IR: @call with an options struct and an args tuple instruction.
static IrInstSrc *ir_build_call_extra(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *options, IrInstSrc *fn_ref, IrInstSrc *args, ResultLoc *result_loc)
{
    IrInstSrcCallExtra *call = ir_build_instruction<IrInstSrcCallExtra>(irb, scope, source_node);
    call->result_loc = result_loc;
    call->args = args;
    call->fn_ref = fn_ref;
    call->options = options;

    ir_ref_instruction(options, irb->current_basic_block);
    ir_ref_instruction(fn_ref, irb->current_basic_block);
    ir_ref_instruction(args, irb->current_basic_block);

    return &call->base;
}
|
|
|
|
// Source-IR: async @call variant carrying a return pointer (optional) and an
// explicit stack to run the frame on.
static IrInstSrc *ir_build_async_call_extra(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        CallModifier modifier, IrInstSrc *fn_ref, IrInstSrc *ret_ptr, IrInstSrc *new_stack, IrInstSrc *args, ResultLoc *result_loc)
{
    IrInstSrcAsyncCallExtra *call = ir_build_instruction<IrInstSrcAsyncCallExtra>(irb, scope, source_node);
    call->result_loc = result_loc;
    call->args = args;
    call->new_stack = new_stack;
    call->ret_ptr = ret_ptr;
    call->fn_ref = fn_ref;
    call->modifier = modifier;

    ir_ref_instruction(fn_ref, irb->current_basic_block);
    if (ret_ptr != nullptr) {
        ir_ref_instruction(ret_ptr, irb->current_basic_block);
    }
    ir_ref_instruction(new_stack, irb->current_basic_block);
    ir_ref_instruction(args, irb->current_basic_block);

    return &call->base;
}
|
|
|
|
// Source-IR: @call with an options struct and an explicit argument array.
static IrInstSrc *ir_build_call_args(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *options, IrInstSrc *fn_ref, IrInstSrc **args_ptr, size_t args_len,
        ResultLoc *result_loc)
{
    IrInstSrcCallArgs *call = ir_build_instruction<IrInstSrcCallArgs>(irb, scope, source_node);
    call->result_loc = result_loc;
    call->args_len = args_len;
    call->args_ptr = args_ptr;
    call->fn_ref = fn_ref;
    call->options = options;

    ir_ref_instruction(options, irb->current_basic_block);
    ir_ref_instruction(fn_ref, irb->current_basic_block);
    for (size_t arg_i = 0; arg_i < args_len; arg_i += 1) {
        ir_ref_instruction(args_ptr[arg_i], irb->current_basic_block);
    }

    return &call->base;
}
|
|
|
|
// Source-IR: general function call. `fn_ref`, `ret_ptr`, and `new_stack` are
// each optional and only ref-counted when present.
static IrInstSrc *ir_build_call_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigFn *fn_entry, IrInstSrc *fn_ref, size_t arg_count, IrInstSrc **args,
        IrInstSrc *ret_ptr, CallModifier modifier, bool is_async_call_builtin,
        IrInstSrc *new_stack, ResultLoc *result_loc)
{
    IrInstSrcCall *call = ir_build_instruction<IrInstSrcCall>(irb, scope, source_node);
    call->ret_ptr = ret_ptr;
    call->result_loc = result_loc;
    call->new_stack = new_stack;
    call->is_async_call_builtin = is_async_call_builtin;
    call->modifier = modifier;
    call->arg_count = arg_count;
    call->args = args;
    call->fn_ref = fn_ref;
    call->fn_entry = fn_entry;

    if (fn_ref != nullptr) {
        ir_ref_instruction(fn_ref, irb->current_basic_block);
    }
    for (size_t arg_i = 0; arg_i < arg_count; arg_i += 1) {
        ir_ref_instruction(args[arg_i], irb->current_basic_block);
    }
    if (ret_ptr != nullptr) {
        ir_ref_instruction(ret_ptr, irb->current_basic_block);
    }
    if (new_stack != nullptr) {
        ir_ref_instruction(new_stack, irb->current_basic_block);
    }

    return &call->base;
}
|
|
|
|
// Gen-IR: analyzed function call. Returns the concrete IrInstGenCall (not the
// base) so the caller can keep tweaking the node.
static IrInstGenCall *ir_build_call_gen(IrAnalyze *ira, IrInst *source_instruction,
        ZigFn *fn_entry, IrInstGen *fn_ref, size_t arg_count, IrInstGen **args,
        CallModifier modifier, IrInstGen *new_stack, bool is_async_call_builtin,
        IrInstGen *result_loc, ZigType *return_type)
{
    IrInstGenCall *call = ir_build_inst_gen<IrInstGenCall>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    call->base.value->type = return_type;
    call->result_loc = result_loc;
    call->new_stack = new_stack;
    call->is_async_call_builtin = is_async_call_builtin;
    call->modifier = modifier;
    call->arg_count = arg_count;
    call->args = args;
    call->fn_ref = fn_ref;
    call->fn_entry = fn_entry;

    if (fn_ref != nullptr) {
        ir_ref_inst_gen(fn_ref);
    }
    for (size_t arg_i = 0; arg_i < arg_count; arg_i += 1) {
        ir_ref_inst_gen(args[arg_i]);
    }
    if (new_stack != nullptr) {
        ir_ref_inst_gen(new_stack);
    }
    if (result_loc != nullptr) {
        ir_ref_inst_gen(result_loc);
    }

    return call;
}
|
|
|
|
// Source-IR: phi node merging one value per predecessor block.
// incoming_count must be a real count (non-zero, not the SIZE_MAX sentinel).
static IrInstSrc *ir_build_phi(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t incoming_count, IrBasicBlockSrc **incoming_blocks, IrInstSrc **incoming_values,
        ResultLocPeerParent *peer_parent)
{
    assert(incoming_count != 0);
    assert(incoming_count != SIZE_MAX);

    IrInstSrcPhi *phi = ir_build_instruction<IrInstSrcPhi>(irb, scope, source_node);
    phi->peer_parent = peer_parent;
    phi->incoming_values = incoming_values;
    phi->incoming_blocks = incoming_blocks;
    phi->incoming_count = incoming_count;

    for (size_t i = 0; i < incoming_count; i += 1) {
        ir_ref_bb(incoming_blocks[i]);
        ir_ref_instruction(incoming_values[i], irb->current_basic_block);
    }

    return &phi->base;
}
|
|
|
|
// Gen-IR: analyzed phi node. Only the incoming values are ref-counted here;
// blocks are recorded without an explicit ref.
static IrInstGen *ir_build_phi_gen(IrAnalyze *ira, IrInst *source_instr, size_t incoming_count,
        IrBasicBlockGen **incoming_blocks, IrInstGen **incoming_values, ZigType *result_type)
{
    assert(incoming_count != 0);
    assert(incoming_count != SIZE_MAX);

    IrInstGenPhi *phi = ir_build_inst_gen<IrInstGenPhi>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    phi->base.value->type = result_type;
    phi->incoming_values = incoming_values;
    phi->incoming_blocks = incoming_blocks;
    phi->incoming_count = incoming_count;

    for (size_t i = 0; i < incoming_count; i += 1)
        ir_ref_inst_gen(incoming_values[i]);

    return &phi->base;
}
|
|
|
|
// Source-IR: unconditional branch (noreturn). `is_comptime` is optional and
// only ref-counted when provided.
static IrInstSrc *ir_build_br(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrBasicBlockSrc *dest_block, IrInstSrc *is_comptime)
{
    IrInstSrcBr *br = ir_build_instruction<IrInstSrcBr>(irb, scope, source_node);
    br->base.is_noreturn = true;
    br->is_comptime = is_comptime;
    br->dest_block = dest_block;

    ir_ref_bb(dest_block);
    if (is_comptime != nullptr) {
        ir_ref_instruction(is_comptime, irb->current_basic_block);
    }

    return &br->base;
}
|
|
|
|
// Gen-IR: unconditional branch to `dest_block` (noreturn form).
static IrInstGen *ir_build_br_gen(IrAnalyze *ira, IrInst *source_instr, IrBasicBlockGen *dest_block) {
    IrInstGenBr *br = ir_build_inst_noreturn<IrInstGenBr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    br->dest_block = dest_block;
    return &br->base;
}
|
|
|
|
// Source-IR: construct a pointer type with all of its attributes. The
// sentinel and align operands are optional and ref-counted only if present.
static IrInstSrc *ir_build_ptr_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *child_type, bool is_const, bool is_volatile, PtrLen ptr_len,
        IrInstSrc *sentinel, IrInstSrc *align_value,
        uint32_t bit_offset_start, uint32_t host_int_bytes, bool is_allow_zero)
{
    IrInstSrcPtrType *ptr_type = ir_build_instruction<IrInstSrcPtrType>(irb, scope, source_node);
    ptr_type->is_allow_zero = is_allow_zero;
    ptr_type->host_int_bytes = host_int_bytes;
    ptr_type->bit_offset_start = bit_offset_start;
    ptr_type->ptr_len = ptr_len;
    ptr_type->is_volatile = is_volatile;
    ptr_type->is_const = is_const;
    ptr_type->child_type = child_type;
    ptr_type->align_value = align_value;
    ptr_type->sentinel = sentinel;

    if (sentinel != nullptr) {
        ir_ref_instruction(sentinel, irb->current_basic_block);
    }
    if (align_value != nullptr) {
        ir_ref_instruction(align_value, irb->current_basic_block);
    }
    ir_ref_instruction(child_type, irb->current_basic_block);

    return &ptr_type->base;
}
|
|
|
|
// Source-IR: unary operation carrying l-value intent and a result location.
static IrInstSrc *ir_build_un_op_lval(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrUnOp op_id,
        IrInstSrc *value, LVal lval, ResultLoc *result_loc)
{
    IrInstSrcUnOp *un_op = ir_build_instruction<IrInstSrcUnOp>(irb, scope, source_node);
    un_op->result_loc = result_loc;
    un_op->lval = lval;
    un_op->value = value;
    un_op->op_id = op_id;

    ir_ref_instruction(value, irb->current_basic_block);

    return &un_op->base;
}
|
|
|
|
// Convenience wrapper around ir_build_un_op_lval for a plain r-value unary op
// with no result location.
static IrInstSrc *ir_build_un_op(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrUnOp op_id,
        IrInstSrc *value)
{
    return ir_build_un_op_lval(irb, scope, source_node, op_id, value, LValNone, nullptr);
}
|
|
|
|
// Gen-IR: arithmetic negation; `wrapping` selects the wrapping variant.
static IrInstGen *ir_build_negation(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand, ZigType *expr_type, bool wrapping) {
    IrInstGenNegation *neg = ir_build_inst_gen<IrInstGenNegation>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    neg->base.value->type = expr_type;
    neg->wrapping = wrapping;
    neg->operand = operand;

    ir_ref_inst_gen(operand);

    return &neg->base;
}
|
|
|
|
// Gen-IR: bitwise NOT of `operand`, typed as `expr_type`.
static IrInstGen *ir_build_binary_not(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand,
        ZigType *expr_type)
{
    IrInstGenBinaryNot *result = ir_build_inst_gen<IrInstGenBinaryNot>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    result->base.value->type = expr_type;
    result->operand = operand;

    ir_ref_inst_gen(operand);

    return &result->base;
}
|
|
|
|
// Source-IR: container initialization from a positional element list. Each
// element already has its own result location; the overall result location
// is optional.
static IrInstSrc *ir_build_container_init_list(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t item_count, IrInstSrc **elem_result_loc_list, IrInstSrc *result_loc,
        AstNode *init_array_type_source_node)
{
    IrInstSrcContainerInitList *init_list =
        ir_build_instruction<IrInstSrcContainerInitList>(irb, scope, source_node);
    init_list->init_array_type_source_node = init_array_type_source_node;
    init_list->result_loc = result_loc;
    init_list->elem_result_loc_list = elem_result_loc_list;
    init_list->item_count = item_count;

    for (size_t i = 0; i < item_count; i += 1)
        ir_ref_instruction(elem_result_loc_list[i], irb->current_basic_block);
    if (result_loc != nullptr) {
        ir_ref_instruction(result_loc, irb->current_basic_block);
    }

    return &init_list->base;
}
|
|
|
|
// Source-IR: container initialization from named fields; refs each field's
// result location plus the optional overall result location.
static IrInstSrc *ir_build_container_init_fields(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t field_count, IrInstSrcContainerInitFieldsField *fields, IrInstSrc *result_loc)
{
    IrInstSrcContainerInitFields *init_fields =
        ir_build_instruction<IrInstSrcContainerInitFields>(irb, scope, source_node);
    init_fields->result_loc = result_loc;
    init_fields->fields = fields;
    init_fields->field_count = field_count;

    for (size_t i = 0; i < field_count; i += 1)
        ir_ref_instruction(fields[i].result_loc, irb->current_basic_block);
    if (result_loc != nullptr) {
        ir_ref_instruction(result_loc, irb->current_basic_block);
    }

    return &init_fields->base;
}
|
|
|
|
// Source-IR: unreachable terminator (marks the block as noreturn).
static IrInstSrc *ir_build_unreachable(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcUnreachable *result = ir_build_instruction<IrInstSrcUnreachable>(irb, scope, source_node);
    result->base.is_noreturn = true;
    return &result->base;
}
|
|
|
|
// Gen-IR: unreachable terminator.
static IrInstGen *ir_build_unreachable_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenUnreachable *result = ir_build_inst_noreturn<IrInstGenUnreachable>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    return &result->base;
}
|
|
|
|
// Source-IR: store `value` through `ptr`. Returns the concrete node type so
// callers can set extra fields afterwards.
static IrInstSrcStorePtr *ir_build_store_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *ptr, IrInstSrc *value)
{
    IrInstSrcStorePtr *store = ir_build_instruction<IrInstSrcStorePtr>(irb, scope, source_node);
    store->value = value;
    store->ptr = ptr;

    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);

    return store;
}
|
|
|
|
// Gen-IR: store `value` through `ptr` (void-typed instruction).
static IrInstGen *ir_build_store_ptr_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *ptr, IrInstGen *value) {
    IrInstGenStorePtr *store = ir_build_inst_void<IrInstGenStorePtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    store->value = value;
    store->ptr = ptr;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(value);

    return &store->base;
}
|
|
|
|
// Gen-IR: store `value` into one lane of the vector pointed to by
// `vector_ptr`, selected by `index` (void-typed instruction).
static IrInstGen *ir_build_vector_store_elem(IrAnalyze *ira, IrInst *src_inst,
        IrInstGen *vector_ptr, IrInstGen *index, IrInstGen *value)
{
    IrInstGenVectorStoreElem *store = ir_build_inst_void<IrInstGenVectorStoreElem>(
            &ira->new_irb, src_inst->scope, src_inst->source_node);
    store->value = value;
    store->index = index;
    store->vector_ptr = vector_ptr;

    ir_ref_inst_gen(vector_ptr);
    ir_ref_inst_gen(index);
    ir_ref_inst_gen(value);

    return &store->base;
}
|
|
|
|
// Source-IR: variable declaration; `align_value` is optional.
static IrInstSrc *ir_build_var_decl_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigVar *var, IrInstSrc *align_value, IrInstSrc *ptr)
{
    IrInstSrcDeclVar *decl = ir_build_instruction<IrInstSrcDeclVar>(irb, scope, source_node);
    decl->ptr = ptr;
    decl->align_value = align_value;
    decl->var = var;

    if (align_value != nullptr) {
        ir_ref_instruction(align_value, irb->current_basic_block);
    }
    ir_ref_instruction(ptr, irb->current_basic_block);

    return &decl->base;
}
|
|
|
|
// Gen-IR: variable declaration. The instruction itself produces a static
// void value (it exists for its side effect of binding var to var_ptr).
static IrInstGen *ir_build_var_decl_gen(IrAnalyze *ira, IrInst *source_instruction,
        ZigVar *var, IrInstGen *var_ptr)
{
    IrInstGenDeclVar *decl = ir_build_inst_gen<IrInstGenDeclVar>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    decl->base.value->type = ira->codegen->builtin_types.entry_void;
    decl->base.value->special = ConstValSpecialStatic;
    decl->var_ptr = var_ptr;
    decl->var = var;

    ir_ref_inst_gen(var_ptr);

    return &decl->base;
}
|
|
|
|
// Source-IR: @export builtin with a target and an options struct.
static IrInstSrc *ir_build_export(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target, IrInstSrc *options)
{
    IrInstSrcExport *export_inst = ir_build_instruction<IrInstSrcExport>(irb, scope, source_node);
    export_inst->options = options;
    export_inst->target = target;

    ir_ref_instruction(target, irb->current_basic_block);
    ir_ref_instruction(options, irb->current_basic_block);

    return &export_inst->base;
}
|
|
|
|
// Source-IR: @extern builtin with a type and an options struct.
static IrInstSrc *ir_build_extern(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type, IrInstSrc *options)
{
    IrInstSrcExtern *extern_inst = ir_build_instruction<IrInstSrcExtern>(irb, scope, source_node);
    extern_inst->options = options;
    extern_inst->type = type;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(options, irb->current_basic_block);

    return &extern_inst->base;
}
|
|
|
|
// Gen-IR: resolved @extern — references an external symbol by name with the
// given linkage and thread-locality; no operand instructions to ref.
static IrInstGen *ir_build_extern_gen(IrAnalyze *ira, IrInst *source_instr, Buf *name,
        GlobalLinkageId linkage, bool is_thread_local, ZigType *expr_type)
{
    IrInstGenExtern *result = ir_build_inst_gen<IrInstGenExtern>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    result->base.value->type = expr_type;
    result->is_thread_local = is_thread_local;
    result->linkage = linkage;
    result->name = name;

    return &result->base;
}
|
|
|
|
// Source-IR: dereference `ptr`.
static IrInstSrc *ir_build_load_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *ptr) {
    IrInstSrcLoadPtr *load = ir_build_instruction<IrInstSrcLoadPtr>(irb, scope, source_node);
    load->ptr = ptr;

    ir_ref_instruction(ptr, irb->current_basic_block);

    return &load->base;
}
|
|
|
|
// Gen-IR: dereference `ptr` producing a value of type `ty`; `result_loc`
// is optional.
static IrInstGen *ir_build_load_ptr_gen(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *ptr, ZigType *ty, IrInstGen *result_loc)
{
    IrInstGenLoadPtr *load = ir_build_inst_gen<IrInstGenLoadPtr>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    load->base.value->type = ty;
    load->result_loc = result_loc;
    load->ptr = ptr;

    ir_ref_inst_gen(ptr);
    if (result_loc != nullptr) {
        ir_ref_inst_gen(result_loc);
    }

    return &load->base;
}
|
|
|
|
// Source-IR: @TypeOf with multiple operands (peer type resolution form);
// requires at least two values — the single-operand form is ir_build_typeof_1.
static IrInstSrc *ir_build_typeof_n(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc **values, size_t value_count)
{
    assert(value_count >= 2);

    IrInstSrcTypeOf *type_of = ir_build_instruction<IrInstSrcTypeOf>(irb, scope, source_node);
    type_of->value_count = value_count;
    type_of->value.list = values;

    for (size_t i = 0; i < value_count; i += 1)
        ir_ref_instruction(values[i], irb->current_basic_block);

    return &type_of->base;
}
|
|
|
|
// Source-IR: @TypeOf with a single operand (value_count stays at its default).
static IrInstSrc *ir_build_typeof_1(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcTypeOf *type_of = ir_build_instruction<IrInstSrcTypeOf>(irb, scope, source_node);
    type_of->value.scalar = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &type_of->base;
}
|
|
|
|
// Source-IR: @setCold builtin.
static IrInstSrc *ir_build_set_cold(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *is_cold) {
    IrInstSrcSetCold *set_cold = ir_build_instruction<IrInstSrcSetCold>(irb, scope, source_node);
    set_cold->is_cold = is_cold;

    ir_ref_instruction(is_cold, irb->current_basic_block);

    return &set_cold->base;
}
|
|
|
|
// Source-IR: @setRuntimeSafety builtin.
static IrInstSrc *ir_build_set_runtime_safety(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *safety_on)
{
    IrInstSrcSetRuntimeSafety *set_safety = ir_build_instruction<IrInstSrcSetRuntimeSafety>(irb, scope, source_node);
    set_safety->safety_on = safety_on;

    ir_ref_instruction(safety_on, irb->current_basic_block);

    return &set_safety->base;
}
|
|
|
|
// Source-IR: @setFloatMode builtin.
static IrInstSrc *ir_build_set_float_mode(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *mode_value)
{
    IrInstSrcSetFloatMode *set_mode = ir_build_instruction<IrInstSrcSetFloatMode>(irb, scope, source_node);
    set_mode->mode_value = mode_value;

    ir_ref_instruction(mode_value, irb->current_basic_block);

    return &set_mode->base;
}
|
|
|
|
// Source-IR: construct an array type `[size:sentinel]child_type`; the
// sentinel operand is optional.
static IrInstSrc *ir_build_array_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *size,
        IrInstSrc *sentinel, IrInstSrc *child_type)
{
    IrInstSrcArrayType *array_type = ir_build_instruction<IrInstSrcArrayType>(irb, scope, source_node);
    array_type->child_type = child_type;
    array_type->sentinel = sentinel;
    array_type->size = size;

    ir_ref_instruction(size, irb->current_basic_block);
    if (sentinel != nullptr) {
        ir_ref_instruction(sentinel, irb->current_basic_block);
    }
    ir_ref_instruction(child_type, irb->current_basic_block);

    return &array_type->base;
}
|
|
|
|
// Source-IR: construct an anyframe type; a null payload type means the plain
// `anyframe` (no ->T payload).
static IrInstSrc *ir_build_anyframe_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *payload_type)
{
    IrInstSrcAnyFrameType *frame_type = ir_build_instruction<IrInstSrcAnyFrameType>(irb, scope, source_node);
    frame_type->payload_type = payload_type;

    if (payload_type != nullptr) {
        ir_ref_instruction(payload_type, irb->current_basic_block);
    }

    return &frame_type->base;
}
|
|
|
|
// Source-IR: construct a slice type with its attributes; sentinel and align
// operands are optional.
static IrInstSrc *ir_build_slice_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *child_type, bool is_const, bool is_volatile,
        IrInstSrc *sentinel, IrInstSrc *align_value, bool is_allow_zero)
{
    IrInstSrcSliceType *slice_type = ir_build_instruction<IrInstSrcSliceType>(irb, scope, source_node);
    slice_type->is_allow_zero = is_allow_zero;
    slice_type->align_value = align_value;
    slice_type->sentinel = sentinel;
    slice_type->child_type = child_type;
    slice_type->is_volatile = is_volatile;
    slice_type->is_const = is_const;

    if (sentinel != nullptr) {
        ir_ref_instruction(sentinel, irb->current_basic_block);
    }
    if (align_value != nullptr) {
        ir_ref_instruction(align_value, irb->current_basic_block);
    }
    ir_ref_instruction(child_type, irb->current_basic_block);

    return &slice_type->base;
}
|
|
|
|
// Source-IR: inline assembly. Output entries may be null (ref only the
// non-null ones); inputs are refed unconditionally. Loop bounds come from the
// AsmExpr AST node, which must back `source_node`.
static IrInstSrc *ir_build_asm_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *asm_template, IrInstSrc **input_list, IrInstSrc **output_types,
        ZigVar **output_vars, size_t return_count, bool has_side_effects, bool is_global)
{
    IrInstSrcAsm *asm_inst = ir_build_instruction<IrInstSrcAsm>(irb, scope, source_node);
    asm_inst->is_global = is_global;
    asm_inst->has_side_effects = has_side_effects;
    asm_inst->return_count = return_count;
    asm_inst->output_vars = output_vars;
    asm_inst->output_types = output_types;
    asm_inst->input_list = input_list;
    asm_inst->asm_template = asm_template;

    assert(source_node->type == NodeTypeAsmExpr);

    size_t output_len = source_node->data.asm_expr.output_list.length;
    for (size_t i = 0; i < output_len; i += 1) {
        if (output_types[i] != nullptr) {
            ir_ref_instruction(output_types[i], irb->current_basic_block);
        }
    }

    size_t input_len = source_node->data.asm_expr.input_list.length;
    for (size_t i = 0; i < input_len; i += 1) {
        ir_ref_instruction(input_list[i], irb->current_basic_block);
    }

    return &asm_inst->base;
}
|
|
|
|
// Gen-IR: analyzed inline assembly with a tokenized template. Ref-count rules
// mirror ir_build_asm_src: null output entries are skipped, inputs always refed.
static IrInstGen *ir_build_asm_gen(IrAnalyze *ira, IrInst *source_instr,
        Buf *asm_template, AsmToken *token_list, size_t token_list_len,
        IrInstGen **input_list, IrInstGen **output_types, ZigVar **output_vars, size_t return_count,
        bool has_side_effects, ZigType *return_type)
{
    IrInstGenAsm *asm_inst = ir_build_inst_gen<IrInstGenAsm>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    asm_inst->base.value->type = return_type;
    asm_inst->has_side_effects = has_side_effects;
    asm_inst->return_count = return_count;
    asm_inst->output_vars = output_vars;
    asm_inst->output_types = output_types;
    asm_inst->input_list = input_list;
    asm_inst->token_list_len = token_list_len;
    asm_inst->token_list = token_list;
    asm_inst->asm_template = asm_template;

    AstNode *asm_node = source_instr->source_node;
    assert(asm_node->type == NodeTypeAsmExpr);

    size_t output_len = asm_node->data.asm_expr.output_list.length;
    for (size_t i = 0; i < output_len; i += 1) {
        if (output_types[i] != nullptr) {
            ir_ref_inst_gen(output_types[i]);
        }
    }

    size_t input_len = asm_node->data.asm_expr.input_list.length;
    for (size_t i = 0; i < input_len; i += 1) {
        ir_ref_inst_gen(input_list[i]);
    }

    return &asm_inst->base;
}
|
|
|
|
// Source-IR: @sizeOf / @bitSizeOf, selected by `bit_size`.
static IrInstSrc *ir_build_size_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_value,
        bool bit_size)
{
    IrInstSrcSizeOf *size_of = ir_build_instruction<IrInstSrcSizeOf>(irb, scope, source_node);
    size_of->bit_size = bit_size;
    size_of->type_value = type_value;

    ir_ref_instruction(type_value, irb->current_basic_block);

    return &size_of->base;
}
|
|
|
|
// Source-IR: test whether an optional value is non-null.
static IrInstSrc *ir_build_test_non_null_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *value)
{
    IrInstSrcTestNonNull *test = ir_build_instruction<IrInstSrcTestNonNull>(irb, scope, source_node);
    test->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &test->base;
}
|
|
|
|
// Gen-IR: non-null test; result is always bool.
static IrInstGen *ir_build_test_non_null_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value) {
    IrInstGenTestNonNull *test = ir_build_inst_gen<IrInstGenTestNonNull>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    test->base.value->type = ira->codegen->builtin_types.entry_bool;
    test->value = value;

    ir_ref_inst_gen(value);

    return &test->base;
}
|
|
|
|
// Source-IR: pointer to an optional's payload; `safety_check_on` controls the
// null check on unwrap.
static IrInstSrc *ir_build_optional_unwrap_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *base_ptr, bool safety_check_on)
{
    IrInstSrcOptionalUnwrapPtr *unwrap = ir_build_instruction<IrInstSrcOptionalUnwrapPtr>(irb, scope, source_node);
    unwrap->safety_check_on = safety_check_on;
    unwrap->base_ptr = base_ptr;

    ir_ref_instruction(base_ptr, irb->current_basic_block);

    return &unwrap->base;
}
|
|
|
|
// Gen-IR: pointer to an optional's payload. `initializing` marks uses that
// write the payload rather than read it.
static IrInstGen *ir_build_optional_unwrap_ptr_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *base_ptr, bool safety_check_on, bool initializing, ZigType *result_type)
{
    IrInstGenOptionalUnwrapPtr *unwrap = ir_build_inst_gen<IrInstGenOptionalUnwrapPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    unwrap->base.value->type = result_type;
    unwrap->initializing = initializing;
    unwrap->safety_check_on = safety_check_on;
    unwrap->base_ptr = base_ptr;

    ir_ref_inst_gen(base_ptr);

    return &unwrap->base;
}
|
|
|
|
// Gen-IR: wrap a value into an optional; `result_loc` is optional.
static IrInstGen *ir_build_optional_wrap(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_ty,
        IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenOptionalWrap *wrap = ir_build_inst_gen<IrInstGenOptionalWrap>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    wrap->base.value->type = result_ty;
    wrap->result_loc = result_loc;
    wrap->operand = operand;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr) {
        ir_ref_inst_gen(result_loc);
    }

    return &wrap->base;
}
|
|
|
|
// Gen-IR: wrap a payload value into an error union; `result_loc` is optional.
static IrInstGen *ir_build_err_wrap_payload(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenErrWrapPayload *wrap = ir_build_inst_gen<IrInstGenErrWrapPayload>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    wrap->base.value->type = result_type;
    wrap->result_loc = result_loc;
    wrap->operand = operand;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr) {
        ir_ref_inst_gen(result_loc);
    }

    return &wrap->base;
}
|
|
|
|
// Gen-IR: wrap an error code into an error union; `result_loc` is optional.
static IrInstGen *ir_build_err_wrap_code(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenErrWrapCode *wrap = ir_build_inst_gen<IrInstGenErrWrapCode>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    wrap->base.value->type = result_type;
    wrap->result_loc = result_loc;
    wrap->operand = operand;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr) {
        ir_ref_inst_gen(result_loc);
    }

    return &wrap->base;
}
|
|
|
|
// Build the source-IR node for @clz with its result type and operand.
static IrInstSrc *ir_build_clz(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcClz *inst = ir_build_instruction<IrInstSrcClz>(irb, scope, source_node);
    inst->type = type;
    inst->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @clz instruction with a resolved result type.
static IrInstGen *ir_build_clz_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *result_type, IrInstGen *op) {
    IrInstGenClz *inst = ir_build_inst_gen<IrInstGenClz>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = result_type;
    inst->op = op;

    ir_ref_inst_gen(op);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @ctz with its result type and operand.
static IrInstSrc *ir_build_ctz(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcCtz *inst = ir_build_instruction<IrInstSrcCtz>(irb, scope, source_node);
    inst->type = type;
    inst->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @ctz instruction with a resolved result type.
static IrInstGen *ir_build_ctz_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *result_type, IrInstGen *op) {
    IrInstGenCtz *inst = ir_build_inst_gen<IrInstGenCtz>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = result_type;
    inst->op = op;

    ir_ref_inst_gen(op);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @popCount with its result type and operand.
static IrInstSrc *ir_build_pop_count(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcPopCount *inst = ir_build_instruction<IrInstSrcPopCount>(irb, scope, source_node);
    inst->type = type;
    inst->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @popCount instruction with a resolved result type.
static IrInstGen *ir_build_pop_count_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *result_type,
        IrInstGen *op)
{
    IrInstGenPopCount *inst = ir_build_inst_gen<IrInstGenPopCount>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = result_type;
    inst->op = op;

    ir_ref_inst_gen(op);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @byteSwap with its result type and operand.
static IrInstSrc *ir_build_bswap(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcBswap *inst = ir_build_instruction<IrInstSrcBswap>(irb, scope, source_node);
    inst->type = type;
    inst->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @byteSwap instruction; the result type equals
// the operand type.
static IrInstGen *ir_build_bswap_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *op_type,
        IrInstGen *op)
{
    IrInstGenBswap *inst = ir_build_inst_gen<IrInstGenBswap>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = op_type;
    inst->op = op;

    ir_ref_inst_gen(op);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @bitReverse with its result type and operand.
static IrInstSrc *ir_build_bit_reverse(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcBitReverse *inst = ir_build_instruction<IrInstSrcBitReverse>(irb, scope, source_node);
    inst->type = type;
    inst->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @bitReverse instruction with integer result
// type `int_type`.
static IrInstGen *ir_build_bit_reverse_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *int_type,
        IrInstGen *op)
{
    IrInstGenBitReverse *inst = ir_build_inst_gen<IrInstGenBitReverse>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = int_type;
    inst->op = op;

    ir_ref_inst_gen(op);

    return &inst->base;
}
|
|
|
|
// Build the source-IR switch branch. The instruction is noreturn: control
// always transfers to one of the case blocks or to `else_block`. Every
// case value and every referenced basic block gets its ref count bumped.
static IrInstSrcSwitchBr *ir_build_switch_br_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value, IrBasicBlockSrc *else_block, size_t case_count, IrInstSrcSwitchBrCase *cases,
        IrInstSrc *is_comptime, IrInstSrc *switch_prongs_void)
{
    IrInstSrcSwitchBr *inst = ir_build_instruction<IrInstSrcSwitchBr>(irb, scope, source_node);
    inst->base.is_noreturn = true;
    inst->target_value = target_value;
    inst->else_block = else_block;
    inst->case_count = case_count;
    inst->cases = cases;
    inst->is_comptime = is_comptime;
    inst->switch_prongs_void = switch_prongs_void;

    ir_ref_instruction(target_value, irb->current_basic_block);
    ir_ref_instruction(is_comptime, irb->current_basic_block);
    ir_ref_bb(else_block);
    ir_ref_instruction(switch_prongs_void, irb->current_basic_block);

    for (size_t i = 0; i < case_count; i += 1) {
        ir_ref_instruction(cases[i].value, irb->current_basic_block);
        ir_ref_bb(cases[i].block);
    }

    return inst;
}
|
|
|
|
// Build the analyzed (gen) switch branch (noreturn). Only the target value
// and the case values are instructions that need ref counting here.
static IrInstGenSwitchBr *ir_build_switch_br_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *target_value, IrBasicBlockGen *else_block, size_t case_count, IrInstGenSwitchBrCase *cases)
{
    IrInstGenSwitchBr *inst = ir_build_inst_noreturn<IrInstGenSwitchBr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->target_value = target_value;
    inst->else_block = else_block;
    inst->case_count = case_count;
    inst->cases = cases;

    ir_ref_inst_gen(target_value);

    for (size_t i = 0; i < case_count; i += 1) {
        ir_ref_inst_gen(cases[i].value);
    }

    return inst;
}
|
|
|
|
// Build the source-IR instruction that loads the switch target from its
// pointer.
static IrInstSrc *ir_build_switch_target(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value_ptr)
{
    IrInstSrcSwitchTarget *inst = ir_build_instruction<IrInstSrcSwitchTarget>(irb, scope, source_node);
    inst->target_value_ptr = target_value_ptr;

    ir_ref_instruction(target_value_ptr, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR instruction binding a switch prong's capture variable;
// `prongs_ptr`/`prongs_len` list the prong value instructions.
static IrInstSrc *ir_build_switch_var(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value_ptr, IrInstSrc **prongs_ptr, size_t prongs_len)
{
    IrInstSrcSwitchVar *inst = ir_build_instruction<IrInstSrcSwitchVar>(irb, scope, source_node);
    inst->target_value_ptr = target_value_ptr;
    inst->prongs_ptr = prongs_ptr;
    inst->prongs_len = prongs_len;

    ir_ref_instruction(target_value_ptr, irb->current_basic_block);
    for (size_t i = 0; i < prongs_len; i += 1) {
        ir_ref_instruction(prongs_ptr[i], irb->current_basic_block);
    }

    return &inst->base;
}
|
|
|
|
// For this instruction the switch_br must be set later.
|
|
static IrInstSrcSwitchElseVar *ir_build_switch_else_var(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
|
|
IrInstSrc *target_value_ptr)
|
|
{
|
|
IrInstSrcSwitchElseVar *instruction = ir_build_instruction<IrInstSrcSwitchElseVar>(irb, scope, source_node);
|
|
instruction->target_value_ptr = target_value_ptr;
|
|
|
|
ir_ref_instruction(target_value_ptr, irb->current_basic_block);
|
|
|
|
return instruction;
|
|
}
|
|
|
|
// Build the analyzed (gen) instruction extracting a union's tag; the result
// has the union's tag type.
static IrInstGen *ir_build_union_tag(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value,
        ZigType *tag_type)
{
    IrInstGenUnionTag *inst = ir_build_inst_gen<IrInstGenUnionTag>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = tag_type;
    inst->value = value;

    ir_ref_inst_gen(value);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @import(name).
static IrInstSrc *ir_build_import(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcImport *inst = ir_build_instruction<IrInstSrcImport>(irb, scope, source_node);
    inst->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR instruction that takes the address of `value`.
static IrInstSrc *ir_build_ref_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcRef *inst = ir_build_instruction<IrInstSrcRef>(irb, scope, source_node);
    inst->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) address-of instruction; `result_loc` may be null.
static IrInstGen *ir_build_ref_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
        IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenRef *inst = ir_build_inst_gen<IrInstGenRef>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->result_loc = result_loc;
    inst->operand = operand;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr)
        ir_ref_inst_gen(result_loc);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @compileError(msg).
static IrInstSrc *ir_build_compile_err(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *msg) {
    IrInstSrcCompileErr *inst = ir_build_instruction<IrInstSrcCompileErr>(irb, scope, source_node);
    inst->msg = msg;

    ir_ref_instruction(msg, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @compileLog(...); every message operand is
// ref-counted.
static IrInstSrc *ir_build_compile_log(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t msg_count, IrInstSrc **msg_list)
{
    IrInstSrcCompileLog *inst = ir_build_instruction<IrInstSrcCompileLog>(irb, scope, source_node);
    inst->msg_count = msg_count;
    inst->msg_list = msg_list;

    for (size_t i = 0; i < msg_count; i += 1) {
        ir_ref_instruction(msg_list[i], irb->current_basic_block);
    }

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @errorName(value).
static IrInstSrc *ir_build_err_name(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcErrName *inst = ir_build_instruction<IrInstSrcErrName>(irb, scope, source_node);
    inst->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @errorName instruction whose result has the
// provided string type.
static IrInstGen *ir_build_err_name_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value,
        ZigType *str_type)
{
    IrInstGenErrName *inst = ir_build_inst_gen<IrInstGenErrName>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = str_type;
    inst->value = value;

    ir_ref_inst_gen(value);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @cImport (no operands).
static IrInstSrc *ir_build_c_import(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcCImport *inst = ir_build_instruction<IrInstSrcCImport>(irb, scope, source_node);
    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @cInclude(name).
static IrInstSrc *ir_build_c_include(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcCInclude *inst = ir_build_instruction<IrInstSrcCInclude>(irb, scope, source_node);
    inst->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @cDefine(name, value).
static IrInstSrc *ir_build_c_define(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name, IrInstSrc *value) {
    IrInstSrcCDefine *inst = ir_build_instruction<IrInstSrcCDefine>(irb, scope, source_node);
    inst->name = name;
    inst->value = value;

    ir_ref_instruction(name, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @cUndef(name).
static IrInstSrc *ir_build_c_undef(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcCUndef *inst = ir_build_instruction<IrInstSrcCUndef>(irb, scope, source_node);
    inst->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @embedFile(name).
static IrInstSrc *ir_build_embed_file(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcEmbedFile *inst = ir_build_instruction<IrInstSrcEmbedFile>(irb, scope, source_node);
    inst->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @cmpxchgWeak/@cmpxchgStrong. All six
// instruction operands are ref-counted; `result_loc` is a ResultLoc, not
// an instruction, so it gets no ref.
static IrInstSrc *ir_build_cmpxchg_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *ptr, IrInstSrc *cmp_value, IrInstSrc *new_value,
        IrInstSrc *success_order_value, IrInstSrc *failure_order_value, bool is_weak, ResultLoc *result_loc)
{
    IrInstSrcCmpxchg *inst = ir_build_instruction<IrInstSrcCmpxchg>(irb, scope, source_node);
    inst->type_value = type_value;
    inst->ptr = ptr;
    inst->cmp_value = cmp_value;
    inst->new_value = new_value;
    inst->success_order_value = success_order_value;
    inst->failure_order_value = failure_order_value;
    inst->is_weak = is_weak;
    inst->result_loc = result_loc;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(cmp_value, irb->current_basic_block);
    ir_ref_instruction(new_value, irb->current_basic_block);
    ir_ref_instruction(success_order_value, irb->current_basic_block);
    ir_ref_instruction(failure_order_value, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) cmpxchg instruction; the memory orders are
// already resolved to AtomicOrder values and `result_loc` may be null.
static IrInstGen *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
        IrInstGen *ptr, IrInstGen *cmp_value, IrInstGen *new_value,
        AtomicOrder success_order, AtomicOrder failure_order, bool is_weak, IrInstGen *result_loc)
{
    IrInstGenCmpxchg *inst = ir_build_inst_gen<IrInstGenCmpxchg>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->ptr = ptr;
    inst->cmp_value = cmp_value;
    inst->new_value = new_value;
    inst->success_order = success_order;
    inst->failure_order = failure_order;
    inst->is_weak = is_weak;
    inst->result_loc = result_loc;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(cmp_value);
    ir_ref_inst_gen(new_value);
    if (result_loc != nullptr)
        ir_ref_inst_gen(result_loc);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @fence(order).
static IrInstSrc *ir_build_fence(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *order) {
    IrInstSrcFence *inst = ir_build_instruction<IrInstSrcFence>(irb, scope, source_node);
    inst->order = order;

    ir_ref_instruction(order, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) fence instruction (void result); the order is a
// resolved AtomicOrder, so there is nothing to ref-count.
static IrInstGen *ir_build_fence_gen(IrAnalyze *ira, IrInst *source_instr, AtomicOrder order) {
    IrInstGenFence *inst = ir_build_inst_void<IrInstGenFence>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->order = order;
    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @reduce(op, value).
static IrInstSrc *ir_build_reduce(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *op, IrInstSrc *value) {
    IrInstSrcReduce *inst = ir_build_instruction<IrInstSrcReduce>(irb, scope, source_node);
    inst->op = op;
    inst->value = value;

    ir_ref_instruction(op, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @reduce instruction; `op` is a resolved ReduceOp
// enum, so only `value` is ref-counted.
static IrInstGen *ir_build_reduce_gen(IrAnalyze *ira, IrInst *source_instruction, ReduceOp op, IrInstGen *value, ZigType *result_type) {
    IrInstGenReduce *inst = ir_build_inst_gen<IrInstGenReduce>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->op = op;
    inst->value = value;

    ir_ref_inst_gen(value);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @truncate(dest_type, target).
static IrInstSrc *ir_build_truncate(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcTruncate *inst = ir_build_instruction<IrInstSrcTruncate>(irb, scope, source_node);
    inst->dest_type = dest_type;
    inst->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) truncate instruction with a resolved destination
// type.
static IrInstGen *ir_build_truncate_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *dest_type,
        IrInstGen *target)
{
    IrInstGenTruncate *inst = ir_build_inst_gen<IrInstGenTruncate>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = dest_type;
    inst->target = target;

    ir_ref_inst_gen(target);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @intCast(dest_type, target).
static IrInstSrc *ir_build_int_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *dest_type,
        IrInstSrc *target)
{
    IrInstSrcIntCast *inst = ir_build_instruction<IrInstSrcIntCast>(irb, scope, source_node);
    inst->dest_type = dest_type;
    inst->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @floatCast(dest_type, target).
static IrInstSrc *ir_build_float_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *dest_type,
        IrInstSrc *target)
{
    IrInstSrcFloatCast *inst = ir_build_instruction<IrInstSrcFloatCast>(irb, scope, source_node);
    inst->dest_type = dest_type;
    inst->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @errSetCast(dest_type, target).
static IrInstSrc *ir_build_err_set_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcErrSetCast *inst = ir_build_instruction<IrInstSrcErrSetCast>(irb, scope, source_node);
    inst->dest_type = dest_type;
    inst->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @intToFloat(dest_type, target).
static IrInstSrc *ir_build_int_to_float(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcIntToFloat *inst = ir_build_instruction<IrInstSrcIntToFloat>(irb, scope, source_node);
    inst->dest_type = dest_type;
    inst->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @floatToInt(dest_type, target).
static IrInstSrc *ir_build_float_to_int(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcFloatToInt *inst = ir_build_instruction<IrInstSrcFloatToInt>(irb, scope, source_node);
    inst->dest_type = dest_type;
    inst->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @boolToInt(target).
static IrInstSrc *ir_build_bool_to_int(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *target) {
    IrInstSrcBoolToInt *inst = ir_build_instruction<IrInstSrcBoolToInt>(irb, scope, source_node);
    inst->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @Vector(len, elem_type).
static IrInstSrc *ir_build_vector_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *len,
        IrInstSrc *elem_type)
{
    IrInstSrcVectorType *inst = ir_build_instruction<IrInstSrcVectorType>(irb, scope, source_node);
    inst->len = len;
    inst->elem_type = elem_type;

    ir_ref_instruction(len, irb->current_basic_block);
    ir_ref_instruction(elem_type, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @shuffle; `scalar_type` may be null, in
// which case it is not ref-counted.
static IrInstSrc *ir_build_shuffle_vector(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *scalar_type, IrInstSrc *a, IrInstSrc *b, IrInstSrc *mask)
{
    IrInstSrcShuffleVector *inst = ir_build_instruction<IrInstSrcShuffleVector>(irb, scope, source_node);
    inst->scalar_type = scalar_type;
    inst->a = a;
    inst->b = b;
    inst->mask = mask;

    if (scalar_type != nullptr)
        ir_ref_instruction(scalar_type, irb->current_basic_block);
    ir_ref_instruction(a, irb->current_basic_block);
    ir_ref_instruction(b, irb->current_basic_block);
    ir_ref_instruction(mask, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) shuffle instruction. Unlike most gen builders
// this one takes scope/source_node directly rather than an IrInst.
static IrInstGen *ir_build_shuffle_vector_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        ZigType *result_type, IrInstGen *a, IrInstGen *b, IrInstGen *mask)
{
    IrInstGenShuffleVector *instruction = ir_build_inst_gen<IrInstGenShuffleVector>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = result_type;
    instruction->a = a;
    instruction->b = b;
    instruction->mask = mask;

    ir_ref_inst_gen(a);
    ir_ref_inst_gen(b);
    ir_ref_inst_gen(mask);

    return &instruction->base;
}
|
|
|
|
// Build the source-IR node for @splat(len, scalar).
static IrInstSrc *ir_build_splat_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *len, IrInstSrc *scalar)
{
    IrInstSrcSplat *inst = ir_build_instruction<IrInstSrcSplat>(irb, scope, source_node);
    inst->len = len;
    inst->scalar = scalar;

    ir_ref_instruction(len, irb->current_basic_block);
    ir_ref_instruction(scalar, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) splat instruction producing `result_type` from
// a single scalar.
static IrInstGen *ir_build_splat_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
        IrInstGen *scalar)
{
    IrInstGenSplat *inst = ir_build_inst_gen<IrInstGenSplat>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->scalar = scalar;

    ir_ref_inst_gen(scalar);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for boolean negation (`!value`).
static IrInstSrc *ir_build_bool_not(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcBoolNot *inst = ir_build_instruction<IrInstSrcBoolNot>(irb, scope, source_node);
    inst->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) boolean negation; the result type is always
// `bool`.
static IrInstGen *ir_build_bool_not_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value) {
    IrInstGenBoolNot *inst = ir_build_inst_gen<IrInstGenBoolNot>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_bool;
    inst->value = value;

    ir_ref_inst_gen(value);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @memset(dest_ptr, byte, count).
static IrInstSrc *ir_build_memset_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_ptr, IrInstSrc *byte, IrInstSrc *count)
{
    IrInstSrcMemset *inst = ir_build_instruction<IrInstSrcMemset>(irb, scope, source_node);
    inst->dest_ptr = dest_ptr;
    inst->byte = byte;
    inst->count = count;

    ir_ref_instruction(dest_ptr, irb->current_basic_block);
    ir_ref_instruction(byte, irb->current_basic_block);
    ir_ref_instruction(count, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) memset instruction (void result).
static IrInstGen *ir_build_memset_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *dest_ptr, IrInstGen *byte, IrInstGen *count)
{
    IrInstGenMemset *inst = ir_build_inst_void<IrInstGenMemset>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->dest_ptr = dest_ptr;
    inst->byte = byte;
    inst->count = count;

    ir_ref_inst_gen(dest_ptr);
    ir_ref_inst_gen(byte);
    ir_ref_inst_gen(count);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @memcpy(dest_ptr, src_ptr, count).
static IrInstSrc *ir_build_memcpy_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_ptr, IrInstSrc *src_ptr, IrInstSrc *count)
{
    IrInstSrcMemcpy *inst = ir_build_instruction<IrInstSrcMemcpy>(irb, scope, source_node);
    inst->dest_ptr = dest_ptr;
    inst->src_ptr = src_ptr;
    inst->count = count;

    ir_ref_instruction(dest_ptr, irb->current_basic_block);
    ir_ref_instruction(src_ptr, irb->current_basic_block);
    ir_ref_instruction(count, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) memcpy instruction (void result).
static IrInstGen *ir_build_memcpy_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *dest_ptr, IrInstGen *src_ptr, IrInstGen *count)
{
    IrInstGenMemcpy *inst = ir_build_inst_void<IrInstGenMemcpy>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->dest_ptr = dest_ptr;
    inst->src_ptr = src_ptr;
    inst->count = count;

    ir_ref_inst_gen(dest_ptr);
    ir_ref_inst_gen(src_ptr);
    ir_ref_inst_gen(count);

    return &inst->base;
}
|
|
|
|
// Build the source-IR slice instruction (ptr[start..end :sentinel]).
// `end` and `sentinel` are optional and only ref-counted when present;
// `result_loc` is a ResultLoc, not an instruction, so it gets no ref.
static IrInstSrc *ir_build_slice_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *ptr, IrInstSrc *start, IrInstSrc *end, IrInstSrc *sentinel,
        bool safety_check_on, ResultLoc *result_loc)
{
    IrInstSrcSlice *inst = ir_build_instruction<IrInstSrcSlice>(irb, scope, source_node);
    inst->ptr = ptr;
    inst->start = start;
    inst->end = end;
    inst->sentinel = sentinel;
    inst->safety_check_on = safety_check_on;
    inst->result_loc = result_loc;

    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(start, irb->current_basic_block);
    if (end)
        ir_ref_instruction(end, irb->current_basic_block);
    if (sentinel)
        ir_ref_instruction(sentinel, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) slice instruction. `end` and `result_loc` are
// optional; `sentinel` is a comptime ZigValue here, not an instruction,
// so it is not ref-counted.
static IrInstGen *ir_build_slice_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *slice_type,
        IrInstGen *ptr, IrInstGen *start, IrInstGen *end, bool safety_check_on, IrInstGen *result_loc,
        ZigValue *sentinel)
{
    IrInstGenSlice *inst = ir_build_inst_gen<IrInstGenSlice>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = slice_type;
    inst->ptr = ptr;
    inst->start = start;
    inst->end = end;
    inst->safety_check_on = safety_check_on;
    inst->result_loc = result_loc;
    inst->sentinel = sentinel;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(start);
    if (end != nullptr)
        ir_ref_inst_gen(end);
    if (result_loc != nullptr)
        ir_ref_inst_gen(result_loc);

    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @breakpoint() (no operands).
static IrInstSrc *ir_build_breakpoint(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcBreakpoint *inst = ir_build_instruction<IrInstSrcBreakpoint>(irb, scope, source_node);
    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) breakpoint instruction (void result, no operands).
static IrInstGen *ir_build_breakpoint_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenBreakpoint *inst = ir_build_inst_void<IrInstGenBreakpoint>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    return &inst->base;
}
|
|
|
|
// Build the source-IR node for @returnAddress() (no operands).
static IrInstSrc *ir_build_return_address_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcReturnAddress *inst = ir_build_instruction<IrInstSrcReturnAddress>(irb, scope, source_node);
    return &inst->base;
}
|
|
|
|
// Build the analyzed (gen) @returnAddress instruction; the result is usize.
static IrInstGen *ir_build_return_address_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenReturnAddress *instruction = ir_build_inst_gen<IrInstGenReturnAddress>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_usize;
    return &instruction->base;
}
|
|
|
|
// Build the source-IR node for @frameAddress() (no operands).
static IrInstSrc *ir_build_frame_address_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcFrameAddress *instruction = ir_build_instruction<IrInstSrcFrameAddress>(irb, scope, source_node);
    return &instruction->base;
}
|
|
|
|
// Build the analyzed (gen) @frameAddress instruction; the result is usize.
static IrInstGen *ir_build_frame_address_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenFrameAddress *instruction = ir_build_inst_gen<IrInstGenFrameAddress>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_usize;
    return &instruction->base;
}
|
|
|
|
// Build the source-IR node for @frame() (no operands).
static IrInstSrc *ir_build_handle_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcFrameHandle *instruction = ir_build_instruction<IrInstSrcFrameHandle>(irb, scope, source_node);
    return &instruction->base;
}
|
|
|
|
// Build the analyzed (gen) frame-handle instruction with resolved type `ty`.
static IrInstGen *ir_build_handle_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *ty) {
    IrInstGenFrameHandle *instruction = ir_build_inst_gen<IrInstGenFrameHandle>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ty;
    return &instruction->base;
}
|
|
|
|
// Build the source-IR node for @Frame(fn).
static IrInstSrc *ir_build_frame_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *fn) {
    IrInstSrcFrameType *instruction = ir_build_instruction<IrInstSrcFrameType>(irb, scope, source_node);
    instruction->fn = fn;

    ir_ref_instruction(fn, irb->current_basic_block);

    return &instruction->base;
}
|
|
|
|
// Build the source-IR node for @frameSize(fn).
static IrInstSrc *ir_build_frame_size_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *fn) {
    IrInstSrcFrameSize *instruction = ir_build_instruction<IrInstSrcFrameSize>(irb, scope, source_node);
    instruction->fn = fn;

    ir_ref_instruction(fn, irb->current_basic_block);

    return &instruction->base;
}
|
|
|
|
// Build the analyzed (gen) @frameSize instruction; the result is usize.
static IrInstGen *ir_build_frame_size_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *fn)
{
    IrInstGenFrameSize *instruction = ir_build_inst_gen<IrInstGenFrameSize>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_usize;
    instruction->fn = fn;

    ir_ref_inst_gen(fn);

    return &instruction->base;
}
|
|
|
|
// Build the source-IR node for the overflow builtins (@addWithOverflow etc.).
// `op` is an enum and is not ref-counted; the four instruction operands are.
static IrInstSrc *ir_build_overflow_op_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrOverflowOp op, IrInstSrc *type_value, IrInstSrc *op1, IrInstSrc *op2, IrInstSrc *result_ptr)
{
    IrInstSrcOverflowOp *inst = ir_build_instruction<IrInstSrcOverflowOp>(irb, scope, source_node);
    inst->op = op;
    inst->type_value = type_value;
    inst->op1 = op1;
    inst->op2 = op2;
    inst->result_ptr = result_ptr;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);
    ir_ref_instruction(result_ptr, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the analyzed overflow-op instruction; the instruction itself yields a
// bool (overflow flag), while the numeric result goes through `result_ptr`.
static IrInstGen *ir_build_overflow_op_gen(IrAnalyze *ira, IrInst *source_instr,
        IrOverflowOp op, IrInstGen *op1, IrInstGen *op2, IrInstGen *result_ptr,
        ZigType *result_ptr_type)
{
    IrInstGenOverflowOp *out = ir_build_inst_gen<IrInstGenOverflowOp>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = ira->codegen->builtin_types.entry_bool;
    out->op = op;
    out->op1 = op1;
    out->op2 = op2;
    out->result_ptr = result_ptr;
    out->result_ptr_type = result_ptr_type;

    ir_ref_inst_gen(op1);
    ir_ref_inst_gen(op2);
    ir_ref_inst_gen(result_ptr);

    return &out->base;
}
|
|
|
|
// Build the source-IR float-op instruction; `fn_id` selects which float builtin
// this represents.
static IrInstSrc *ir_build_float_op_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *operand,
        BuiltinFnId fn_id)
{
    IrInstSrcFloatOp *out = ir_build_instruction<IrInstSrcFloatOp>(irb, scope, source_node);
    out->operand = operand;
    out->fn_id = fn_id;
    ir_ref_instruction(operand, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed float-op instruction; the result type equals the operand
// type supplied by the caller.
static IrInstGen *ir_build_float_op_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand,
        BuiltinFnId fn_id, ZigType *operand_type)
{
    IrInstGenFloatOp *out = ir_build_inst_gen<IrInstGenFloatOp>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = operand_type;
    out->operand = operand;
    out->fn_id = fn_id;
    ir_ref_inst_gen(operand);
    return &out->base;
}
|
|
|
|
// Build the source-IR mul-add instruction with its explicit type and three
// operands, ref-counting each operand instruction.
static IrInstSrc *ir_build_mul_add_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *op1, IrInstSrc *op2, IrInstSrc *op3)
{
    IrInstSrcMulAdd *out = ir_build_instruction<IrInstSrcMulAdd>(irb, scope, source_node);
    out->type_value = type_value;
    out->op1 = op1;
    out->op2 = op2;
    out->op3 = op3;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);
    ir_ref_instruction(op3, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed mul-add instruction; its result type is the already
// resolved expression type.
static IrInstGen *ir_build_mul_add_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *op1, IrInstGen *op2,
        IrInstGen *op3, ZigType *expr_type)
{
    IrInstGenMulAdd *out = ir_build_inst_gen<IrInstGenMulAdd>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = expr_type;
    out->op1 = op1;
    out->op2 = op2;
    out->op3 = op3;

    ir_ref_inst_gen(op1);
    ir_ref_inst_gen(op2);
    ir_ref_inst_gen(op3);

    return &out->base;
}
|
|
|
|
// Build the source-IR align-of instruction for the given type operand.
static IrInstSrc *ir_build_align_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_value) {
    IrInstSrcAlignOf *out = ir_build_instruction<IrInstSrcAlignOf>(irb, scope, source_node);
    out->type_value = type_value;
    ir_ref_instruction(type_value, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR test-err instruction. `base_ptr_is_payload` marks whether
// `base_ptr` points at the payload rather than the whole error union;
// `resolve_err_set` controls error-set resolution during analysis.
static IrInstSrc *ir_build_test_err_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *base_ptr, bool resolve_err_set, bool base_ptr_is_payload)
{
    IrInstSrcTestErr *out = ir_build_instruction<IrInstSrcTestErr>(irb, scope, source_node);
    out->base_ptr = base_ptr;
    out->resolve_err_set = resolve_err_set;
    out->base_ptr_is_payload = base_ptr_is_payload;
    ir_ref_instruction(base_ptr, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed test-err instruction; the result is always bool.
static IrInstGen *ir_build_test_err_gen(IrAnalyze *ira, IrInst *source_instruction, IrInstGen *err_union) {
    IrInstGenTestErr *out = ir_build_inst_gen<IrInstGenTestErr>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    out->base.value->type = ira->codegen->builtin_types.entry_bool;
    out->err_union = err_union;
    ir_ref_inst_gen(err_union);
    return &out->base;
}
|
|
|
|
// Build the source-IR instruction that extracts the error code from an error
// union pointed to by `err_union_ptr`.
static IrInstSrc *ir_build_unwrap_err_code_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *err_union_ptr)
{
    IrInstSrcUnwrapErrCode *out = ir_build_instruction<IrInstSrcUnwrapErrCode>(irb, scope, source_node);
    out->err_union_ptr = err_union_ptr;
    ir_ref_instruction(err_union_ptr, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed unwrap-err-code instruction with its resolved result type.
static IrInstGen *ir_build_unwrap_err_code_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstGen *err_union_ptr, ZigType *result_type)
{
    IrInstGenUnwrapErrCode *out = ir_build_inst_gen<IrInstGenUnwrapErrCode>(&ira->new_irb, scope, source_node);
    out->base.value->type = result_type;
    out->err_union_ptr = err_union_ptr;
    ir_ref_inst_gen(err_union_ptr);
    return &out->base;
}
|
|
|
|
// Build the source-IR unwrap-err-payload instruction. `safety_check_on`
// controls the runtime check; `initializing` marks result-location writes.
static IrInstSrc *ir_build_unwrap_err_payload_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *value, bool safety_check_on, bool initializing)
{
    IrInstSrcUnwrapErrPayload *out = ir_build_instruction<IrInstSrcUnwrapErrPayload>(irb, scope, source_node);
    out->value = value;
    out->safety_check_on = safety_check_on;
    out->initializing = initializing;
    ir_ref_instruction(value, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed unwrap-err-payload instruction with its resolved type;
// flags mirror the source-IR variant.
static IrInstGen *ir_build_unwrap_err_payload_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstGen *value, bool safety_check_on, bool initializing, ZigType *result_type)
{
    IrInstGenUnwrapErrPayload *out = ir_build_inst_gen<IrInstGenUnwrapErrPayload>(&ira->new_irb, scope, source_node);
    out->base.value->type = result_type;
    out->value = value;
    out->safety_check_on = safety_check_on;
    out->initializing = initializing;
    ir_ref_inst_gen(value);
    return &out->base;
}
|
|
|
|
// Build the source-IR fn-proto instruction. Any of `param_types[i]`,
// `align_value`, and `callconv_value` may be null and is only ref-counted when
// present; for var-args protos the trailing (var-args) param has no type
// instruction, so the ref loop excludes it.
static IrInstSrc *ir_build_fn_proto(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc **param_types, IrInstSrc *align_value, IrInstSrc *callconv_value,
        IrInstSrc *return_type, bool is_var_args)
{
    IrInstSrcFnProto *out = ir_build_instruction<IrInstSrcFnProto>(irb, scope, source_node);
    out->param_types = param_types;
    out->align_value = align_value;
    out->callconv_value = callconv_value;
    out->return_type = return_type;
    out->is_var_args = is_var_args;

    assert(source_node->type == NodeTypeFnProto);
    size_t param_count = source_node->data.fn_proto.params.length;
    if (is_var_args) param_count -= 1;
    for (size_t param_i = 0; param_i < param_count; param_i += 1) {
        if (param_types[param_i] != nullptr) {
            ir_ref_instruction(param_types[param_i], irb->current_basic_block);
        }
    }
    if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block);
    if (callconv_value != nullptr) ir_ref_instruction(callconv_value, irb->current_basic_block);
    ir_ref_instruction(return_type, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the source-IR test-comptime instruction for `value`.
static IrInstSrc *ir_build_test_comptime(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcTestComptime *out = ir_build_instruction<IrInstSrcTestComptime>(irb, scope, source_node);
    out->value = value;
    ir_ref_instruction(value, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR ptr-cast instruction (destination type + pointer operand).
static IrInstSrc *ir_build_ptr_cast_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *ptr, bool safety_check_on)
{
    IrInstSrcPtrCast *out = ir_build_instruction<IrInstSrcPtrCast>(irb, scope, source_node);
    out->dest_type = dest_type;
    out->ptr = ptr;
    out->safety_check_on = safety_check_on;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed ptr-cast instruction; `ptr_type` is the already resolved
// destination pointer type.
static IrInstGen *ir_build_ptr_cast_gen(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *ptr_type, IrInstGen *ptr, bool safety_check_on)
{
    IrInstGenPtrCast *out = ir_build_inst_gen<IrInstGenPtrCast>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    out->base.value->type = ptr_type;
    out->ptr = ptr;
    out->safety_check_on = safety_check_on;
    ir_ref_inst_gen(ptr);
    return &out->base;
}
|
|
|
|
// Build the source-IR implicit-cast instruction, tying the operand to its
// cast result location.
static IrInstSrc *ir_build_implicit_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand, ResultLocCast *result_loc_cast)
{
    IrInstSrcImplicitCast *out = ir_build_instruction<IrInstSrcImplicitCast>(irb, scope, source_node);
    out->operand = operand;
    out->result_loc_cast = result_loc_cast;
    ir_ref_instruction(operand, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR bit-cast instruction, tying the operand to its
// bit-cast result location.
static IrInstSrc *ir_build_bit_cast_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand, ResultLocBitCast *result_loc_bit_cast)
{
    IrInstSrcBitCast *out = ir_build_instruction<IrInstSrcBitCast>(irb, scope, source_node);
    out->operand = operand;
    out->result_loc_bit_cast = result_loc_bit_cast;
    ir_ref_instruction(operand, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed bit-cast instruction with the resolved destination type.
static IrInstGen *ir_build_bit_cast_gen(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *operand, ZigType *ty)
{
    IrInstGenBitCast *out = ir_build_inst_gen<IrInstGenBitCast>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    out->base.value->type = ty;
    out->operand = operand;
    ir_ref_inst_gen(operand);
    return &out->base;
}
|
|
|
|
// Build the analyzed widen-or-shorten instruction converting `target` to
// `result_type`.
static IrInstGen *ir_build_widen_or_shorten(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *result_type)
{
    IrInstGenWidenOrShorten *out = ir_build_inst_gen<IrInstGenWidenOrShorten>(&ira->new_irb, scope, source_node);
    out->base.value->type = result_type;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR int-to-ptr instruction (destination type + integer
// operand), ref-counting both operand instructions.
static IrInstSrc *ir_build_int_to_ptr_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcIntToPtr *out = ir_build_instruction<IrInstSrcIntToPtr>(irb, scope, source_node);
    out->dest_type = dest_type;
    out->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed int-to-ptr instruction with the resolved pointer type.
static IrInstGen *ir_build_int_to_ptr_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstGen *target, ZigType *ptr_type)
{
    IrInstGenIntToPtr *out = ir_build_inst_gen<IrInstGenIntToPtr>(&ira->new_irb, scope, source_node);
    out->base.value->type = ptr_type;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR ptr-to-int instruction for `target`.
static IrInstSrc *ir_build_ptr_to_int_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcPtrToInt *out = ir_build_instruction<IrInstSrcPtrToInt>(irb, scope, source_node);
    out->target = target;
    ir_ref_instruction(target, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed ptr-to-int instruction; the result type is usize.
static IrInstGen *ir_build_ptr_to_int_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *target) {
    IrInstGenPtrToInt *out = ir_build_inst_gen<IrInstGenPtrToInt>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = ira->codegen->builtin_types.entry_usize;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR int-to-enum instruction; `dest_type` may be null and is
// only ref-counted when present.
static IrInstSrc *ir_build_int_to_enum_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcIntToEnum *out = ir_build_instruction<IrInstSrcIntToEnum>(irb, scope, source_node);
    out->dest_type = dest_type;
    out->target = target;

    if (dest_type) ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed int-to-enum instruction with the resolved enum type.
static IrInstGen *ir_build_int_to_enum_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        ZigType *dest_type, IrInstGen *target)
{
    IrInstGenIntToEnum *out = ir_build_inst_gen<IrInstGenIntToEnum>(&ira->new_irb, scope, source_node);
    out->base.value->type = dest_type;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR enum-to-int instruction for `target`.
static IrInstSrc *ir_build_enum_to_int(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcEnumToInt *out = ir_build_instruction<IrInstSrcEnumToInt>(irb, scope, source_node);
    out->target = target;
    ir_ref_instruction(target, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR int-to-err instruction for `target`.
static IrInstSrc *ir_build_int_to_err_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcIntToErr *out = ir_build_instruction<IrInstSrcIntToErr>(irb, scope, source_node);
    out->target = target;
    ir_ref_instruction(target, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed int-to-err instruction with the wanted error-set type.
static IrInstGen *ir_build_int_to_err_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *wanted_type)
{
    IrInstGenIntToErr *out = ir_build_inst_gen<IrInstGenIntToErr>(&ira->new_irb, scope, source_node);
    out->base.value->type = wanted_type;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR err-to-int instruction for `target`.
static IrInstSrc *ir_build_err_to_int_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcErrToInt *out = ir_build_instruction<IrInstSrcErrToInt>(irb, scope, source_node);
    out->target = target;
    ir_ref_instruction(target, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed err-to-int instruction with the wanted integer type.
static IrInstGen *ir_build_err_to_int_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *wanted_type)
{
    IrInstGenErrToInt *out = ir_build_inst_gen<IrInstGenErrToInt>(&ira->new_irb, scope, source_node);
    out->base.value->type = wanted_type;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR check-switch-prongs instruction used to validate switch
// exhaustiveness. Ref-counts the switch target plus the start/end instruction
// of every range prong.
static IrInstSrc *ir_build_check_switch_prongs(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value, IrInstSrcCheckSwitchProngsRange *ranges, size_t range_count,
        AstNode* else_prong, bool have_underscore_prong)
{
    IrInstSrcCheckSwitchProngs *out = ir_build_instruction<IrInstSrcCheckSwitchProngs>(
            irb, scope, source_node);
    out->target_value = target_value;
    out->ranges = ranges;
    out->range_count = range_count;
    out->else_prong = else_prong;
    out->have_underscore_prong = have_underscore_prong;

    ir_ref_instruction(target_value, irb->current_basic_block);
    for (size_t range_i = 0; range_i < range_count; range_i += 1) {
        ir_ref_instruction(ranges[range_i].start, irb->current_basic_block);
        ir_ref_instruction(ranges[range_i].end, irb->current_basic_block);
    }

    return &out->base;
}
|
|
|
|
// Build the source-IR check that a statement expression evaluates to void.
static IrInstSrc *ir_build_check_statement_is_void(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc* statement_value)
{
    IrInstSrcCheckStatementIsVoid *out = ir_build_instruction<IrInstSrcCheckStatementIsVoid>(
            irb, scope, source_node);
    out->statement_value = statement_value;
    ir_ref_instruction(statement_value, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR type-name instruction for the given type operand.
static IrInstSrc *ir_build_type_name(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value)
{
    IrInstSrcTypeName *out = ir_build_instruction<IrInstSrcTypeName>(irb, scope, source_node);
    out->type_value = type_value;
    ir_ref_instruction(type_value, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR decl-ref instruction. The operands (a Tld and an LVal)
// are not IR instructions, so nothing is ref-counted here.
static IrInstSrc *ir_build_decl_ref(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Tld *tld, LVal lval) {
    IrInstSrcDeclRef *out = ir_build_instruction<IrInstSrcDeclRef>(irb, scope, source_node);
    out->tld = tld;
    out->lval = lval;
    return &out->base;
}
|
|
|
|
// Build the source-IR panic instruction; it is marked noreturn since control
// never resumes after a panic.
static IrInstSrc *ir_build_panic_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *msg) {
    IrInstSrcPanic *out = ir_build_instruction<IrInstSrcPanic>(irb, scope, source_node);
    out->base.is_noreturn = true;
    out->msg = msg;
    ir_ref_instruction(msg, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed panic instruction via the noreturn-instruction helper.
static IrInstGen *ir_build_panic_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *msg) {
    IrInstGenPanic *out = ir_build_inst_noreturn<IrInstGenPanic>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->msg = msg;
    ir_ref_inst_gen(msg);
    return &out->base;
}
|
|
|
|
// Build the source-IR tag-name instruction for `target`.
static IrInstSrc *ir_build_tag_name_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *target) {
    IrInstSrcTagName *out = ir_build_instruction<IrInstSrcTagName>(irb, scope, source_node);
    out->target = target;
    ir_ref_instruction(target, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the analyzed tag-name instruction with its resolved result type.
static IrInstGen *ir_build_tag_name_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *target,
        ZigType *result_type)
{
    IrInstGenTagName *out = ir_build_inst_gen<IrInstGenTagName>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = result_type;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR tag-type instruction for `target`.
static IrInstSrc *ir_build_tag_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcTagType *out = ir_build_instruction<IrInstSrcTagType>(irb, scope, source_node);
    out->target = target;
    ir_ref_instruction(target, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR field-parent-ptr instruction (type, field name, and
// field pointer operands), ref-counting each.
static IrInstSrc *ir_build_field_parent_ptr_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *field_name, IrInstSrc *field_ptr)
{
    IrInstSrcFieldParentPtr *out = ir_build_instruction<IrInstSrcFieldParentPtr>(
            irb, scope, source_node);
    out->type_value = type_value;
    out->field_name = field_name;
    out->field_ptr = field_ptr;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);
    ir_ref_instruction(field_ptr, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed field-parent-ptr instruction; the field itself is a
// resolved TypeStructField, so only the pointer operand is ref-counted.
static IrInstGen *ir_build_field_parent_ptr_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *field_ptr, TypeStructField *field, ZigType *result_type)
{
    IrInstGenFieldParentPtr *out = ir_build_inst_gen<IrInstGenFieldParentPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = result_type;
    out->field_ptr = field_ptr;
    out->field = field;
    ir_ref_inst_gen(field_ptr);
    return &out->base;
}
|
|
|
|
// Build the source-IR byte-offset-of instruction (type + field-name operands).
static IrInstSrc *ir_build_byte_offset_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *field_name)
{
    IrInstSrcByteOffsetOf *out = ir_build_instruction<IrInstSrcByteOffsetOf>(irb, scope, source_node);
    out->type_value = type_value;
    out->field_name = field_name;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the source-IR bit-offset-of instruction (type + field-name operands).
static IrInstSrc *ir_build_bit_offset_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *field_name)
{
    IrInstSrcBitOffsetOf *out = ir_build_instruction<IrInstSrcBitOffsetOf>(irb, scope, source_node);
    out->type_value = type_value;
    out->field_name = field_name;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the source-IR type-info instruction for the given type operand.
static IrInstSrc *ir_build_type_info(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_value) {
    IrInstSrcTypeInfo *out = ir_build_instruction<IrInstSrcTypeInfo>(irb, scope, source_node);
    out->type_value = type_value;
    ir_ref_instruction(type_value, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR type instruction (the inverse of type-info, consuming a
// type-info value).
static IrInstSrc *ir_build_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_info) {
    IrInstSrcType *out = ir_build_instruction<IrInstSrcType>(irb, scope, source_node);
    out->type_info = type_info;
    ir_ref_instruction(type_info, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR set-eval-branch-quota instruction.
static IrInstSrc *ir_build_set_eval_branch_quota(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *new_quota)
{
    IrInstSrcSetEvalBranchQuota *out = ir_build_instruction<IrInstSrcSetEvalBranchQuota>(irb, scope, source_node);
    out->new_quota = new_quota;
    ir_ref_instruction(new_quota, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR align-cast instruction (alignment + target operands).
static IrInstSrc *ir_build_align_cast_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *align_bytes, IrInstSrc *target)
{
    IrInstSrcAlignCast *out = ir_build_instruction<IrInstSrcAlignCast>(irb, scope, source_node);
    out->align_bytes = align_bytes;
    out->target = target;

    ir_ref_instruction(align_bytes, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed align-cast instruction with the resolved result type.
static IrInstGen *ir_build_align_cast_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *result_type)
{
    IrInstGenAlignCast *out = ir_build_inst_gen<IrInstGenAlignCast>(&ira->new_irb, scope, source_node);
    out->base.value->type = result_type;
    out->target = target;
    ir_ref_inst_gen(target);
    return &out->base;
}
|
|
|
|
// Build the source-IR resolve-result instruction for a result location; the
// optional type hint `ty` is only ref-counted when provided.
static IrInstSrc *ir_build_resolve_result(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ResultLoc *result_loc, IrInstSrc *ty)
{
    IrInstSrcResolveResult *out = ir_build_instruction<IrInstSrcResolveResult>(irb, scope, source_node);
    out->result_loc = result_loc;
    out->ty = ty;
    if (ty != nullptr) ir_ref_instruction(ty, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR reset-result instruction; flagged `is_gen` so it
// survives into the analyzed stream. Takes no instruction operands.
static IrInstSrc *ir_build_reset_result(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ResultLoc *result_loc)
{
    IrInstSrcResetResult *out = ir_build_instruction<IrInstSrcResetResult>(irb, scope, source_node);
    out->result_loc = result_loc;
    out->base.is_gen = true;
    return &out->base;
}
|
|
|
|
// Build the source-IR set-align-stack instruction.
static IrInstSrc *ir_build_set_align_stack(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *align_bytes)
{
    IrInstSrcSetAlignStack *out = ir_build_instruction<IrInstSrcSetAlignStack>(irb, scope, source_node);
    out->align_bytes = align_bytes;
    ir_ref_instruction(align_bytes, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR arg-type instruction (function type + argument index);
// `allow_var` permits generic/var parameter types during analysis.
static IrInstSrc *ir_build_arg_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *fn_type, IrInstSrc *arg_index, bool allow_var)
{
    IrInstSrcArgType *out = ir_build_instruction<IrInstSrcArgType>(irb, scope, source_node);
    out->fn_type = fn_type;
    out->arg_index = arg_index;
    out->allow_var = allow_var;

    ir_ref_instruction(fn_type, irb->current_basic_block);
    ir_ref_instruction(arg_index, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the source-IR error-return-trace instruction; the only operand is a
// plain enum, so nothing is ref-counted.
static IrInstSrc *ir_build_error_return_trace_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstErrorReturnTraceOptional optional)
{
    IrInstSrcErrorReturnTrace *out = ir_build_instruction<IrInstSrcErrorReturnTrace>(irb, scope, source_node);
    out->optional = optional;
    return &out->base;
}
|
|
|
|
// Build the analyzed error-return-trace instruction with its resolved type.
static IrInstGen *ir_build_error_return_trace_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstErrorReturnTraceOptional optional, ZigType *result_type)
{
    IrInstGenErrorReturnTrace *out = ir_build_inst_gen<IrInstGenErrorReturnTrace>(&ira->new_irb, scope, source_node);
    out->base.value->type = result_type;
    out->optional = optional;
    return &out->base;
}
|
|
|
|
// Build the source-IR error-union type instruction (`err_set!payload`).
static IrInstSrc *ir_build_error_union(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *err_set, IrInstSrc *payload)
{
    IrInstSrcErrorUnion *out = ir_build_instruction<IrInstSrcErrorUnion>(irb, scope, source_node);
    out->err_set = err_set;
    out->payload = payload;

    ir_ref_instruction(err_set, irb->current_basic_block);
    ir_ref_instruction(payload, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the source-IR atomic-rmw instruction; all five operands are still
// unresolved instructions here and each is ref-counted.
static IrInstSrc *ir_build_atomic_rmw_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand_type, IrInstSrc *ptr, IrInstSrc *op, IrInstSrc *operand,
        IrInstSrc *ordering)
{
    IrInstSrcAtomicRmw *out = ir_build_instruction<IrInstSrcAtomicRmw>(irb, scope, source_node);
    out->operand_type = operand_type;
    out->ptr = ptr;
    out->op = op;
    out->operand = operand;
    out->ordering = ordering;

    ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);
    ir_ref_instruction(operand, irb->current_basic_block);
    ir_ref_instruction(ordering, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed atomic-rmw instruction; op and ordering are resolved
// enums by now, so only the two instruction operands are ref-counted.
static IrInstGen *ir_build_atomic_rmw_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *ptr, IrInstGen *operand, AtomicRmwOp op, AtomicOrder ordering, ZigType *operand_type)
{
    IrInstGenAtomicRmw *out = ir_build_inst_gen<IrInstGenAtomicRmw>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = operand_type;
    out->ptr = ptr;
    out->op = op;
    out->operand = operand;
    out->ordering = ordering;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(operand);

    return &out->base;
}
|
|
|
|
// Build the source-IR atomic-load instruction (type, pointer, ordering).
static IrInstSrc *ir_build_atomic_load_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand_type, IrInstSrc *ptr, IrInstSrc *ordering)
{
    IrInstSrcAtomicLoad *out = ir_build_instruction<IrInstSrcAtomicLoad>(irb, scope, source_node);
    out->operand_type = operand_type;
    out->ptr = ptr;
    out->ordering = ordering;

    ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(ordering, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed atomic-load instruction; ordering is a resolved enum.
static IrInstGen *ir_build_atomic_load_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *ptr, AtomicOrder ordering, ZigType *operand_type)
{
    IrInstGenAtomicLoad *out = ir_build_inst_gen<IrInstGenAtomicLoad>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->base.value->type = operand_type;
    out->ptr = ptr;
    out->ordering = ordering;
    ir_ref_inst_gen(ptr);
    return &out->base;
}
|
|
|
|
// Build the source-IR atomic-store instruction (type, pointer, value, ordering).
static IrInstSrc *ir_build_atomic_store_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand_type, IrInstSrc *ptr, IrInstSrc *value, IrInstSrc *ordering)
{
    IrInstSrcAtomicStore *out = ir_build_instruction<IrInstSrcAtomicStore>(irb, scope, source_node);
    out->operand_type = operand_type;
    out->ptr = ptr;
    out->value = value;
    out->ordering = ordering;

    ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);
    ir_ref_instruction(ordering, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the analyzed atomic-store instruction via the void-result helper
// (a store produces no value).
static IrInstGen *ir_build_atomic_store_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *ptr, IrInstGen *value, AtomicOrder ordering)
{
    IrInstGenAtomicStore *out = ir_build_inst_void<IrInstGenAtomicStore>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    out->ptr = ptr;
    out->value = value;
    out->ordering = ordering;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(value);

    return &out->base;
}
|
|
|
|
// Build the source-IR save-err-ret-addr instruction; it has no operands.
static IrInstSrc *ir_build_save_err_ret_addr_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcSaveErrRetAddr *out = ir_build_instruction<IrInstSrcSaveErrRetAddr>(irb, scope, source_node);
    return &out->base;
}
|
|
|
|
// Build the analyzed save-err-ret-addr instruction; void result, no operands.
static IrInstGen *ir_build_save_err_ret_addr_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenSaveErrRetAddr *out = ir_build_inst_void<IrInstGenSaveErrRetAddr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    return &out->base;
}
|
|
|
|
// Build the source-IR instruction that records `value` as a contributor to
// the function's inferred return type.
static IrInstSrc *ir_build_add_implicit_return_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *value, ResultLocReturn *result_loc_ret)
{
    IrInstSrcAddImplicitReturnType *out = ir_build_instruction<IrInstSrcAddImplicitReturnType>(irb, scope, source_node);
    out->value = value;
    out->result_loc_ret = result_loc_ret;
    ir_ref_instruction(value, irb->current_basic_block);
    return &out->base;
}
|
|
|
|
// Build the source-IR has-decl instruction (container + decl-name operands).
static IrInstSrc *ir_build_has_decl(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *container, IrInstSrc *name)
{
    IrInstSrcHasDecl *out = ir_build_instruction<IrInstSrcHasDecl>(irb, scope, source_node);
    out->container = container;
    out->name = name;

    ir_ref_instruction(container, irb->current_basic_block);
    ir_ref_instruction(name, irb->current_basic_block);

    return &out->base;
}
|
|
|
|
// Build the source-IR undeclared-identifier instruction; the name is a plain
// Buf, not an instruction, so nothing is ref-counted.
static IrInstSrc *ir_build_undeclared_identifier(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *name) {
    IrInstSrcUndeclaredIdent *out = ir_build_instruction<IrInstSrcUndeclaredIdent>(irb, scope, source_node);
    out->name = name;
    return &out->base;
}
|
|
|
|
// Build the instruction that validates a runtime scope against comptime-ness:
// both flag operands are instructions producing booleans.
static IrInstSrc *ir_build_check_runtime_scope(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *scope_is_comptime, IrInstSrc *is_comptime) {
    IrInstSrcCheckRuntimeScope *inst = ir_build_instruction<IrInstSrcCheckRuntimeScope>(irb, scope, source_node);
    inst->scope_is_comptime = scope_is_comptime;
    inst->is_comptime = is_comptime;

    ir_ref_instruction(scope_is_comptime, irb->current_basic_block);
    ir_ref_instruction(is_comptime, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the instruction initializing a union via a named field.
// `result_loc` is optional and only referenced when present.
static IrInstSrc *ir_build_union_init_named_field(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *union_type, IrInstSrc *field_name, IrInstSrc *field_result_loc, IrInstSrc *result_loc)
{
    IrInstSrcUnionInitNamedField *inst = ir_build_instruction<IrInstSrcUnionInitNamedField>(irb, scope, source_node);
    inst->union_type = union_type;
    inst->field_name = field_name;
    inst->field_result_loc = field_result_loc;
    inst->result_loc = result_loc;

    ir_ref_instruction(union_type, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);
    ir_ref_instruction(field_result_loc, irb->current_basic_block);
    if (result_loc != nullptr) {
        ir_ref_instruction(result_loc, irb->current_basic_block);
    }

    return &inst->base;
}
|
|
|
|
|
|
// Build the gen-IR instruction converting a vector value to an array of the
// given result type, storing through `result_loc`.
static IrInstGen *ir_build_vector_to_array(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *vector, IrInstGen *result_loc)
{
    IrInstGenVectorToArray *inst = ir_build_inst_gen<IrInstGenVectorToArray>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->vector = vector;
    inst->result_loc = result_loc;

    ir_ref_inst_gen(vector);
    ir_ref_inst_gen(result_loc);

    return &inst->base;
}
|
|
|
|
// Build the gen-IR instruction converting a pointer-to-array into a slice of
// the given result type, storing through `result_loc`.
static IrInstGen *ir_build_ptr_of_array_to_slice(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenPtrOfArrayToSlice *inst = ir_build_inst_gen<IrInstGenPtrOfArrayToSlice>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->operand = operand;
    inst->result_loc = result_loc;

    ir_ref_inst_gen(operand);
    ir_ref_inst_gen(result_loc);

    return &inst->base;
}
|
|
|
|
// Build the gen-IR instruction converting an array value to a vector of the
// given result type.
static IrInstGen *ir_build_array_to_vector(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *array, ZigType *result_type)
{
    IrInstGenArrayToVector *inst = ir_build_inst_gen<IrInstGenArrayToVector>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->array = array;

    ir_ref_inst_gen(array);

    return &inst->base;
}
|
|
|
|
// Build a void gen-IR instruction asserting that `target` is zero.
static IrInstGen *ir_build_assert_zero(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *target)
{
    IrInstGenAssertZero *inst = ir_build_inst_gen<IrInstGenAssertZero>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_void;
    inst->target = target;

    ir_ref_inst_gen(target);

    return &inst->base;
}
|
|
|
|
// Build a void gen-IR instruction asserting that `target` is non-null.
static IrInstGen *ir_build_assert_non_null(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *target)
{
    IrInstGenAssertNonNull *inst = ir_build_inst_gen<IrInstGenAssertNonNull>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_void;
    inst->target = target;

    ir_ref_inst_gen(target);

    return &inst->base;
}
|
|
|
|
// Build a source-IR alloca. `align` and `is_comptime` are both optional and
// only referenced when present. Marked is_gen: this instruction is
// compiler-generated rather than written by the user.
static IrInstSrc *ir_build_alloca_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *align, const char *name_hint, IrInstSrc *is_comptime)
{
    IrInstSrcAlloca *inst = ir_build_instruction<IrInstSrcAlloca>(irb, scope, source_node);
    inst->base.is_gen = true;
    inst->align = align;
    inst->name_hint = name_hint;
    inst->is_comptime = is_comptime;

    if (align != nullptr) {
        ir_ref_instruction(align, irb->current_basic_block);
    }
    if (is_comptime != nullptr) {
        ir_ref_instruction(is_comptime, irb->current_basic_block);
    }

    return &inst->base;
}
|
|
|
|
// Create (via ir_create_inst_gen, i.e. without the ir_build_* append path) a
// gen-IR alloca with a resolved alignment and debug name hint. Returns the
// concrete alloca type so callers can fill in more fields.
static IrInstGenAlloca *ir_build_alloca_gen(IrAnalyze *ira, IrInst *source_instruction,
        uint32_t align, const char *name_hint)
{
    IrInstGenAlloca *inst = ir_create_inst_gen<IrInstGenAlloca>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->align = align;
    inst->name_hint = name_hint;

    return inst;
}
|
|
|
|
// Build the instruction marking the end of an expression, tying `value` to
// its result location. Marked is_gen (compiler-generated).
static IrInstSrc *ir_build_end_expr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *value, ResultLoc *result_loc)
{
    IrInstSrcEndExpr *inst = ir_build_instruction<IrInstSrcEndExpr>(irb, scope, source_node);
    inst->base.is_gen = true;
    inst->value = value;
    inst->result_loc = result_loc;

    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the source-IR instruction opening a suspend block. Returns the
// concrete type so the matching suspend_finish can reference it.
static IrInstSrcSuspendBegin *ir_build_suspend_begin_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcSuspendBegin *inst = ir_build_instruction<IrInstSrcSuspendBegin>(irb, scope, source_node);
    return inst;
}
|
|
|
|
// Gen-IR counterpart of suspend_begin: a void instruction with no operands.
static IrInstGen *ir_build_suspend_begin_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenSuspendBegin *instruction = ir_build_inst_void<IrInstGenSuspendBegin>(
            &ira->new_irb, source_instr->scope, source_instr->source_node);
    return &instruction->base;
}
|
|
|
|
// Build the source-IR instruction closing a suspend block; it references the
// paired suspend_begin instruction.
static IrInstSrc *ir_build_suspend_finish_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrcSuspendBegin *begin)
{
    IrInstSrcSuspendFinish *instruction = ir_build_instruction<IrInstSrcSuspendFinish>(irb, scope, source_node);
    instruction->begin = begin;

    ir_ref_instruction(&begin->base, irb->current_basic_block);

    return &instruction->base;
}
|
|
|
|
// Gen-IR counterpart of suspend_finish, referencing the paired begin.
static IrInstGen *ir_build_suspend_finish_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGenSuspendBegin *begin) {
    IrInstGenSuspendFinish *instruction = ir_build_inst_void<IrInstGenSuspendFinish>(
            &ira->new_irb, source_instr->scope, source_instr->source_node);
    instruction->begin = begin;

    ir_ref_inst_gen(&begin->base);

    return &instruction->base;
}
|
|
|
|
// Build the source-IR for `await frame` (or `nosuspend await`); only the
// frame operand is referenced.
static IrInstSrc *ir_build_await_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *frame, ResultLoc *result_loc, bool is_nosuspend)
{
    IrInstSrcAwait *inst = ir_build_instruction<IrInstSrcAwait>(irb, scope, source_node);
    inst->frame = frame;
    inst->result_loc = result_loc;
    inst->is_nosuspend = is_nosuspend;

    ir_ref_instruction(frame, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Build the gen-IR await instruction. `result_loc` is optional and only
// referenced when present; returns the concrete await type.
static IrInstGenAwait *ir_build_await_gen(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *frame, ZigType *result_type, IrInstGen *result_loc, bool is_nosuspend)
{
    IrInstGenAwait *inst = ir_build_inst_gen<IrInstGenAwait>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = result_type;
    inst->frame = frame;
    inst->result_loc = result_loc;
    inst->is_nosuspend = is_nosuspend;

    ir_ref_inst_gen(frame);
    if (result_loc != nullptr) {
        ir_ref_inst_gen(result_loc);
    }

    return inst;
}
|
|
|
|
// Build the source-IR for `resume frame`.
static IrInstSrc *ir_build_resume_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *frame) {
    IrInstSrcResume *inst = ir_build_instruction<IrInstSrcResume>(irb, scope, source_node);
    inst->frame = frame;

    ir_ref_instruction(frame, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Gen-IR counterpart of resume: a void instruction referencing the frame.
static IrInstGen *ir_build_resume_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *frame) {
    IrInstGenResume *inst = ir_build_inst_void<IrInstGenResume>(
            &ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->frame = frame;

    ir_ref_inst_gen(frame);

    return &inst->base;
}
|
|
|
|
// Build the source-IR instruction that begins spilling `operand` across a
// suspend point; returns the concrete type so spill_end can pair with it.
static IrInstSrcSpillBegin *ir_build_spill_begin_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand, SpillId spill_id)
{
    IrInstSrcSpillBegin *inst = ir_build_instruction<IrInstSrcSpillBegin>(irb, scope, source_node);
    inst->operand = operand;
    inst->spill_id = spill_id;

    ir_ref_instruction(operand, irb->current_basic_block);

    return inst;
}
|
|
|
|
// Gen-IR counterpart of spill_begin: a void instruction referencing the
// spilled operand and tagged with its spill slot id.
static IrInstGen *ir_build_spill_begin_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand,
        SpillId spill_id)
{
    IrInstGenSpillBegin *inst = ir_build_inst_void<IrInstGenSpillBegin>(
            &ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->operand = operand;
    inst->spill_id = spill_id;

    ir_ref_inst_gen(operand);

    return &inst->base;
}
|
|
|
|
// Build the source-IR instruction that reloads the value spilled by the
// paired spill_begin.
static IrInstSrc *ir_build_spill_end_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrcSpillBegin *begin)
{
    IrInstSrcSpillEnd *inst = ir_build_instruction<IrInstSrcSpillEnd>(irb, scope, source_node);
    inst->begin = begin;

    ir_ref_instruction(&begin->base, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Gen-IR counterpart of spill_end: produces a value of `result_type` reloaded
// from the paired spill_begin.
static IrInstGen *ir_build_spill_end_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGenSpillBegin *begin,
        ZigType *result_type)
{
    IrInstGenSpillEnd *inst = ir_build_inst_gen<IrInstGenSpillEnd>(
            &ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = result_type;
    inst->begin = begin;

    ir_ref_inst_gen(&begin->base);

    return &inst->base;
}
|
|
|
|
// Build the gen-IR instruction extracting one element from a vector; the
// result type is the vector's element type.
static IrInstGen *ir_build_vector_extract_elem(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *vector, IrInstGen *index)
{
    IrInstGenVectorExtractElem *inst = ir_build_inst_gen<IrInstGenVectorExtractElem>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    inst->base.value->type = vector->value->type->data.vector.elem_type;
    inst->vector = vector;
    inst->index = index;

    ir_ref_inst_gen(vector);
    ir_ref_inst_gen(index);

    return &inst->base;
}
|
|
|
|
// Build the source-IR for `@wasmMemorySize(index)`.
static IrInstSrc *ir_build_wasm_memory_size_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *index) {
    IrInstSrcWasmMemorySize *inst = ir_build_instruction<IrInstSrcWasmMemorySize>(irb, scope, source_node);
    inst->index = index;

    ir_ref_instruction(index, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Gen-IR counterpart of @wasmMemorySize; the result is a u32.
static IrInstGen *ir_build_wasm_memory_size_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *index) {
    IrInstGenWasmMemorySize *inst = ir_build_inst_gen<IrInstGenWasmMemorySize>(
            &ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_u32;
    inst->index = index;

    ir_ref_inst_gen(index);

    return &inst->base;
}
|
|
|
|
// Build the source-IR for `@wasmMemoryGrow(index, delta)`.
static IrInstSrc *ir_build_wasm_memory_grow_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *index, IrInstSrc *delta) {
    IrInstSrcWasmMemoryGrow *inst = ir_build_instruction<IrInstSrcWasmMemoryGrow>(irb, scope, source_node);
    inst->index = index;
    inst->delta = delta;

    ir_ref_instruction(index, irb->current_basic_block);
    ir_ref_instruction(delta, irb->current_basic_block);

    return &inst->base;
}
|
|
|
|
// Gen-IR counterpart of @wasmMemoryGrow; the result is an i32.
static IrInstGen *ir_build_wasm_memory_grow_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *index, IrInstGen *delta) {
    IrInstGenWasmMemoryGrow *inst = ir_build_inst_gen<IrInstGenWasmMemoryGrow>(
            &ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_i32;
    inst->index = index;
    inst->delta = delta;

    ir_ref_inst_gen(index);
    ir_ref_inst_gen(delta);

    return &inst->base;
}
|
|
|
|
// Build the source-IR instruction for `@src()`; it has no operands.
static IrInstSrc *ir_build_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcSrc *inst = ir_build_instruction<IrInstSrcSrc>(irb, scope, source_node);
    return &inst->base;
}
|
|
|
|
// Walk the scope chain from inner_scope up to (but not including) outer_scope
// and tally how many defers of each kind would run on exit:
// results[ReturnKindUnconditional] counts `defer`, results[ReturnKindError]
// counts `errdefer`. The walk stops early at a decls or fn-def scope.
static void ir_count_defers(IrBuilderSrc *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
    results[ReturnKindUnconditional] = 0;
    results[ReturnKindError] = 0;

    for (Scope *scope = inner_scope; scope != outer_scope; ) {
        assert(scope);
        switch (scope->id) {
            case ScopeIdDefer: {
                AstNode *defer_node = scope->source_node;
                assert(defer_node->type == NodeTypeDefer);
                results[defer_node->data.defer.kind] += 1;
                scope = scope->parent;
                continue;
            }
            case ScopeIdDecls:
            case ScopeIdFnDef:
                // Hit a declaration/function boundary; nothing further out
                // can contribute defers.
                return;
            case ScopeIdBlock:
            case ScopeIdVarDecl:
            case ScopeIdLoop:
            case ScopeIdSuspend:
            case ScopeIdCompTime:
            case ScopeIdNoSuspend:
            case ScopeIdRuntime:
            case ScopeIdTypeOf:
            case ScopeIdExpr:
                scope = scope->parent;
                continue;
            case ScopeIdDeferExpr:
            case ScopeIdCImport:
                zig_unreachable();
        }
    }
}
|
|
|
|
// Mark an instruction as compiler-generated and return it, so the call can be
// chained around another ir_build_* call.
static IrInstSrc *ir_mark_gen(IrInstSrc *inst) {
    inst->is_gen = true;
    return inst;
}
|
|
|
|
// Generate IR for every defer expression that runs when control leaves
// inner_scope for outer_scope.
//
// irb          - builder for the current executable
// inner_scope  - scope being exited (walk starts here)
// outer_scope  - scope being exited to (walk stops here; also stops at
//                decls / fn-def boundaries)
// is_noreturn  - optional out param; set true if any generated defer
//                expression is noreturn
// err_value    - the error being returned, or null when generating a
//                non-error exit; errdefers are skipped when this is null
//
// Returns false if a compile error was emitted (the error has already been
// added to the error list), true otherwise.
static bool ir_gen_defers_for_block(IrBuilderSrc *irb, Scope *inner_scope, Scope *outer_scope, bool *is_noreturn, IrInstSrc *err_value) {
    Scope *scope = inner_scope;
    if (is_noreturn != nullptr) *is_noreturn = false;
    while (scope != outer_scope) {
        if (!scope)
            return true;

        switch (scope->id) {
            case ScopeIdDefer: {
                AstNode *defer_node = scope->source_node;
                assert(defer_node->type == NodeTypeDefer);
                ReturnKind defer_kind = defer_node->data.defer.kind;
                AstNode *defer_expr_node = defer_node->data.defer.expr;
                AstNode *defer_var_node = defer_node->data.defer.err_payload;

                if (defer_kind == ReturnKindError && err_value == nullptr) {
                    // This is an `errdefer` but we're generating code for a
                    // `return` that doesn't return an error, skip it
                    scope = scope->parent;
                    continue;
                }

                Scope *defer_expr_scope = defer_node->data.defer.expr_scope;
                if (defer_var_node != nullptr) {
                    // `errdefer |e| ...`: bind the error value to a payload
                    // variable visible inside the defer expression.
                    assert(defer_kind == ReturnKindError);
                    assert(defer_var_node->type == NodeTypeSymbol);
                    Buf *var_name = defer_var_node->data.symbol_expr.symbol;

                    if (defer_expr_node->type == NodeTypeUnreachable) {
                        // The payload can never be observed by `unreachable`.
                        add_node_error(irb->codegen, defer_var_node,
                            buf_sprintf("unused variable: '%s'", buf_ptr(var_name)));
                        return false;
                    }

                    IrInstSrc *is_comptime;
                    if (ir_should_inline(irb->exec, defer_expr_scope)) {
                        is_comptime = ir_build_const_bool(irb, defer_expr_scope,
                            defer_expr_node, true);
                    } else {
                        is_comptime = ir_build_test_comptime(irb, defer_expr_scope,
                            defer_expr_node, err_value);
                    }

                    ZigVar *err_var = ir_create_var(irb, defer_var_node, defer_expr_scope,
                        var_name, true, true, false, is_comptime);
                    build_decl_var_and_init(irb, defer_expr_scope, defer_var_node, err_var, err_value,
                        buf_ptr(var_name), is_comptime);

                    defer_expr_scope = err_var->child_scope;
                }

                IrInstSrc *defer_expr_value = ir_gen_node(irb, defer_expr_node, defer_expr_scope);
                if (defer_expr_value == irb->codegen->invalid_inst_src) {
                    // BUGFIX: this previously returned
                    // `irb->codegen->invalid_inst_src` from a bool function,
                    // which implicitly converted the non-null pointer to
                    // `true` and hid the failure from callers. The error has
                    // already been reported; signal failure like the
                    // unused-variable path above does.
                    return false;
                }

                if (defer_expr_value->is_noreturn) {
                    if (is_noreturn != nullptr) *is_noreturn = true;
                } else {
                    // A defer expression must evaluate to void; emit the
                    // compiler-generated check.
                    ir_mark_gen(ir_build_check_statement_is_void(irb, defer_expr_scope, defer_expr_node,
                                defer_expr_value));
                }
                scope = scope->parent;
                continue;
            }
            case ScopeIdDecls:
            case ScopeIdFnDef:
                return true;
            case ScopeIdBlock:
            case ScopeIdVarDecl:
            case ScopeIdLoop:
            case ScopeIdSuspend:
            case ScopeIdCompTime:
            case ScopeIdNoSuspend:
            case ScopeIdRuntime:
            case ScopeIdTypeOf:
            case ScopeIdExpr:
                scope = scope->parent;
                continue;
            case ScopeIdDeferExpr:
            case ScopeIdCImport:
                zig_unreachable();
        }
    }
    return true;
}
|
|
|
|
static void ir_set_cursor_at_end_gen(IrBuilderGen *irb, IrBasicBlockGen *basic_block) {
|
|
assert(basic_block);
|
|
irb->current_basic_block = basic_block;
|
|
}
|
|
|
|
static void ir_set_cursor_at_end(IrBuilderSrc *irb, IrBasicBlockSrc *basic_block) {
|
|
assert(basic_block);
|
|
irb->current_basic_block = basic_block;
|
|
}
|
|
|
|
static void ir_append_basic_block_gen(IrBuilderGen *irb, IrBasicBlockGen *bb) {
|
|
assert(!bb->already_appended);
|
|
bb->already_appended = true;
|
|
irb->exec->basic_block_list.append(bb);
|
|
}
|
|
|
|
static void ir_set_cursor_at_end_and_append_block_gen(IrBuilderGen *irb, IrBasicBlockGen *basic_block) {
|
|
ir_append_basic_block_gen(irb, basic_block);
|
|
ir_set_cursor_at_end_gen(irb, basic_block);
|
|
}
|
|
|
|
static void ir_set_cursor_at_end_and_append_block(IrBuilderSrc *irb, IrBasicBlockSrc *basic_block) {
|
|
basic_block->index = irb->exec->basic_block_list.length;
|
|
irb->exec->basic_block_list.append(basic_block);
|
|
ir_set_cursor_at_end(irb, basic_block);
|
|
}
|
|
|
|
// Find the innermost enclosing suspend scope, or null if none exists within
// the current function definition.
static ScopeSuspend *get_scope_suspend(Scope *scope) {
    for (Scope *it = scope; it != nullptr; it = it->parent) {
        if (it->id == ScopeIdSuspend)
            return reinterpret_cast<ScopeSuspend *>(it);
        if (it->id == ScopeIdFnDef)
            break; // don't look past the function boundary
    }
    return nullptr;
}
|
|
|
|
// Find the innermost enclosing defer-expression scope, or null if none exists
// within the current function definition.
static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
    for (Scope *it = scope; it != nullptr; it = it->parent) {
        if (it->id == ScopeIdDeferExpr)
            return reinterpret_cast<ScopeDeferExpr *>(it);
        if (it->id == ScopeIdFnDef)
            break; // don't look past the function boundary
    }
    return nullptr;
}
|
|
|
|
// Generate source IR for a return expression (`return x` or `try`-style
// error return). The control-flow graph built here runs the appropriate
// defers/errdefers before the actual return instruction.
//
// irb        - builder for the current executable
// scope      - scope containing the return expression
// node       - the NodeTypeReturnExpr AST node
// lval       - (ReturnKindError only) whether the caller wants a pointer
// result_loc - (ReturnKindError only) result location for the unwrapped value
//
// Returns the built instruction, or irb->codegen->invalid_inst_src on error.
static IrInstSrc *ir_gen_return(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
    assert(node->type == NodeTypeReturnExpr);

    // Returning from inside a defer expression is a compile error; report it
    // once per defer scope.
    ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope);
    if (scope_defer_expr) {
        if (!scope_defer_expr->reported_err) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot return from defer expression"));
            scope_defer_expr->reported_err = true;
        }
        return irb->codegen->invalid_inst_src;
    }

    // Defers are generated from `scope` out to the executable's begin scope.
    Scope *outer_scope = irb->exec->begin_scope;

    AstNode *expr_node = node->data.return_expr.expr;
    switch (node->data.return_expr.kind) {
        case ReturnKindUnconditional:
            // `return` / `return expr`
            {
                ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
                result_loc_ret->base.id = ResultLocIdReturn;
                ir_build_reset_result(irb, scope, node, &result_loc_ret->base);

                IrInstSrc *return_value;
                if (expr_node) {
                    // Temporarily set this so that if we return a type it gets the name of the function
                    ZigFn *prev_name_fn = irb->exec->name_fn;
                    irb->exec->name_fn = exec_fn_entry(irb->exec);
                    return_value = ir_gen_node_extra(irb, expr_node, scope, LValNone, &result_loc_ret->base);
                    irb->exec->name_fn = prev_name_fn;
                    if (return_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;
                } else {
                    // Bare `return` returns void.
                    return_value = ir_build_const_void(irb, scope, node);
                    ir_build_end_expr(irb, scope, node, return_value, &result_loc_ret->base);
                }

                ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value, result_loc_ret));

                size_t defer_counts[2];
                ir_count_defers(irb, scope, outer_scope, defer_counts);
                bool have_err_defers = defer_counts[ReturnKindError] > 0;
                if (!have_err_defers && !irb->codegen->have_err_ret_tracing) {
                    // only generate unconditional defers
                    if (!ir_gen_defers_for_block(irb, scope, outer_scope, nullptr, nullptr))
                        return irb->codegen->invalid_inst_src;
                    IrInstSrc *result = ir_build_return_src(irb, scope, node, nullptr);
                    result_loc_ret->base.source_instruction = result;
                    return result;
                }
                bool should_inline = ir_should_inline(irb->exec, scope);

                // There are errdefers (or error return tracing): branch on
                // whether the returned value is an error so errdefers run
                // only on the error path.
                IrBasicBlockSrc *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
                IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");

                IrInstSrc *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);

                IrInstSrc *is_comptime;
                if (should_inline) {
                    is_comptime = ir_build_const_bool(irb, scope, node, should_inline);
                } else {
                    is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
                }

                ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
                // Both paths converge here for the final return instruction.
                IrBasicBlockSrc *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");

                // Error path: run defers AND errdefers, then (if enabled and
                // not inlined) record the error return address.
                ir_set_cursor_at_end_and_append_block(irb, err_block);
                if (!ir_gen_defers_for_block(irb, scope, outer_scope, nullptr, return_value))
                    return irb->codegen->invalid_inst_src;
                if (irb->codegen->have_err_ret_tracing && !should_inline) {
                    ir_build_save_err_ret_addr_src(irb, scope, node);
                }
                ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);

                // Non-error path: run only the unconditional defers.
                ir_set_cursor_at_end_and_append_block(irb, ok_block);
                if (!ir_gen_defers_for_block(irb, scope, outer_scope, nullptr, nullptr))
                    return irb->codegen->invalid_inst_src;
                ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);

                ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
                IrInstSrc *result = ir_build_return_src(irb, scope, node, nullptr);
                result_loc_ret->base.source_instruction = result;
                return result;
            }
        case ReturnKindError:
            // `try expr` / `expr catch |err| return err` style: return the
            // error if there is one, otherwise continue with the payload.
            {
                assert(expr_node);
                IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
                if (err_union_ptr == irb->codegen->invalid_inst_src)
                    return irb->codegen->invalid_inst_src;
                IrInstSrc *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true, false);

                IrBasicBlockSrc *return_block = ir_create_basic_block(irb, scope, "ErrRetReturn");
                IrBasicBlockSrc *continue_block = ir_create_basic_block(irb, scope, "ErrRetContinue");
                IrInstSrc *is_comptime;
                bool should_inline = ir_should_inline(irb->exec, scope);
                if (should_inline) {
                    is_comptime = ir_build_const_bool(irb, scope, node, true);
                } else {
                    is_comptime = ir_build_test_comptime(irb, scope, node, is_err_val);
                }
                ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err_val, return_block, continue_block, is_comptime));

                // Error branch: unwrap the error code and return it.
                ir_set_cursor_at_end_and_append_block(irb, return_block);
                IrInstSrc *err_val_ptr = ir_build_unwrap_err_code_src(irb, scope, node, err_union_ptr);
                IrInstSrc *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
                ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val, nullptr));
                // Spill the error code across the defer expressions, which
                // may clobber it (e.g. across suspend points).
                IrInstSrcSpillBegin *spill_begin = ir_build_spill_begin_src(irb, scope, node, err_val,
                        SpillIdRetErrCode);
                ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
                result_loc_ret->base.id = ResultLocIdReturn;
                ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
                ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);

                bool is_noreturn = false;
                if (!ir_gen_defers_for_block(irb, scope, outer_scope, &is_noreturn, err_val)) {
                    return irb->codegen->invalid_inst_src;
                }
                // If a defer expression was noreturn, the return below would
                // be unreachable; skip emitting it.
                if (!is_noreturn) {
                    if (irb->codegen->have_err_ret_tracing && !should_inline) {
                        ir_build_save_err_ret_addr_src(irb, scope, node);
                    }
                    err_val = ir_build_spill_end_src(irb, scope, node, spill_begin);
                    IrInstSrc *ret_inst = ir_build_return_src(irb, scope, node, err_val);
                    result_loc_ret->base.source_instruction = ret_inst;
                }

                // Non-error branch: produce the payload (pointer or loaded
                // value, depending on what the caller asked for).
                ir_set_cursor_at_end_and_append_block(irb, continue_block);
                IrInstSrc *unwrapped_ptr = ir_build_unwrap_err_payload_src(irb, scope, node, err_union_ptr, false, false);
                if (lval == LValPtr)
                    return unwrapped_ptr;
                else
                    return ir_expr_wrap(irb, scope, ir_build_load_ptr(irb, scope, node, unwrapped_ptr), result_loc);
            }
    }
    zig_unreachable();
}
|
|
|
|
// Create a local variable entry and its child scope, diagnosing name
// collisions.
//
// codegen         - global compiler state
// node            - AST node declaring the variable (used for error locations)
// parent_scope    - scope the variable is declared in
// name            - variable name, or null for an anonymous variable
// src_is_const    - declared `const` in source
// gen_is_const    - treated as const for codegen purposes
// is_shadowable   - allowed to shadow an existing name (required when
//                   name == nullptr)
// is_comptime     - optional instruction marking the var comptime-known
// skip_name_check - suppress redeclaration/shadowing diagnostics
//
// On a name collision the variable is still returned, with its type set to
// the invalid type so analysis can continue collecting errors.
static ZigVar *create_local_var(CodeGen *codegen, AstNode *node, Scope *parent_scope,
        Buf *name, bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstSrc *is_comptime,
        bool skip_name_check)
{
    ZigVar *variable_entry = heap::c_allocator.create<ZigVar>();
    variable_entry->parent_scope = parent_scope;
    variable_entry->shadowable = is_shadowable;
    variable_entry->is_comptime = is_comptime;
    variable_entry->src_arg_index = SIZE_MAX;
    variable_entry->const_value = codegen->pass1_arena->create<ZigValue>();

    // The variable holds a reference to the comptime flag instruction.
    if (is_comptime != nullptr) {
        is_comptime->base.ref_count += 1;
    }

    if (name) {
        variable_entry->name = strdup(buf_ptr(name));

        if (!skip_name_check) {
            // Collision checks, in order: existing variable, primitive type
            // name, then top-level declaration.
            ZigVar *existing_var = find_variable(codegen, parent_scope, name, nullptr);
            if (existing_var && !existing_var->shadowable) {
                // Only report if the previous declaration wasn't already
                // marked invalid (avoids cascading duplicate errors).
                if (existing_var->var_type == nullptr || !type_is_invalid(existing_var->var_type)) {
                    ErrorMsg *msg = add_node_error(codegen, node,
                            buf_sprintf("redeclaration of variable '%s'", buf_ptr(name)));
                    add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
                }
                variable_entry->var_type = codegen->builtin_types.entry_invalid;
            } else {
                ZigType *type;
                if (get_primitive_type(codegen, name, &type) != ErrorPrimitiveTypeNotFound) {
                    add_node_error(codegen, node,
                            buf_sprintf("variable shadows primitive type '%s'", buf_ptr(name)));
                    variable_entry->var_type = codegen->builtin_types.entry_invalid;
                } else {
                    Tld *tld = find_decl(codegen, parent_scope, name);
                    if (tld != nullptr) {
                        bool want_err_msg = true;
                        if (tld->id == TldIdVar) {
                            // Suppress the message when the colliding decl's
                            // own type is already invalid.
                            ZigVar *var = reinterpret_cast<TldVar *>(tld)->var;
                            if (var != nullptr && var->var_type != nullptr && type_is_invalid(var->var_type)) {
                                want_err_msg = false;
                            }
                        }
                        if (want_err_msg) {
                            ErrorMsg *msg = add_node_error(codegen, node,
                                    buf_sprintf("redefinition of '%s'", buf_ptr(name)));
                            add_error_note(codegen, msg, tld->source_node, buf_sprintf("previous definition is here"));
                        }
                        variable_entry->var_type = codegen->builtin_types.entry_invalid;
                    }
                }
            }
        }
    } else {
        assert(is_shadowable);
        // TODO make this name not actually be in scope. user should be able to make a variable called "_anon"
        // might already be solved, let's just make sure it has test coverage
        // maybe we put a prefix on this so the debug info doesn't clobber user debug info for same named variables
        variable_entry->name = "_anon";
    }

    variable_entry->src_is_const = src_is_const;
    variable_entry->gen_is_const = gen_is_const;
    variable_entry->decl_node = node;
    // The child scope is where the variable is actually visible.
    variable_entry->child_scope = create_var_scope(codegen, node, parent_scope, variable_entry);

    return variable_entry;
}
|
|
|
|
// Set name to nullptr to make the variable anonymous (not visible to programmer).
|
|
// After you call this function var->child_scope has the variable in scope
|
|
// Create a variable for IR generation. A variable named "_" is treated as
// anonymous and shadowable (discard semantics).
static ZigVar *ir_create_var(IrBuilderSrc *irb, AstNode *node, Scope *scope, Buf *name,
        bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstSrc *is_comptime)
{
    bool discard = (name != nullptr) && buf_eql_str(name, "_");
    Buf *effective_name = discard ? nullptr : name;
    bool effective_shadowable = discard ? true : is_shadowable;
    ZigVar *var = create_local_var(irb->codegen, node, scope, effective_name,
            src_is_const, gen_is_const, effective_shadowable, is_comptime, false);
    assert(var->child_scope);
    return var;
}
|
|
|
|
// Allocate one peer result location under `peer_parent`, inheriting the
// parent's source instruction and write-through-const permission.
static ResultLocPeer *create_peer_result(ResultLocPeerParent *peer_parent) {
    ResultLocPeer *peer = heap::c_allocator.create<ResultLocPeer>();
    peer->base.id = ResultLocIdPeer;
    peer->base.source_instruction = peer_parent->base.source_instruction;
    peer->base.allow_write_through_const = peer_parent->parent->allow_write_through_const;
    peer->parent = peer_parent;
    return peer;
}
|
|
|
|
// Report an error (and return true) if `name` duplicates a block or loop
// label already in scope within the current function definition.
static bool is_duplicate_label(CodeGen *g, Scope *scope, AstNode *node, Buf *name) {
    if (name == nullptr) return false;

    while (scope != nullptr && scope->id != ScopeIdFnDef) {
        if (scope->id == ScopeIdBlock || scope->id == ScopeIdLoop) {
            Buf *existing_label = (scope->id == ScopeIdBlock)
                ? ((ScopeBlock *)scope)->name
                : ((ScopeLoop *)scope)->name;
            if (existing_label != nullptr && buf_eql_buf(name, existing_label)) {
                ErrorMsg *msg = add_node_error(g, node, buf_sprintf("redeclaration of label '%s'", buf_ptr(name)));
                add_error_note(g, msg, scope->source_node, buf_sprintf("previous declaration is here"));
                return true;
            }
        }
        scope = scope->parent;
    }
    return false;
}
|
|
|
|
static IrInstSrc *ir_gen_block(IrBuilderSrc *irb, Scope *parent_scope, AstNode *block_node, LVal lval,
|
|
ResultLoc *result_loc)
|
|
{
|
|
assert(block_node->type == NodeTypeBlock);
|
|
|
|
ZigList<IrInstSrc *> incoming_values = {0};
|
|
ZigList<IrBasicBlockSrc *> incoming_blocks = {0};
|
|
|
|
if (is_duplicate_label(irb->codegen, parent_scope, block_node, block_node->data.block.name))
|
|
return irb->codegen->invalid_inst_src;
|
|
|
|
ScopeBlock *scope_block = create_block_scope(irb->codegen, block_node, parent_scope);
|
|
|
|
Scope *outer_block_scope = &scope_block->base;
|
|
Scope *child_scope = outer_block_scope;
|
|
|
|
ZigFn *fn_entry = scope_fn_entry(parent_scope);
|
|
if (fn_entry && fn_entry->child_scope == parent_scope) {
|
|
fn_entry->def_scope = scope_block;
|
|
}
|
|
|
|
if (block_node->data.block.statements.length == 0) {
|
|
if (scope_block->name != nullptr) {
|
|
add_node_error(irb->codegen, block_node, buf_sprintf("unused block label"));
|
|
}
|
|
// {}
|
|
return ir_lval_wrap(irb, parent_scope, ir_build_const_void(irb, child_scope, block_node), lval, result_loc);
|
|
}
|
|
|
|
if (block_node->data.block.name != nullptr) {
|
|
scope_block->lval = lval;
|
|
scope_block->incoming_blocks = &incoming_blocks;
|
|
scope_block->incoming_values = &incoming_values;
|
|
scope_block->end_block = ir_create_basic_block(irb, parent_scope, "BlockEnd");
|
|
scope_block->is_comptime = ir_build_const_bool(irb, parent_scope, block_node,
|
|
ir_should_inline(irb->exec, parent_scope));
|
|
|
|
scope_block->peer_parent = heap::c_allocator.create<ResultLocPeerParent>();
|
|
scope_block->peer_parent->base.id = ResultLocIdPeerParent;
|
|
scope_block->peer_parent->base.source_instruction = scope_block->is_comptime;
|
|
scope_block->peer_parent->base.allow_write_through_const = result_loc->allow_write_through_const;
|
|
scope_block->peer_parent->end_bb = scope_block->end_block;
|
|
scope_block->peer_parent->is_comptime = scope_block->is_comptime;
|
|
scope_block->peer_parent->parent = result_loc;
|
|
ir_build_reset_result(irb, parent_scope, block_node, &scope_block->peer_parent->base);
|
|
}
|
|
|
|
bool is_continuation_unreachable = false;
|
|
bool found_invalid_inst = false;
|
|
IrInstSrc *noreturn_return_value = nullptr;
|
|
for (size_t i = 0; i < block_node->data.block.statements.length; i += 1) {
|
|
AstNode *statement_node = block_node->data.block.statements.at(i);
|
|
|
|
IrInstSrc *statement_value = ir_gen_node(irb, statement_node, child_scope);
|
|
if (statement_value == irb->codegen->invalid_inst_src) {
|
|
// keep generating all the elements of the block in case of error,
|
|
// we want to collect other compile errors
|
|
found_invalid_inst = true;
|
|
continue;
|
|
}
|
|
|
|
is_continuation_unreachable = instr_is_unreachable(statement_value);
|
|
if (is_continuation_unreachable) {
|
|
// keep the last noreturn statement value around in case we need to return it
|
|
noreturn_return_value = statement_value;
|
|
}
|
|
// This logic must be kept in sync with
|
|
// [STMT_EXPR_TEST_THING] <--- (search this token)
|
|
if (statement_node->type == NodeTypeDefer) {
|
|
// defer starts a new scope
|
|
child_scope = statement_node->data.defer.child_scope;
|
|
assert(child_scope);
|
|
} else if (statement_value->id == IrInstSrcIdDeclVar) {
|
|
// variable declarations start a new scope
|
|
IrInstSrcDeclVar *decl_var_instruction = (IrInstSrcDeclVar *)statement_value;
|
|
child_scope = decl_var_instruction->var->child_scope;
|
|
} else if (!is_continuation_unreachable) {
|
|
// this statement's value must be void
|
|
ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, statement_node, statement_value));
|
|
}
|
|
}
|
|
|
|
if (scope_block->name != nullptr && scope_block->name_used == false) {
|
|
add_node_error(irb->codegen, block_node, buf_sprintf("unused block label"));
|
|
}
|
|
|
|
if (found_invalid_inst)
|
|
return irb->codegen->invalid_inst_src;
|
|
|
|
if (is_continuation_unreachable) {
|
|
assert(noreturn_return_value != nullptr);
|
|
if (block_node->data.block.name == nullptr || incoming_blocks.length == 0) {
|
|
return noreturn_return_value;
|
|
}
|
|
|
|
if (scope_block->peer_parent != nullptr && scope_block->peer_parent->peers.length != 0) {
|
|
scope_block->peer_parent->peers.last()->next_bb = scope_block->end_block;
|
|
}
|
|
ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
|
|
IrInstSrc *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length,
|
|
incoming_blocks.items, incoming_values.items, scope_block->peer_parent);
|
|
return ir_expr_wrap(irb, parent_scope, phi, result_loc);
|
|
} else {
|
|
incoming_blocks.append(irb->current_basic_block);
|
|
IrInstSrc *else_expr_result = ir_mark_gen(ir_build_const_void(irb, parent_scope, block_node));
|
|
|
|
if (scope_block->peer_parent != nullptr) {
|
|
ResultLocPeer *peer_result = create_peer_result(scope_block->peer_parent);
|
|
scope_block->peer_parent->peers.append(peer_result);
|
|
ir_build_end_expr(irb, parent_scope, block_node, else_expr_result, &peer_result->base);
|
|
|
|
if (scope_block->peer_parent->peers.length != 0) {
|
|
scope_block->peer_parent->peers.last()->next_bb = scope_block->end_block;
|
|
}
|
|
}
|
|
|
|
incoming_values.append(else_expr_result);
|
|
}
|
|
|
|
bool is_return_from_fn = block_node == irb->main_block_node;
|
|
if (!is_return_from_fn) {
|
|
if (!ir_gen_defers_for_block(irb, child_scope, outer_block_scope, nullptr, nullptr))
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
|
|
IrInstSrc *result;
|
|
if (block_node->data.block.name != nullptr) {
|
|
ir_mark_gen(ir_build_br(irb, parent_scope, block_node, scope_block->end_block, scope_block->is_comptime));
|
|
ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
|
|
IrInstSrc *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length,
|
|
incoming_blocks.items, incoming_values.items, scope_block->peer_parent);
|
|
result = ir_expr_wrap(irb, parent_scope, phi, result_loc);
|
|
} else {
|
|
IrInstSrc *void_inst = ir_mark_gen(ir_build_const_void(irb, child_scope, block_node));
|
|
result = ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
|
|
}
|
|
if (!is_return_from_fn)
|
|
return result;
|
|
|
|
// no need for save_err_ret_addr because this cannot return error
|
|
// only generate unconditional defers
|
|
|
|
ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result, nullptr));
|
|
ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
|
|
result_loc_ret->base.id = ResultLocIdReturn;
|
|
ir_build_reset_result(irb, parent_scope, block_node, &result_loc_ret->base);
|
|
ir_mark_gen(ir_build_end_expr(irb, parent_scope, block_node, result, &result_loc_ret->base));
|
|
if (!ir_gen_defers_for_block(irb, child_scope, outer_block_scope, nullptr, nullptr))
|
|
return irb->codegen->invalid_inst_src;
|
|
return ir_mark_gen(ir_build_return_src(irb, child_scope, result->base.source_node, result));
|
|
}
|
|
|
|
// Lower a plain binary operator expression into a single bin-op instruction.
// Array concatenation and repetition are comptime-only operators, so their
// operands are evaluated inside a forced-comptime child scope; all other
// operators evaluate their operands in the surrounding scope.
static IrInstSrc *ir_gen_bin_op_id(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
    const bool force_comptime_operands = (op_id == IrBinOpArrayCat || op_id == IrBinOpArrayMult);
    Scope *operand_scope = force_comptime_operands ?
        create_comptime_scope(irb->codegen, node, scope) : scope;

    IrInstSrc *lhs = ir_gen_node(irb, node->data.bin_op_expr.op1, operand_scope);
    IrInstSrc *rhs = ir_gen_node(irb, node->data.bin_op_expr.op2, operand_scope);

    if (lhs == irb->codegen->invalid_inst_src || rhs == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_bin_op(irb, scope, node, op_id, lhs, rhs, true);
}
|
|
|
|
// Lower the `||` error-set merge operator: evaluate both operand error sets,
// then emit a merge instruction carrying a freshly generated anonymous type
// name for the resulting set.
static IrInstSrc *ir_gen_merge_err_sets(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    IrInstSrc *lhs = ir_gen_node(irb, node->data.bin_op_expr.op1, scope);
    IrInstSrc *rhs = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);

    if (lhs == irb->codegen->invalid_inst_src || rhs == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // TODO only pass type_name when the || operator is the top level AST node in the var decl expr
    Buf bare_name = BUF_INIT;
    Buf *type_name = get_anon_type_name(irb->codegen, irb->exec, "error", scope, node, &bare_name);

    return ir_build_merge_err_sets(irb, scope, node, lhs, rhs, type_name);
}
|
|
|
|
// Lower a simple assignment `lhs = rhs`. The left-hand side is evaluated as
// an assignment target (a pointer), that pointer is installed as the result
// location for the right-hand side, and the expression yields void.
static IrInstSrc *ir_gen_assign(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    IrInstSrc *target_ptr = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValAssign, nullptr);
    if (target_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // Wrap the target pointer in an instruction result location so the rhs
    // can be written directly in place.
    ResultLocInstruction *rl_inst = heap::c_allocator.create<ResultLocInstruction>();
    rl_inst->base.id = ResultLocIdInstruction;
    rl_inst->base.source_instruction = target_ptr;
    ir_ref_instruction(target_ptr, irb->current_basic_block);
    ir_build_reset_result(irb, scope, node, &rl_inst->base);

    IrInstSrc *value = ir_gen_node_extra(irb, node->data.bin_op_expr.op2, scope, LValNone,
            &rl_inst->base);
    if (value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // An assignment expression evaluates to void.
    return ir_build_const_void(irb, scope, node);
}
|
|
|
|
// Lower the `||=` error-set merge assignment: load the current value through
// the target pointer, merge it with the right-hand side, store the merged set
// back, and evaluate to void.
static IrInstSrc *ir_gen_assign_merge_err_sets(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    IrInstSrc *target_ptr = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValAssign, nullptr);
    if (target_ptr == irb->codegen->invalid_inst_src)
        return target_ptr;

    IrInstSrc *current_val = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, target_ptr);
    IrInstSrc *rhs_val = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (rhs_val == irb->codegen->invalid_inst_src)
        return rhs_val;

    // No type_name here: the merged set stays anonymous in assignment position.
    IrInstSrc *merged = ir_build_merge_err_sets(irb, scope, node, current_val, rhs_val, nullptr);
    ir_build_store_ptr(irb, scope, node, target_ptr, merged);
    return ir_build_const_void(irb, scope, node);
}
|
|
|
|
// Lower a compound assignment (`+=`, `*=`, `&=`, ...): read the current
// value through the assignment-target pointer, apply the binary operator
// with the right-hand side, store the result back, and evaluate to void.
static IrInstSrc *ir_gen_assign_op(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
    IrInstSrc *target_ptr = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValAssign, nullptr);
    if (target_ptr == irb->codegen->invalid_inst_src)
        return target_ptr;

    IrInstSrc *current_val = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, target_ptr);
    IrInstSrc *rhs_val = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (rhs_val == irb->codegen->invalid_inst_src)
        return rhs_val;

    IrInstSrc *combined = ir_build_bin_op(irb, scope, node, op_id, current_val, rhs_val, true);
    ir_build_store_ptr(irb, scope, node, target_ptr, combined);
    return ir_build_const_void(irb, scope, node);
}
|
|
|
|
// Lower `a or b` with short-circuit evaluation: op2 is generated only in the
// basic block taken when op1 is false, and the result is a phi over the two
// predecessor blocks.
static IrInstSrc *ir_gen_bool_or(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeBinOpExpr);

    IrInstSrc *val1 = ir_gen_node(irb, node->data.bin_op_expr.op1, scope);
    if (val1 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    // Capture the block where op1 finished; op1 may have emitted its own
    // control flow, so this need not be the block we started in.
    IrBasicBlockSrc *post_val1_block = irb->current_basic_block;

    // In a forced-inline (comptime) scope the branch is always comptime;
    // otherwise it is comptime exactly when val1 is comptime-known.
    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, val1);
    }

    // block for when val1 == false
    IrBasicBlockSrc *false_block = ir_create_basic_block(irb, scope, "BoolOrFalse");
    // block for when val1 == true (don't even evaluate the second part)
    IrBasicBlockSrc *true_block = ir_create_basic_block(irb, scope, "BoolOrTrue");

    ir_build_cond_br(irb, scope, node, val1, true_block, false_block, is_comptime);

    // False path: evaluate op2, then fall through into the merge block.
    ir_set_cursor_at_end_and_append_block(irb, false_block);
    IrInstSrc *val2 = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (val2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *post_val2_block = irb->current_basic_block;

    ir_build_br(irb, scope, node, true_block, is_comptime);

    // true_block doubles as the merge block for both paths.
    ir_set_cursor_at_end_and_append_block(irb, true_block);

    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = val1;
    incoming_values[1] = val2;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = post_val1_block;
    incoming_blocks[1] = post_val2_block;

    return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr);
}
|
|
|
|
// Lower `a and b` with short-circuit evaluation: op2 is generated only in the
// basic block taken when op1 is true, and the result is a phi over the two
// predecessor blocks. Mirror image of ir_gen_bool_or.
static IrInstSrc *ir_gen_bool_and(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeBinOpExpr);

    IrInstSrc *val1 = ir_gen_node(irb, node->data.bin_op_expr.op1, scope);
    if (val1 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    // Capture the block where op1 finished; op1 may have emitted its own
    // control flow, so this need not be the block we started in.
    IrBasicBlockSrc *post_val1_block = irb->current_basic_block;

    // In a forced-inline (comptime) scope the branch is always comptime;
    // otherwise it is comptime exactly when val1 is comptime-known.
    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, val1);
    }

    // block for when val1 == true
    IrBasicBlockSrc *true_block = ir_create_basic_block(irb, scope, "BoolAndTrue");
    // block for when val1 == false (don't even evaluate the second part)
    IrBasicBlockSrc *false_block = ir_create_basic_block(irb, scope, "BoolAndFalse");

    ir_build_cond_br(irb, scope, node, val1, true_block, false_block, is_comptime);

    // True path: evaluate op2, then fall through into the merge block.
    ir_set_cursor_at_end_and_append_block(irb, true_block);
    IrInstSrc *val2 = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (val2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *post_val2_block = irb->current_basic_block;

    ir_build_br(irb, scope, node, false_block, is_comptime);

    // false_block doubles as the merge block for both paths.
    ir_set_cursor_at_end_and_append_block(irb, false_block);

    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = val1;
    incoming_values[1] = val2;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = post_val1_block;
    incoming_blocks[1] = post_val2_block;

    return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr);
}
|
|
|
|
// Allocate and initialize a ResultLocPeerParent for a multi-branch expression
// whose branches rendezvous at end_block. The reset_result instruction must
// appear before the already-emitted conditional branch, so the branch is
// temporarily popped off the current block, the reset emitted, and the branch
// re-appended as the block terminator.
static ResultLocPeerParent *ir_build_result_peers(IrBuilderSrc *irb, IrInstSrc *cond_br_inst,
        IrBasicBlockSrc *end_block, ResultLoc *parent, IrInstSrc *is_comptime)
{
    ResultLocPeerParent *peer_parent = heap::c_allocator.create<ResultLocPeerParent>();
    peer_parent->base.id = ResultLocIdPeerParent;
    peer_parent->base.source_instruction = cond_br_inst;
    peer_parent->base.allow_write_through_const = parent->allow_write_through_const;
    peer_parent->end_bb = end_block;
    peer_parent->is_comptime = is_comptime;
    peer_parent->parent = parent;

    // cond_br_inst must be the last instruction in the current block.
    IrInstSrc *popped_inst = irb->current_basic_block->instruction_list.pop();
    ir_assert(popped_inst == cond_br_inst, &cond_br_inst->base);

    ir_build_reset_result(irb, cond_br_inst->base.scope, cond_br_inst->base.source_node, &peer_parent->base);
    irb->current_basic_block->instruction_list.append(popped_inst);

    return peer_parent;
}
|
|
|
|
// Convenience wrapper around ir_build_result_peers for two-way branches:
// creates the peer parent plus exactly two peers, the first continuing to
// else_block and the second to end_block.
static ResultLocPeerParent *ir_build_binary_result_peers(IrBuilderSrc *irb, IrInstSrc *cond_br_inst,
        IrBasicBlockSrc *else_block, IrBasicBlockSrc *end_block, ResultLoc *parent, IrInstSrc *is_comptime)
{
    ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, parent, is_comptime);

    IrBasicBlockSrc *next_blocks[2] = { else_block, end_block };
    for (size_t i = 0; i < 2; i += 1) {
        ResultLocPeer *peer = create_peer_result(peer_parent);
        peer->next_bb = next_blocks[i];
        peer_parent->peers.append(peer);
    }

    return peer_parent;
}
|
|
|
|
// Lower `a orelse b`: test the optional for non-null; the non-null path loads
// the unwrapped payload, the null path evaluates op2 (which may itself be
// noreturn). Result-location peers let both branches write directly into the
// caller's result location, and the final value is a phi at the merge block.
static IrInstSrc *ir_gen_orelse(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeBinOpExpr);

    AstNode *op1_node = node->data.bin_op_expr.op1;
    AstNode *op2_node = node->data.bin_op_expr.op2;

    // Evaluate the optional as a pointer so the payload can be unwrapped
    // in place without copying.
    IrInstSrc *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr, nullptr);
    if (maybe_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *maybe_val = ir_build_load_ptr(irb, parent_scope, node, maybe_ptr);
    IrInstSrc *is_non_null = ir_build_test_non_null_src(irb, parent_scope, node, maybe_val);

    // Branch is comptime when forced by the scope or when the null-test is
    // comptime-known.
    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, parent_scope)) {
        is_comptime = ir_build_const_bool(irb, parent_scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_non_null);
    }

    IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull");
    IrBasicBlockSrc *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd");
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime);

    // peer 0 feeds the null branch, peer 1 the non-null branch.
    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, ok_block, end_block,
            result_loc, is_comptime);

    // Null path: evaluate the fallback expression into peer 0.
    ir_set_cursor_at_end_and_append_block(irb, null_block);
    IrInstSrc *null_result = ir_gen_node_extra(irb, op2_node, parent_scope, LValNone,
            &peer_parent->peers.at(0)->base);
    if (null_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *after_null_block = irb->current_basic_block;
    // op2 may be noreturn (e.g. `orelse return`); only branch if reachable.
    if (!instr_is_unreachable(null_result))
        ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime));

    // Non-null path: unwrap the payload and publish it through peer 1.
    ir_set_cursor_at_end_and_append_block(irb, ok_block);
    IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, parent_scope, node, maybe_ptr, false);
    IrInstSrc *unwrapped_payload = ir_build_load_ptr(irb, parent_scope, node, unwrapped_ptr);
    ir_build_end_expr(irb, parent_scope, node, unwrapped_payload, &peer_parent->peers.at(1)->base);
    IrBasicBlockSrc *after_ok_block = irb->current_basic_block;
    ir_build_br(irb, parent_scope, node, end_block, is_comptime);

    // Merge: phi over the two incoming paths.
    ir_set_cursor_at_end_and_append_block(irb, end_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = null_result;
    incoming_values[1] = unwrapped_payload;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_null_block;
    incoming_blocks[1] = after_ok_block;
    IrInstSrc *phi = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc);
}
|
|
|
|
// Lower the `ErrSet!Payload` error-union type expression: evaluate the error
// set and payload type operands, then build the error-union instruction.
static IrInstSrc *ir_gen_error_union(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeBinOpExpr);

    IrInstSrc *err_set_inst = ir_gen_node(irb, node->data.bin_op_expr.op1, parent_scope);
    if (err_set_inst == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *payload_inst = ir_gen_node(irb, node->data.bin_op_expr.op2, parent_scope);
    if (payload_inst == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_error_union(irb, parent_scope, node, err_set_inst, payload_inst);
}
|
|
|
|
// Dispatch a binary-operator AST node to its IR-generation helper. Most
// operators map 1:1 onto an IrBinOp id via ir_gen_bin_op_id; assignments,
// boolean short-circuit operators, error-set merges, `orelse`, and error
// unions have dedicated helpers. Every result except `orelse` (which handles
// lval itself) is wrapped with ir_lval_wrap.
static IrInstSrc *ir_gen_bin_op(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
    assert(node->type == NodeTypeBinOpExpr);

    BinOpType bin_op_type = node->data.bin_op_expr.bin_op;
    switch (bin_op_type) {
        case BinOpTypeInvalid:
            zig_unreachable();
        // Simple and compound assignments.
        case BinOpTypeAssign:
            return ir_lval_wrap(irb, scope, ir_gen_assign(irb, scope, node), lval, result_loc);
        case BinOpTypeAssignTimes:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpMult), lval, result_loc);
        case BinOpTypeAssignTimesWrap:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpMultWrap), lval, result_loc);
        case BinOpTypeAssignDiv:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpDivUnspecified), lval, result_loc);
        case BinOpTypeAssignMod:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpRemUnspecified), lval, result_loc);
        case BinOpTypeAssignPlus:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpAdd), lval, result_loc);
        case BinOpTypeAssignPlusWrap:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpAddWrap), lval, result_loc);
        case BinOpTypeAssignMinus:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpSub), lval, result_loc);
        case BinOpTypeAssignMinusWrap:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpSubWrap), lval, result_loc);
        case BinOpTypeAssignBitShiftLeft:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc);
        case BinOpTypeAssignBitShiftRight:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc);
        case BinOpTypeAssignBitAnd:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinAnd), lval, result_loc);
        case BinOpTypeAssignBitXor:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinXor), lval, result_loc);
        case BinOpTypeAssignBitOr:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinOr), lval, result_loc);
        case BinOpTypeAssignMergeErrorSets:
            return ir_lval_wrap(irb, scope, ir_gen_assign_merge_err_sets(irb, scope, node), lval, result_loc);
        // Boolean operators with short-circuit evaluation.
        case BinOpTypeBoolOr:
            return ir_lval_wrap(irb, scope, ir_gen_bool_or(irb, scope, node), lval, result_loc);
        case BinOpTypeBoolAnd:
            return ir_lval_wrap(irb, scope, ir_gen_bool_and(irb, scope, node), lval, result_loc);
        // Comparisons.
        case BinOpTypeCmpEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpEq), lval, result_loc);
        case BinOpTypeCmpNotEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpNotEq), lval, result_loc);
        case BinOpTypeCmpLessThan:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessThan), lval, result_loc);
        case BinOpTypeCmpGreaterThan:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterThan), lval, result_loc);
        case BinOpTypeCmpLessOrEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessOrEq), lval, result_loc);
        case BinOpTypeCmpGreaterOrEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterOrEq), lval, result_loc);
        // Bitwise operators and shifts.
        case BinOpTypeBinOr:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinOr), lval, result_loc);
        case BinOpTypeBinXor:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinXor), lval, result_loc);
        case BinOpTypeBinAnd:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinAnd), lval, result_loc);
        case BinOpTypeBitShiftLeft:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc);
        case BinOpTypeBitShiftRight:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc);
        // Arithmetic (plain and wrapping variants).
        case BinOpTypeAdd:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpAdd), lval, result_loc);
        case BinOpTypeAddWrap:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpAddWrap), lval, result_loc);
        case BinOpTypeSub:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpSub), lval, result_loc);
        case BinOpTypeSubWrap:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpSubWrap), lval, result_loc);
        case BinOpTypeMult:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpMult), lval, result_loc);
        case BinOpTypeMultWrap:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpMultWrap), lval, result_loc);
        case BinOpTypeDiv:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpDivUnspecified), lval, result_loc);
        case BinOpTypeMod:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpRemUnspecified), lval, result_loc);
        // Array concatenation and repetition (comptime-only operators).
        case BinOpTypeArrayCat:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayCat), lval, result_loc);
        case BinOpTypeArrayMult:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult), lval, result_loc);
        case BinOpTypeMergeErrorSets:
            return ir_lval_wrap(irb, scope, ir_gen_merge_err_sets(irb, scope, node), lval, result_loc);
        // `orelse` performs its own lval wrapping internally.
        case BinOpTypeUnwrapOptional:
            return ir_gen_orelse(irb, scope, node, lval, result_loc);
        case BinOpTypeErrorUnion:
            return ir_lval_wrap(irb, scope, ir_gen_error_union(irb, scope, node), lval, result_loc);
    }
    zig_unreachable();
}
|
|
|
|
// Lower an integer literal to a comptime bigint constant; the literal's
// value was already parsed into a BigInt by the tokenizer/parser.
static IrInstSrc *ir_gen_int_lit(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeIntLiteral);
    BigInt *literal_value = node->data.int_literal.bigint;
    return ir_build_const_bigint(irb, scope, node, literal_value);
}
|
|
|
|
// Lower a float literal to a comptime bigfloat constant. Literals whose
// value overflowed during parsing are rejected with a compile error.
static IrInstSrc *ir_gen_float_lit(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeFloatLiteral);

    if (!node->data.float_literal.overflow)
        return ir_build_const_bigfloat(irb, scope, node, node->data.float_literal.bigfloat);

    add_node_error(irb->codegen, node, buf_sprintf("float literal out of range of any type"));
    return irb->codegen->invalid_inst_src;
}
|
|
|
|
// Lower a character literal to an unsigned comptime integer constant
// holding its codepoint value.
static IrInstSrc *ir_gen_char_lit(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeCharLiteral);
    uint32_t codepoint = node->data.char_literal.value;
    return ir_build_const_uint(irb, scope, node, codepoint);
}
|
|
|
|
// Lower the `null` literal to the comptime null constant.
static IrInstSrc *ir_gen_null_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeNullLiteral);
    return ir_build_const_null(irb, scope, node);
}
|
|
|
|
// After an undeclared-identifier error, install a placeholder invalid
// variable declaration in the outermost (file-level) decls scope so later
// references to the same name resolve to it instead of producing a cascade
// of redundant "undeclared identifier" errors.
static void populate_invalid_variable_in_scope(CodeGen *g, Scope *scope, AstNode *node, Buf *var_name) {
    // Walk to the root of the scope chain, remembering the LAST ScopeIdDecls
    // seen — i.e. the outermost decls scope (the file scope).
    ScopeDecls *scope_decls = nullptr;
    while (scope != nullptr) {
        if (scope->id == ScopeIdDecls) {
            scope_decls = reinterpret_cast<ScopeDecls *>(scope);
        }
        scope = scope->parent;
    }
    // Every scope chain is rooted in a decls scope; without this guard a
    // violated invariant would fall through to a null dereference below.
    assert(scope_decls != nullptr);
    TldVar *tld_var = heap::c_allocator.create<TldVar>();
    init_tld(&tld_var->base, TldIdVar, var_name, VisibModPub, node, &scope_decls->base);
    tld_var->base.resolution = TldResolutionInvalid;
    // Mark the variable with the invalid type so uses of it are suppressed.
    tld_var->var = add_variable(g, node, &scope_decls->base, var_name, false,
            g->invalid_inst_gen->value, &tld_var->base, g->builtin_types.entry_invalid);
    scope_decls->decl_table.put(var_name, &tld_var->base);
}
|
|
|
|
// Resolve an identifier expression. Lookup order: the `_` discard pseudo-
// variable, primitive type names, local variables (innermost scope first),
// then top-level declarations. Unresolved names produce an undeclared-
// identifier instruction unless an earlier failed import already poisoned
// this file's container scope.
static IrInstSrc *ir_gen_symbol(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
    Error err;
    assert(node->type == NodeTypeSymbol);

    Buf *variable_name = node->data.symbol_expr.symbol;

    // `_` is only legal as an assignment target; it lowers to a pointer
    // whose stores are discarded.
    if (buf_eql_str(variable_name, "_")) {
        if (lval == LValAssign) {
            IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, node);
            const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
            const_instruction->value->type = get_pointer_to_type(irb->codegen,
                    irb->codegen->builtin_types.entry_void, false);
            const_instruction->value->special = ConstValSpecialStatic;
            const_instruction->value->data.x_ptr.special = ConstPtrSpecialDiscard;
            return &const_instruction->base;
        } else {
            add_node_error(irb->codegen, node, buf_sprintf("`_` may only be used to assign things to"));
            return irb->codegen->invalid_inst_src;
        }
    }

    // Primitive type names (including arbitrary-width iN/uN) resolve to
    // type constants before any user declaration is consulted.
    ZigType *primitive_type;
    if ((err = get_primitive_type(irb->codegen, variable_name, &primitive_type))) {
        if (err == ErrorOverflow) {
            add_node_error(irb->codegen, node,
                buf_sprintf("primitive integer type '%s' exceeds maximum bit width of 65535",
                    buf_ptr(variable_name)));
            return irb->codegen->invalid_inst_src;
        }
        // Not a primitive: fall through to variable/decl lookup.
        assert(err == ErrorPrimitiveTypeNotFound);
    } else {
        IrInstSrc *value = ir_build_const_type(irb, scope, node, primitive_type);
        if (lval == LValPtr || lval == LValAssign) {
            return ir_build_ref_src(irb, scope, node, value);
        } else {
            return ir_expr_wrap(irb, scope, value, result_loc);
        }
    }

    // Local variable lookup; crossed_fndef_scope is reported so that
    // captures across function boundaries can be diagnosed later.
    ScopeFnDef *crossed_fndef_scope;
    ZigVar *var = find_variable(irb->codegen, scope, variable_name, &crossed_fndef_scope);
    if (var) {
        IrInstSrc *var_ptr = ir_build_var_ptr_x(irb, scope, node, var, crossed_fndef_scope);
        if (lval == LValPtr || lval == LValAssign) {
            return var_ptr;
        } else {
            return ir_expr_wrap(irb, scope, ir_build_load_ptr(irb, scope, node, var_ptr), result_loc);
        }
    }

    // Top-level declaration lookup.
    Tld *tld = find_decl(irb->codegen, scope, variable_name);
    if (tld) {
        IrInstSrc *decl_ref = ir_build_decl_ref(irb, scope, node, tld, lval);
        if (lval == LValPtr || lval == LValAssign) {
            return decl_ref;
        } else {
            return ir_expr_wrap(irb, scope, decl_ref, result_loc);
        }
    }

    if (get_container_scope(node->owner)->any_imports_failed) {
        // skip the error message since we had a failing import in this file
        // if an import breaks we don't need redundant undeclared identifier errors
        return irb->codegen->invalid_inst_src;
    }

    return ir_build_undeclared_identifier(irb, scope, node, variable_name);
}
|
|
|
|
// Lower `array[index]`: evaluate the array as a pointer, evaluate the
// subscript through a usize-typed result-location cast, and build an
// element-pointer instruction. For pointer/assignment contexts the pointer
// is returned directly; otherwise the element is loaded.
static IrInstSrc *ir_gen_array_access(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeArrayAccessExpr);

    AstNode *array_ref_node = node->data.array_access_expr.array_ref_expr;
    IrInstSrc *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LValPtr, nullptr);
    if (array_ref_instruction == irb->codegen->invalid_inst_src)
        return array_ref_instruction;

    // Create an usize-typed result location to hold the subscript value, this
    // makes it possible for the compiler to infer the subscript expression type
    // if needed
    IrInstSrc *usize_type_inst = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
    ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, usize_type_inst, no_result_loc());

    AstNode *subscript_node = node->data.array_access_expr.subscript;
    IrInstSrc *subscript_value = ir_gen_node_extra(irb, subscript_node, scope, LValNone, &result_loc_cast->base);
    if (subscript_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // Materialize the implicit cast of the subscript to usize.
    IrInstSrc *subscript_instruction = ir_build_implicit_cast(irb, scope, subscript_node, subscript_value, result_loc_cast);

    IrInstSrc *ptr_instruction = ir_build_elem_ptr(irb, scope, node, array_ref_instruction,
            subscript_instruction, true, PtrLenSingle, nullptr);
    if (lval == LValPtr || lval == LValAssign)
        return ptr_instruction;

    IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction);
    return ir_expr_wrap(irb, scope, load_ptr, result_loc);
}
|
|
|
|
// Lower `container.field`: evaluate the container as a pointer and emit a
// field-pointer instruction. Callers decide whether to load through it.
static IrInstSrc *ir_gen_field_access(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeFieldAccessExpr);

    IrInstSrc *container_ptr = ir_gen_node_extra(irb,
            node->data.field_access_expr.struct_expr, scope, LValPtr, nullptr);
    if (container_ptr == irb->codegen->invalid_inst_src)
        return container_ptr;

    return ir_build_field_ptr(irb, scope, node, container_ptr,
            node->data.field_access_expr.field_name, false);
}
|
|
|
|
// Lower the @addWithOverflow-family builtins. The four arguments are, in
// order: result type, operand 1, operand 2, and a pointer to receive the
// result. Each argument is generated left to right with early bail-out on
// the first invalid one.
static IrInstSrc *ir_gen_overflow_op(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrOverflowOp op) {
    assert(node->type == NodeTypeFnCallExpr);

    AstNode *arg_nodes[4];
    for (size_t i = 0; i < 4; i += 1)
        arg_nodes[i] = node->data.fn_call_expr.params.at(i);

    // args[0]=type, args[1]=op1, args[2]=op2, args[3]=result pointer
    IrInstSrc *args[4];
    for (size_t i = 0; i < 4; i += 1) {
        args[i] = ir_gen_node(irb, arg_nodes[i], scope);
        if (args[i] == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    }

    return ir_build_overflow_op_src(irb, scope, node, op, args[0], args[1], args[2], args[3]);
}
|
|
|
|
// Lower the @mulAdd builtin (fused multiply-add). The four arguments are,
// in order: result type, multiplicand, multiplier, and addend. Each is
// generated left to right with early bail-out on the first invalid one.
static IrInstSrc *ir_gen_mul_add(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeFnCallExpr);

    AstNode *arg_nodes[4];
    for (size_t i = 0; i < 4; i += 1)
        arg_nodes[i] = node->data.fn_call_expr.params.at(i);

    // args[0]=type, args[1]=op1, args[2]=op2, args[3]=op3
    IrInstSrc *args[4];
    for (size_t i = 0; i < 4; i += 1) {
        args[i] = ir_gen_node(irb, arg_nodes[i], scope);
        if (args[i] == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    }

    return ir_build_mul_add_src(irb, scope, node, args[0], args[1], args[2], args[3]);
}
|
|
|
|
// Lowers the @This builtin: walks outward from the current scope to the
// nearest declaration scope and yields the container type declared there,
// or — at the top level of a file, where no container type is set — the
// import itself.
static IrInstSrc *ir_gen_this(IrBuilderSrc *irb, Scope *orig_scope, AstNode *node) {
    Scope *it_scope = orig_scope;
    while (it_scope != nullptr) {
        if (it_scope->id == ScopeIdDecls) {
            ScopeDecls *decls_scope = (ScopeDecls *)it_scope;
            // A null container_type marks file scope; @This() then refers to
            // the file's implicit struct (the import).
            if (decls_scope->container_type == nullptr)
                return ir_build_const_import(irb, orig_scope, node, decls_scope->import);
            return ir_build_const_type(irb, orig_scope, node, decls_scope->container_type);
        }
        it_scope = it_scope->parent;
    }
    // Every scope chain is rooted in a ScopeIdDecls, so the walk cannot
    // fall off the end.
    zig_unreachable();
}
|
|
|
|
// Lowers @asyncCall(frame_buffer, ret_ptr, fn_ref, args).
// `await_node` is non-null when the call appears under an `await`; in that
// case the call is emitted with no modifier (the await handles suspension),
// otherwise it is emitted as an async call.
// Returns invalid_inst_src on any error.
static IrInstSrc *ir_gen_async_call(IrBuilderSrc *irb, Scope *scope, AstNode *await_node, AstNode *call_node,
        LVal lval, ResultLoc *result_loc)
{
    // @asyncCall takes exactly 4 arguments; anything else is a compile error.
    if (call_node->data.fn_call_expr.params.length != 4) {
        add_node_error(irb->codegen, call_node,
            buf_sprintf("expected 4 arguments, found %" ZIG_PRI_usize,
                call_node->data.fn_call_expr.params.length));
        return irb->codegen->invalid_inst_src;
    }

    // Param 0: the frame buffer the async frame lives in.
    AstNode *bytes_node = call_node->data.fn_call_expr.params.at(0);
    IrInstSrc *bytes = ir_gen_node(irb, bytes_node, scope);
    if (bytes == irb->codegen->invalid_inst_src)
        return bytes;

    // Param 1: pointer to where the return value should be stored.
    AstNode *ret_ptr_node = call_node->data.fn_call_expr.params.at(1);
    IrInstSrc *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope);
    if (ret_ptr == irb->codegen->invalid_inst_src)
        return ret_ptr;

    // Param 2: the function being called.
    AstNode *fn_ref_node = call_node->data.fn_call_expr.params.at(2);
    IrInstSrc *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
    if (fn_ref == irb->codegen->invalid_inst_src)
        return fn_ref;

    CallModifier modifier = (await_node == nullptr) ? CallModifierAsync : CallModifierNone;
    bool is_async_call_builtin = true;
    // Param 3: the argument tuple. When it is written inline as a container
    // initializer (array form, or an empty struct literal), each element can
    // be lowered individually and passed as a direct argument list.
    AstNode *args_node = call_node->data.fn_call_expr.params.at(3);
    if (args_node->type == NodeTypeContainerInitExpr) {
        if (args_node->data.container_init_expr.kind == ContainerInitKindArray ||
            args_node->data.container_init_expr.entries.length == 0)
        {
            size_t arg_count = args_node->data.container_init_expr.entries.length;
            IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(arg_count);
            for (size_t i = 0; i < arg_count; i += 1) {
                AstNode *arg_node = args_node->data.container_init_expr.entries.at(i);
                IrInstSrc *arg = ir_gen_node(irb, arg_node, scope);
                if (arg == irb->codegen->invalid_inst_src)
                    return arg;
                args[i] = arg;
            }

            IrInstSrc *call = ir_build_call_src(irb, scope, call_node, nullptr, fn_ref, arg_count, args,
                ret_ptr, modifier, is_async_call_builtin, bytes, result_loc);
            return ir_lval_wrap(irb, scope, call, lval, result_loc);
        } else {
            // Anon struct literal (named-field form) is not supported here yet.
            exec_add_error_node(irb->codegen, irb->exec, args_node,
                buf_sprintf("TODO: @asyncCall with anon struct literal"));
            return irb->codegen->invalid_inst_src;
        }
    }
    // Otherwise the argument tuple is an arbitrary expression: lower it as a
    // single value and let the call instruction unpack it.
    IrInstSrc *args = ir_gen_node(irb, args_node, scope);
    if (args == irb->codegen->invalid_inst_src)
        return args;

    IrInstSrc *call = ir_build_async_call_extra(irb, scope, call_node, modifier, fn_ref, ret_ptr, bytes, args, result_loc);
    return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
|
|
|
|
// Generates IR for a call whose arguments are supplied as an array of AST
// nodes. `options` is non-null when the caller provides an explicit call
// options value (the @call form); otherwise `modifier` alone selects the
// call kind. Each argument is lowered through a cast result location so it
// is implicitly coerced to the corresponding parameter type.
// Returns invalid_inst_src on any error.
static IrInstSrc *ir_gen_fn_call_with_args(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        AstNode *fn_ref_node, CallModifier modifier, IrInstSrc *options,
        AstNode **args_ptr, size_t args_len, LVal lval, ResultLoc *result_loc)
{
    IrInstSrc *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
    if (fn_ref == irb->codegen->invalid_inst_src)
        return fn_ref;

    // The callee's type is queried so each argument's expected parameter
    // type can be recovered below via ir_build_arg_type.
    IrInstSrc *fn_type = ir_build_typeof_1(irb, scope, source_node, fn_ref);

    IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(args_len);
    for (size_t i = 0; i < args_len; i += 1) {
        AstNode *arg_node = args_ptr[i];

        // Look up the type of parameter i from the function type.
        // NOTE(review): the trailing `true` flag presumably relaxes the
        // lookup for var-args/generic parameters — confirm against
        // ir_build_arg_type's declaration.
        IrInstSrc *arg_index = ir_build_const_usize(irb, scope, arg_node, i);
        IrInstSrc *arg_type = ir_build_arg_type(irb, scope, source_node, fn_type, arg_index, true);
        // Each argument gets a fresh, reset result location wrapped in a
        // cast-to-parameter-type result location; the order of these three
        // instructions matters for later result-location analysis.
        ResultLoc *no_result = no_result_loc();
        ir_build_reset_result(irb, scope, source_node, no_result);
        ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, arg_type, no_result);

        IrInstSrc *arg = ir_gen_node_extra(irb, arg_node, scope, LValNone, &result_loc_cast->base);
        if (arg == irb->codegen->invalid_inst_src)
            return arg;

        // Record the implicitly-cast value as the actual call argument.
        args[i] = ir_build_implicit_cast(irb, scope, arg_node, arg, result_loc_cast);
    }

    IrInstSrc *fn_call;
    if (options != nullptr) {
        // Explicit options value: emit the @call-style instruction.
        fn_call = ir_build_call_args(irb, scope, source_node, options, fn_ref, args, args_len, result_loc);
    } else {
        fn_call = ir_build_call_src(irb, scope, source_node, nullptr, fn_ref, args_len, args, nullptr,
                modifier, false, nullptr, result_loc);
    }
    return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
}
|
|
|
|
static IrInstSrc *ir_gen_builtin_fn_call(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
|
|
ResultLoc *result_loc)
|
|
{
|
|
assert(node->type == NodeTypeFnCallExpr);
|
|
|
|
AstNode *fn_ref_expr = node->data.fn_call_expr.fn_ref_expr;
|
|
Buf *name = fn_ref_expr->data.symbol_expr.symbol;
|
|
auto entry = irb->codegen->builtin_fn_table.maybe_get(name);
|
|
|
|
if (!entry) {
|
|
add_node_error(irb->codegen, node,
|
|
buf_sprintf("invalid builtin function: '%s'", buf_ptr(name)));
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
|
|
BuiltinFnEntry *builtin_fn = entry->value;
|
|
size_t actual_param_count = node->data.fn_call_expr.params.length;
|
|
|
|
if (builtin_fn->param_count != SIZE_MAX && builtin_fn->param_count != actual_param_count) {
|
|
add_node_error(irb->codegen, node,
|
|
buf_sprintf("expected %" ZIG_PRI_usize " argument(s), found %" ZIG_PRI_usize,
|
|
builtin_fn->param_count, actual_param_count));
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
|
|
switch (builtin_fn->id) {
|
|
case BuiltinFnIdInvalid:
|
|
zig_unreachable();
|
|
case BuiltinFnIdTypeof:
|
|
{
|
|
Scope *sub_scope = create_typeof_scope(irb->codegen, node, scope);
|
|
|
|
size_t arg_count = node->data.fn_call_expr.params.length;
|
|
|
|
IrInstSrc *type_of;
|
|
|
|
if (arg_count == 0) {
|
|
add_node_error(irb->codegen, node,
|
|
buf_sprintf("expected at least 1 argument, found 0"));
|
|
return irb->codegen->invalid_inst_src;
|
|
} else if (arg_count == 1) {
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, sub_scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
type_of = ir_build_typeof_1(irb, scope, node, arg0_value);
|
|
} else {
|
|
IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(arg_count);
|
|
for (size_t i = 0; i < arg_count; i += 1) {
|
|
AstNode *arg_node = node->data.fn_call_expr.params.at(i);
|
|
IrInstSrc *arg = ir_gen_node(irb, arg_node, sub_scope);
|
|
if (arg == irb->codegen->invalid_inst_src)
|
|
return irb->codegen->invalid_inst_src;
|
|
args[i] = arg;
|
|
}
|
|
|
|
type_of = ir_build_typeof_n(irb, scope, node, args, arg_count);
|
|
}
|
|
return ir_lval_wrap(irb, scope, type_of, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSetCold:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *set_cold = ir_build_set_cold(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, set_cold, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSetRuntimeSafety:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *set_safety = ir_build_set_runtime_safety(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, set_safety, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSetFloatMode:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *set_float_mode = ir_build_set_float_mode(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, set_float_mode, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSizeof:
|
|
case BuiltinFnIdBitSizeof:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *size_of = ir_build_size_of(irb, scope, node, arg0_value, builtin_fn->id == BuiltinFnIdBitSizeof);
|
|
return ir_lval_wrap(irb, scope, size_of, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdImport:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *import = ir_build_import(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, import, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCImport:
|
|
{
|
|
IrInstSrc *c_import = ir_build_c_import(irb, scope, node);
|
|
return ir_lval_wrap(irb, scope, c_import, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCInclude:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
if (!exec_c_import_buf(irb->exec)) {
|
|
add_node_error(irb->codegen, node, buf_sprintf("C include valid only inside C import block"));
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
|
|
IrInstSrc *c_include = ir_build_c_include(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, c_include, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCDefine:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
if (!exec_c_import_buf(irb->exec)) {
|
|
add_node_error(irb->codegen, node, buf_sprintf("C define valid only inside C import block"));
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
|
|
IrInstSrc *c_define = ir_build_c_define(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, c_define, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCUndef:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
if (!exec_c_import_buf(irb->exec)) {
|
|
add_node_error(irb->codegen, node, buf_sprintf("C undef valid only inside C import block"));
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
|
|
IrInstSrc *c_undef = ir_build_c_undef(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, c_undef, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCompileErr:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *compile_err = ir_build_compile_err(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, compile_err, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCompileLog:
|
|
{
|
|
IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(actual_param_count);
|
|
|
|
for (size_t i = 0; i < actual_param_count; i += 1) {
|
|
AstNode *arg_node = node->data.fn_call_expr.params.at(i);
|
|
args[i] = ir_gen_node(irb, arg_node, scope);
|
|
if (args[i] == irb->codegen->invalid_inst_src)
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
|
|
IrInstSrc *compile_log = ir_build_compile_log(irb, scope, node, actual_param_count, args);
|
|
return ir_lval_wrap(irb, scope, compile_log, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdErrName:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *err_name = ir_build_err_name(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, err_name, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdEmbedFile:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *embed_file = ir_build_embed_file(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, embed_file, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCmpxchgWeak:
|
|
case BuiltinFnIdCmpxchgStrong:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
|
|
IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
|
|
if (arg3_value == irb->codegen->invalid_inst_src)
|
|
return arg3_value;
|
|
|
|
AstNode *arg4_node = node->data.fn_call_expr.params.at(4);
|
|
IrInstSrc *arg4_value = ir_gen_node(irb, arg4_node, scope);
|
|
if (arg4_value == irb->codegen->invalid_inst_src)
|
|
return arg4_value;
|
|
|
|
AstNode *arg5_node = node->data.fn_call_expr.params.at(5);
|
|
IrInstSrc *arg5_value = ir_gen_node(irb, arg5_node, scope);
|
|
if (arg5_value == irb->codegen->invalid_inst_src)
|
|
return arg5_value;
|
|
|
|
IrInstSrc *cmpxchg = ir_build_cmpxchg_src(irb, scope, node, arg0_value, arg1_value,
|
|
arg2_value, arg3_value, arg4_value, arg5_value, (builtin_fn->id == BuiltinFnIdCmpxchgWeak),
|
|
result_loc);
|
|
return ir_lval_wrap(irb, scope, cmpxchg, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdFence:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *fence = ir_build_fence(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, fence, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdReduce:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *reduce = ir_build_reduce(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, reduce, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdDivExact:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivExact, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdDivTrunc:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivTrunc, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdDivFloor:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivFloor, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdRem:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemRem, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdMod:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemMod, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSqrt:
|
|
case BuiltinFnIdSin:
|
|
case BuiltinFnIdCos:
|
|
case BuiltinFnIdExp:
|
|
case BuiltinFnIdExp2:
|
|
case BuiltinFnIdLog:
|
|
case BuiltinFnIdLog2:
|
|
case BuiltinFnIdLog10:
|
|
case BuiltinFnIdFabs:
|
|
case BuiltinFnIdFloor:
|
|
case BuiltinFnIdCeil:
|
|
case BuiltinFnIdTrunc:
|
|
case BuiltinFnIdNearbyInt:
|
|
case BuiltinFnIdRound:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *inst = ir_build_float_op_src(irb, scope, node, arg0_value, builtin_fn->id);
|
|
return ir_lval_wrap(irb, scope, inst, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdTruncate:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *truncate = ir_build_truncate(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, truncate, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdIntCast:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result = ir_build_int_cast(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdFloatCast:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result = ir_build_float_cast(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdErrSetCast:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result = ir_build_err_set_cast(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdIntToFloat:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result = ir_build_int_to_float(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdFloatToInt:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result = ir_build_float_to_int(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdErrToInt:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *result = ir_build_err_to_int_src(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdIntToErr:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *result = ir_build_int_to_err_src(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdBoolToInt:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *result = ir_build_bool_to_int(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdVectorType:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *vector_type = ir_build_vector_type(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, vector_type, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdShuffle:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
|
|
IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
|
|
if (arg3_value == irb->codegen->invalid_inst_src)
|
|
return arg3_value;
|
|
|
|
IrInstSrc *shuffle_vector = ir_build_shuffle_vector(irb, scope, node,
|
|
arg0_value, arg1_value, arg2_value, arg3_value);
|
|
return ir_lval_wrap(irb, scope, shuffle_vector, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSplat:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *splat = ir_build_splat_src(irb, scope, node,
|
|
arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, splat, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdMemcpy:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
IrInstSrc *ir_memcpy = ir_build_memcpy_src(irb, scope, node, arg0_value, arg1_value, arg2_value);
|
|
return ir_lval_wrap(irb, scope, ir_memcpy, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdMemset:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
IrInstSrc *ir_memset = ir_build_memset_src(irb, scope, node, arg0_value, arg1_value, arg2_value);
|
|
return ir_lval_wrap(irb, scope, ir_memset, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdWasmMemorySize:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *ir_wasm_memory_size = ir_build_wasm_memory_size_src(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, ir_wasm_memory_size, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdWasmMemoryGrow:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *ir_wasm_memory_grow = ir_build_wasm_memory_grow_src(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, ir_wasm_memory_grow, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdField:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LValPtr, nullptr);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *ptr_instruction = ir_build_field_ptr_instruction(irb, scope, node,
|
|
arg0_value, arg1_value, false);
|
|
|
|
if (lval == LValPtr || lval == LValAssign)
|
|
return ptr_instruction;
|
|
|
|
IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction);
|
|
return ir_expr_wrap(irb, scope, load_ptr, result_loc);
|
|
}
|
|
case BuiltinFnIdHasField:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *type_info = ir_build_has_field(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, type_info, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdTypeInfo:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *type_info = ir_build_type_info(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, type_info, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdType:
|
|
{
|
|
AstNode *arg_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg = ir_gen_node(irb, arg_node, scope);
|
|
if (arg == irb->codegen->invalid_inst_src)
|
|
return arg;
|
|
|
|
IrInstSrc *type = ir_build_type(irb, scope, node, arg);
|
|
return ir_lval_wrap(irb, scope, type, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdBreakpoint:
|
|
return ir_lval_wrap(irb, scope, ir_build_breakpoint(irb, scope, node), lval, result_loc);
|
|
case BuiltinFnIdReturnAddress:
|
|
return ir_lval_wrap(irb, scope, ir_build_return_address_src(irb, scope, node), lval, result_loc);
|
|
case BuiltinFnIdFrameAddress:
|
|
return ir_lval_wrap(irb, scope, ir_build_frame_address_src(irb, scope, node), lval, result_loc);
|
|
case BuiltinFnIdFrameHandle:
|
|
if (!irb->exec->fn_entry) {
|
|
add_node_error(irb->codegen, node, buf_sprintf("@frame() called outside of function definition"));
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
return ir_lval_wrap(irb, scope, ir_build_handle_src(irb, scope, node), lval, result_loc);
|
|
case BuiltinFnIdFrameType: {
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *frame_type = ir_build_frame_type(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, frame_type, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdFrameSize: {
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *frame_size = ir_build_frame_size_src(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, frame_size, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdAlignOf:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *align_of = ir_build_align_of(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, align_of, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdAddWithOverflow:
|
|
return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd), lval, result_loc);
|
|
case BuiltinFnIdSubWithOverflow:
|
|
return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub), lval, result_loc);
|
|
case BuiltinFnIdMulWithOverflow:
|
|
return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul), lval, result_loc);
|
|
case BuiltinFnIdShlWithOverflow:
|
|
return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl), lval, result_loc);
|
|
case BuiltinFnIdMulAdd:
|
|
return ir_lval_wrap(irb, scope, ir_gen_mul_add(irb, scope, node), lval, result_loc);
|
|
case BuiltinFnIdTypeName:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *type_name = ir_build_type_name(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, type_name, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdPanic:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *panic = ir_build_panic_src(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, panic, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdPtrCast:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *ptr_cast = ir_build_ptr_cast_src(irb, scope, node, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, ptr_cast, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdBitCast:
|
|
{
|
|
AstNode *dest_type_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *dest_type = ir_gen_node(irb, dest_type_node, scope);
|
|
if (dest_type == irb->codegen->invalid_inst_src)
|
|
return dest_type;
|
|
|
|
ResultLocBitCast *result_loc_bit_cast = heap::c_allocator.create<ResultLocBitCast>();
|
|
result_loc_bit_cast->base.id = ResultLocIdBitCast;
|
|
result_loc_bit_cast->base.source_instruction = dest_type;
|
|
result_loc_bit_cast->base.allow_write_through_const = result_loc->allow_write_through_const;
|
|
ir_ref_instruction(dest_type, irb->current_basic_block);
|
|
result_loc_bit_cast->parent = result_loc;
|
|
|
|
ir_build_reset_result(irb, scope, node, &result_loc_bit_cast->base);
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node_extra(irb, arg1_node, scope, LValNone,
|
|
&result_loc_bit_cast->base);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bitcast = ir_build_bit_cast_src(irb, scope, arg1_node, arg1_value, result_loc_bit_cast);
|
|
return ir_lval_wrap(irb, scope, bitcast, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdAs:
|
|
{
|
|
AstNode *dest_type_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *dest_type = ir_gen_node(irb, dest_type_node, scope);
|
|
if (dest_type == irb->codegen->invalid_inst_src)
|
|
return dest_type;
|
|
|
|
ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, dest_type, result_loc);
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node_extra(irb, arg1_node, scope, LValNone,
|
|
&result_loc_cast->base);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result = ir_build_implicit_cast(irb, scope, node, arg1_value, result_loc_cast);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdIntToPtr:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *int_to_ptr = ir_build_int_to_ptr_src(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, int_to_ptr, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdPtrToInt:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *ptr_to_int = ir_build_ptr_to_int_src(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, ptr_to_int, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdTagName:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *tag_name = ir_build_tag_name_src(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, tag_name, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdTagType:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *tag_type = ir_build_tag_type(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, tag_type, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdFieldParentPtr:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
IrInstSrc *field_parent_ptr = ir_build_field_parent_ptr_src(irb, scope, node,
|
|
arg0_value, arg1_value, arg2_value);
|
|
return ir_lval_wrap(irb, scope, field_parent_ptr, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdByteOffsetOf:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *offset_of = ir_build_byte_offset_of(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, offset_of, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdBitOffsetOf:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *offset_of = ir_build_bit_offset_of(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, offset_of, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCall: {
|
|
// Cast the options parameter to the options type
|
|
ZigType *options_type = get_builtin_type(irb->codegen, "CallOptions");
|
|
IrInstSrc *options_type_inst = ir_build_const_type(irb, scope, node, options_type);
|
|
ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, options_type_inst, no_result_loc());
|
|
|
|
AstNode *options_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *options_inner = ir_gen_node_extra(irb, options_node, scope,
|
|
LValNone, &result_loc_cast->base);
|
|
if (options_inner == irb->codegen->invalid_inst_src)
|
|
return options_inner;
|
|
IrInstSrc *options = ir_build_implicit_cast(irb, scope, options_node, options_inner, result_loc_cast);
|
|
|
|
AstNode *fn_ref_node = node->data.fn_call_expr.params.at(1);
|
|
AstNode *args_node = node->data.fn_call_expr.params.at(2);
|
|
if (args_node->type == NodeTypeContainerInitExpr) {
|
|
if (args_node->data.container_init_expr.kind == ContainerInitKindArray ||
|
|
args_node->data.container_init_expr.entries.length == 0)
|
|
{
|
|
return ir_gen_fn_call_with_args(irb, scope, node,
|
|
fn_ref_node, CallModifierNone, options,
|
|
args_node->data.container_init_expr.entries.items,
|
|
args_node->data.container_init_expr.entries.length,
|
|
lval, result_loc);
|
|
} else {
|
|
exec_add_error_node(irb->codegen, irb->exec, args_node,
|
|
buf_sprintf("TODO: @call with anon struct literal"));
|
|
return irb->codegen->invalid_inst_src;
|
|
}
|
|
} else {
|
|
IrInstSrc *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
|
|
if (fn_ref == irb->codegen->invalid_inst_src)
|
|
return fn_ref;
|
|
|
|
IrInstSrc *args = ir_gen_node(irb, args_node, scope);
|
|
if (args == irb->codegen->invalid_inst_src)
|
|
return args;
|
|
|
|
IrInstSrc *call = ir_build_call_extra(irb, scope, node, options, fn_ref, args, result_loc);
|
|
return ir_lval_wrap(irb, scope, call, lval, result_loc);
|
|
}
|
|
}
|
|
case BuiltinFnIdAsyncCall:
|
|
return ir_gen_async_call(irb, scope, nullptr, node, lval, result_loc);
|
|
case BuiltinFnIdShlExact:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftLeftExact, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdShrExact:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftRightExact, arg0_value, arg1_value, true);
|
|
return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSetEvalBranchQuota:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *set_eval_branch_quota = ir_build_set_eval_branch_quota(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, set_eval_branch_quota, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdAlignCast:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *align_cast = ir_build_align_cast_src(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, align_cast, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdThis:
|
|
{
|
|
IrInstSrc *this_inst = ir_gen_this(irb, scope, node);
|
|
return ir_lval_wrap(irb, scope, this_inst, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSetAlignStack:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *set_align_stack = ir_build_set_align_stack(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, set_align_stack, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdExport:
|
|
{
|
|
// Cast the options parameter to the options type
|
|
ZigType *options_type = get_builtin_type(irb->codegen, "ExportOptions");
|
|
IrInstSrc *options_type_inst = ir_build_const_type(irb, scope, node, options_type);
|
|
ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, options_type_inst, no_result_loc());
|
|
|
|
AstNode *target_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *target_value = ir_gen_node(irb, target_node, scope);
|
|
if (target_value == irb->codegen->invalid_inst_src)
|
|
return target_value;
|
|
|
|
AstNode *options_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *options_value = ir_gen_node_extra(irb, options_node,
|
|
scope, LValNone, &result_loc_cast->base);
|
|
if (options_value == irb->codegen->invalid_inst_src)
|
|
return options_value;
|
|
|
|
IrInstSrc *casted_options_value = ir_build_implicit_cast(
|
|
irb, scope, options_node, options_value, result_loc_cast);
|
|
|
|
IrInstSrc *ir_export = ir_build_export(irb, scope, node, target_value, casted_options_value);
|
|
return ir_lval_wrap(irb, scope, ir_export, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdExtern:
|
|
{
|
|
// Cast the options parameter to the options type
|
|
ZigType *options_type = get_builtin_type(irb->codegen, "ExternOptions");
|
|
IrInstSrc *options_type_inst = ir_build_const_type(irb, scope, node, options_type);
|
|
ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, options_type_inst, no_result_loc());
|
|
|
|
AstNode *type_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *type_value = ir_gen_node(irb, type_node, scope);
|
|
if (type_value == irb->codegen->invalid_inst_src)
|
|
return type_value;
|
|
|
|
AstNode *options_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *options_value = ir_gen_node_extra(irb, options_node,
|
|
scope, LValNone, &result_loc_cast->base);
|
|
if (options_value == irb->codegen->invalid_inst_src)
|
|
return options_value;
|
|
|
|
IrInstSrc *casted_options_value = ir_build_implicit_cast(
|
|
irb, scope, options_node, options_value, result_loc_cast);
|
|
|
|
IrInstSrc *ir_extern = ir_build_extern(irb, scope, node, type_value, casted_options_value);
|
|
return ir_lval_wrap(irb, scope, ir_extern, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdErrorReturnTrace:
|
|
{
|
|
IrInstSrc *error_return_trace = ir_build_error_return_trace_src(irb, scope, node,
|
|
IrInstErrorReturnTraceNull);
|
|
return ir_lval_wrap(irb, scope, error_return_trace, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdAtomicRmw:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
|
|
IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
|
|
if (arg3_value == irb->codegen->invalid_inst_src)
|
|
return arg3_value;
|
|
|
|
AstNode *arg4_node = node->data.fn_call_expr.params.at(4);
|
|
IrInstSrc *arg4_value = ir_gen_node(irb, arg4_node, scope);
|
|
if (arg4_value == irb->codegen->invalid_inst_src)
|
|
return arg4_value;
|
|
|
|
IrInstSrc *inst = ir_build_atomic_rmw_src(irb, scope, node,
|
|
arg0_value, arg1_value, arg2_value, arg3_value, arg4_value);
|
|
return ir_lval_wrap(irb, scope, inst, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdAtomicLoad:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
IrInstSrc *inst = ir_build_atomic_load_src(irb, scope, node, arg0_value, arg1_value, arg2_value);
|
|
return ir_lval_wrap(irb, scope, inst, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdAtomicStore:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
|
|
IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
|
|
if (arg2_value == irb->codegen->invalid_inst_src)
|
|
return arg2_value;
|
|
|
|
AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
|
|
IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
|
|
if (arg3_value == irb->codegen->invalid_inst_src)
|
|
return arg3_value;
|
|
|
|
IrInstSrc *inst = ir_build_atomic_store_src(irb, scope, node, arg0_value, arg1_value,
|
|
arg2_value, arg3_value);
|
|
return ir_lval_wrap(irb, scope, inst, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdIntToEnum:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result = ir_build_int_to_enum_src(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdEnumToInt:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
IrInstSrc *result = ir_build_enum_to_int(irb, scope, node, arg0_value);
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdCtz:
|
|
case BuiltinFnIdPopCount:
|
|
case BuiltinFnIdClz:
|
|
case BuiltinFnIdBswap:
|
|
case BuiltinFnIdBitReverse:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *result;
|
|
switch (builtin_fn->id) {
|
|
case BuiltinFnIdCtz:
|
|
result = ir_build_ctz(irb, scope, node, arg0_value, arg1_value);
|
|
break;
|
|
case BuiltinFnIdPopCount:
|
|
result = ir_build_pop_count(irb, scope, node, arg0_value, arg1_value);
|
|
break;
|
|
case BuiltinFnIdClz:
|
|
result = ir_build_clz(irb, scope, node, arg0_value, arg1_value);
|
|
break;
|
|
case BuiltinFnIdBswap:
|
|
result = ir_build_bswap(irb, scope, node, arg0_value, arg1_value);
|
|
break;
|
|
case BuiltinFnIdBitReverse:
|
|
result = ir_build_bit_reverse(irb, scope, node, arg0_value, arg1_value);
|
|
break;
|
|
default:
|
|
zig_unreachable();
|
|
}
|
|
return ir_lval_wrap(irb, scope, result, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdHasDecl:
|
|
{
|
|
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
|
|
if (arg0_value == irb->codegen->invalid_inst_src)
|
|
return arg0_value;
|
|
|
|
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
|
|
if (arg1_value == irb->codegen->invalid_inst_src)
|
|
return arg1_value;
|
|
|
|
IrInstSrc *has_decl = ir_build_has_decl(irb, scope, node, arg0_value, arg1_value);
|
|
return ir_lval_wrap(irb, scope, has_decl, lval, result_loc);
|
|
}
|
|
case BuiltinFnIdUnionInit:
|
|
{
|
|
AstNode *union_type_node = node->data.fn_call_expr.params.at(0);
|
|
IrInstSrc *union_type_inst = ir_gen_node(irb, union_type_node, scope);
|
|
if (union_type_inst == irb->codegen->invalid_inst_src)
|
|
return union_type_inst;
|
|
|
|
AstNode *name_node = node->data.fn_call_expr.params.at(1);
|
|
IrInstSrc *name_inst = ir_gen_node(irb, name_node, scope);
|
|
if (name_inst == irb->codegen->invalid_inst_src)
|
|
return name_inst;
|
|
|
|
AstNode *init_node = node->data.fn_call_expr.params.at(2);
|
|
|
|
return ir_gen_union_init_expr(irb, scope, node, union_type_inst, name_inst, init_node,
|
|
lval, result_loc);
|
|
}
|
|
case BuiltinFnIdSrc:
|
|
{
|
|
IrInstSrc *src_inst = ir_build_src(irb, scope, node);
|
|
return ir_lval_wrap(irb, scope, src_inst, lval, result_loc);
|
|
}
|
|
}
|
|
zig_unreachable();
|
|
}
|
|
|
|
// Walks the scope chain outward looking for an enclosing nosuspend scope.
// The search stops (returning null) at the function-definition boundary,
// so a nosuspend scope in a caller never leaks into a nested function.
static ScopeNoSuspend *get_scope_nosuspend(Scope *scope) {
    for (Scope *s = scope; s != nullptr; s = s->parent) {
        if (s->id == ScopeIdNoSuspend)
            return (ScopeNoSuspend *)s;
        if (s->id == ScopeIdFnDef)
            break;
    }
    return nullptr;
}
|
|
|
|
// Generates IR for a function call expression.
// Builtin calls (CallModifierBuiltin) are dispatched to
// ir_gen_builtin_fn_call. For ordinary calls, an enclosing nosuspend scope
// upgrades the call modifier to CallModifierNoSuspend and rejects `async`
// calls outright; the call is then emitted with its argument list.
static IrInstSrc *ir_gen_fn_call(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeFnCallExpr);

    if (node->data.fn_call_expr.modifier == CallModifierBuiltin)
        return ir_gen_builtin_fn_call(irb, scope, node, lval, result_loc);

    CallModifier modifier = node->data.fn_call_expr.modifier;
    if (get_scope_nosuspend(scope) != nullptr) {
        // An async call cannot appear inside a nosuspend scope; every other
        // call inside one becomes a nosuspend call.
        if (modifier == CallModifierAsync) {
            add_node_error(irb->codegen, node,
                    buf_sprintf("async call in nosuspend scope"));
            return irb->codegen->invalid_inst_src;
        }
        modifier = CallModifierNoSuspend;
    }

    AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
    return ir_gen_fn_call_with_args(irb, scope, node, fn_ref_node, modifier,
            nullptr, node->data.fn_call_expr.params.items, node->data.fn_call_expr.params.length,
            lval, result_loc);
}
|
|
|
|
// Generates IR for `if (cond) then else else_expr` where the condition is a
// plain boolean expression (no optional unwrap, no error capture).
//
// Emitted shape: a conditional branch into Then/Else basic blocks, both of
// which branch to a shared EndIf block where a phi merges the two results.
// A pair of peer result locations (index 0 = then, index 1 = else) funnels
// both branch results into the caller's result_loc.
static IrInstSrc *ir_gen_if_bool_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeIfBoolExpr);

    IrInstSrc *condition = ir_gen_node(irb, node->data.if_bool_expr.condition, scope);
    if (condition == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // In a forced-inline (comptime) context the branch is always comptime;
    // otherwise comptime-ness is tested from the condition value itself.
    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, condition);
    }

    AstNode *then_node = node->data.if_bool_expr.then_block;
    AstNode *else_node = node->data.if_bool_expr.else_node;

    IrBasicBlockSrc *then_block = ir_create_basic_block(irb, scope, "Then");
    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "Else");
    IrBasicBlockSrc *endif_block = ir_create_basic_block(irb, scope, "EndIf");

    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, scope, node, condition,
            then_block, else_block, is_comptime);
    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block,
            result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, then_block);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    IrInstSrc *then_expr_result = ir_gen_node_extra(irb, then_node, subexpr_scope, lval,
            &peer_parent->peers.at(0)->base);
    if (then_expr_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    // Record the block the then-branch actually ended in: generating the
    // branch body may have appended further basic blocks.
    IrBasicBlockSrc *after_then_block = irb->current_basic_block;
    if (!instr_is_unreachable(then_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, else_block);
    IrInstSrc *else_expr_result;
    if (else_node) {
        else_expr_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_parent->peers.at(1)->base);
        if (else_expr_result == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        // No else branch: the else arm produces void, and its result-loc
        // peer must still be closed with an end_expr.
        else_expr_result = ir_build_const_void(irb, scope, node);
        ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base);
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    if (!instr_is_unreachable(else_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, endif_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = then_expr_result;
    incoming_values[1] = else_expr_result;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_then_block;
    incoming_blocks[1] = after_else_block;

    IrInstSrc *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_expr_wrap(irb, scope, phi, result_loc);
}
|
|
|
|
// Generates IR for a prefix unary operator: evaluates the operand
// expression (honoring the requested lvalue-ness) and wraps it in the
// given unary op instruction.
static IrInstSrc *ir_gen_prefix_op_id_lval(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) {
    assert(node->type == NodeTypePrefixOpExpr);

    AstNode *operand_node = node->data.prefix_op_expr.primary_expr;
    IrInstSrc *operand = ir_gen_node_extra(irb, operand_node, scope, lval, nullptr);
    return (operand == irb->codegen->invalid_inst_src)
        ? operand
        : ir_build_un_op(irb, scope, node, op_id, operand);
}
|
|
|
|
// Convenience wrapper around ir_gen_prefix_op_id_lval for the common case
// where no lvalue is requested.
static IrInstSrc *ir_gen_prefix_op_id(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrUnOp op_id) {
    return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LValNone);
}
|
|
|
|
// Finalizes an expression against its result location: emits an end_expr
// instruction for `inst` unless it is the invalid sentinel, then returns
// `inst` unchanged either way.
static IrInstSrc *ir_expr_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *inst, ResultLoc *result_loc) {
    if (inst != irb->codegen->invalid_inst_src) {
        ir_build_end_expr(irb, scope, inst->base.source_node, inst, result_loc);
    }
    return inst;
}
|
|
|
|
// Adapts a generated value instruction to the lvalue-ness and result
// location the caller asked for.
static IrInstSrc *ir_lval_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *value, LVal lval,
        ResultLoc *result_loc)
{
    // This logic must be kept in sync with
    // [STMT_EXPR_TEST_THING] <--- (search this token)
    // Note: the checks below rely on short-circuiting; `value->base` must
    // not be touched when value is the invalid sentinel.
    bool pass_through = value == irb->codegen->invalid_inst_src ||
            instr_is_unreachable(value) ||
            value->base.source_node->type == NodeTypeDefer ||
            value->id == IrInstSrcIdDeclVar;
    if (pass_through)
        return value;

    assert(lval != LValAssign);
    if (lval == LValPtr) {
        // We needed a pointer to a value, but we got a value. So we create
        // an instruction which just makes a pointer of it.
        return ir_build_ref_src(irb, scope, value->base.source_node, value);
    }
    if (result_loc != nullptr)
        return ir_expr_wrap(irb, scope, value, result_loc);
    return value;
}
|
|
|
|
// Maps the token that introduced a pointer type to its pointer-length kind:
// `*` / `**` yield a single-item pointer, `[` an unknown-length pointer,
// and a symbol token a C pointer. Any other token is a parser bug.
static PtrLen star_token_to_ptr_len(TokenId token_id) {
    if (token_id == TokenIdStar || token_id == TokenIdStarStar)
        return PtrLenSingle;
    if (token_id == TokenIdLBracket)
        return PtrLenUnknown;
    if (token_id == TokenIdSymbol)
        return PtrLenC;
    zig_unreachable();
}
|
|
|
|
// Generates IR for a pointer type expression (e.g. `*T`, `[*]T`, with
// optional const/volatile/allowzero qualifiers, sentinel, alignment, and
// bit-offset / host-integer packing attributes).
//
// Sub-expressions are evaluated in order: sentinel, then align, then the
// child type. The bit-offset and host-integer attributes are validated to
// fit in u32 (and to be mutually consistent) before the ptr_type
// instruction is emitted.
static IrInstSrc *ir_gen_pointer_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypePointerType);

    PtrLen ptr_len = star_token_to_ptr_len(node->data.pointer_type.star_token->id);

    bool is_const = node->data.pointer_type.is_const;
    bool is_volatile = node->data.pointer_type.is_volatile;
    // allowzero is present iff its token appeared in the source.
    bool is_allow_zero = node->data.pointer_type.allow_zero_token != nullptr;
    AstNode *sentinel_expr = node->data.pointer_type.sentinel;
    AstNode *expr_node = node->data.pointer_type.op_expr;
    AstNode *align_expr = node->data.pointer_type.align_expr;

    // Optional sentinel value expression.
    IrInstSrc *sentinel;
    if (sentinel_expr != nullptr) {
        sentinel = ir_gen_node(irb, sentinel_expr, scope);
        if (sentinel == irb->codegen->invalid_inst_src)
            return sentinel;
    } else {
        sentinel = nullptr;
    }

    // Optional align(...) value expression.
    IrInstSrc *align_value;
    if (align_expr != nullptr) {
        align_value = ir_gen_node(irb, align_expr, scope);
        if (align_value == irb->codegen->invalid_inst_src)
            return align_value;
    } else {
        align_value = nullptr;
    }

    IrInstSrc *child_type = ir_gen_node(irb, expr_node, scope);
    if (child_type == irb->codegen->invalid_inst_src)
        return child_type;

    // Bit offset of the pointee within its host integer; must fit in u32.
    uint32_t bit_offset_start = 0;
    if (node->data.pointer_type.bit_offset_start != nullptr) {
        if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
            exec_add_error_node(irb->codegen, irb->exec, node,
                    buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
            return irb->codegen->invalid_inst_src;
        }
        bit_offset_start = bigint_as_u32(node->data.pointer_type.bit_offset_start);
    }

    // Size in bytes of the host integer for packed pointers; must fit in u32.
    uint32_t host_int_bytes = 0;
    if (node->data.pointer_type.host_int_bytes != nullptr) {
        if (!bigint_fits_in_bits(node->data.pointer_type.host_int_bytes, 32, false)) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, node->data.pointer_type.host_int_bytes, 10);
            exec_add_error_node(irb->codegen, irb->exec, node,
                    buf_sprintf("value %s too large for u32 byte count", buf_ptr(val_buf)));
            return irb->codegen->invalid_inst_src;
        }
        host_int_bytes = bigint_as_u32(node->data.pointer_type.host_int_bytes);
    }

    // The bit offset must lie within the host integer (host_int_bytes * 8 bits).
    if (host_int_bytes != 0 && bit_offset_start >= host_int_bytes * 8) {
        exec_add_error_node(irb->codegen, irb->exec, node,
                buf_sprintf("bit offset starts after end of host integer"));
        return irb->codegen->invalid_inst_src;
    }

    return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile,
            ptr_len, sentinel, align_value, bit_offset_start, host_int_bytes, is_allow_zero);
}
|
|
|
|
// Lower `expr catch unreachable`: unwrap the error union's payload, asserting
// (with a runtime safety check) that no error is present.
static IrInstSrc *ir_gen_catch_unreachable(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        AstNode *expr_node, LVal lval, ResultLoc *result_loc)
{
    // Evaluate the operand as a pointer so the payload can be accessed in place.
    IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
    if (err_union_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // Flags (true, false) match other unwrap sites in this file; presumably
    // safety-check-on / not-initializing — confirm against ir_build_unwrap_err_payload_src.
    IrInstSrc *payload_ptr = ir_build_unwrap_err_payload_src(irb, scope, source_node, err_union_ptr, true, false);
    if (payload_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // Caller wants an lvalue: hand back the payload pointer directly.
    if (lval == LValPtr)
        return payload_ptr;

    // Otherwise produce the payload value and route it to the result location.
    IrInstSrc *payload_value = ir_build_load_ptr(irb, scope, source_node, payload_ptr);
    return ir_expr_wrap(irb, scope, payload_value, result_loc);
}
|
|
|
|
// Lower a `!expr` prefix expression into a boolean-not IR instruction.
static IrInstSrc *ir_gen_bool_not(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypePrefixOpExpr);

    AstNode *operand_node = node->data.prefix_op_expr.primary_expr;
    IrInstSrc *operand = ir_gen_node(irb, operand_node, scope);
    if (operand == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_bool_not(irb, scope, node, operand);
}
|
|
|
|
// Dispatch a prefix operator expression to the matching IR lowering.
// Every resulting value is routed through ir_lval_wrap so lval/result_loc
// semantics are uniform across operators.
static IrInstSrc *ir_gen_prefix_op_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypePrefixOpExpr);

    PrefixOp prefix_op = node->data.prefix_op_expr.prefix_op;

    if (prefix_op == PrefixOpInvalid)
        zig_unreachable();

    // `!x` has its own lowering (boolean not).
    if (prefix_op == PrefixOpBoolNot)
        return ir_lval_wrap(irb, scope, ir_gen_bool_not(irb, scope, node), lval, result_loc);

    // `&x` takes the operand's address: generate the operand as an lvalue pointer.
    if (prefix_op == PrefixOpAddrOf) {
        AstNode *operand_node = node->data.prefix_op_expr.primary_expr;
        IrInstSrc *addr = ir_gen_node_extra(irb, operand_node, scope, LValPtr, nullptr);
        return ir_lval_wrap(irb, scope, addr, lval, result_loc);
    }

    // The remaining operators all lower to a generic unary-op instruction.
    if (prefix_op == PrefixOpBinNot)
        return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpBinNot), lval, result_loc);
    if (prefix_op == PrefixOpNegation)
        return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval, result_loc);
    if (prefix_op == PrefixOpNegationWrap)
        return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval, result_loc);
    if (prefix_op == PrefixOpOptional)
        return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval, result_loc);

    zig_unreachable();
}
|
|
|
|
// Lower a union initialization `U{ .field = expr }`: resolve the result
// location for the union, obtain a pointer to the named field, generate the
// field's initializer into that pointer, then emit the union-init instruction.
static IrInstSrc *ir_gen_union_init_expr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *union_type, IrInstSrc *field_name, AstNode *expr_node,
        LVal lval, ResultLoc *parent_result_loc)
{
    // Materialize the destination for the union value itself.
    IrInstSrc *container_ptr = ir_build_resolve_result(irb, scope, source_node, parent_result_loc, union_type);
    // Pointer to the active field inside that destination (initializing=true).
    IrInstSrc *field_ptr = ir_build_field_ptr_instruction(irb, scope, source_node, container_ptr,
            field_name, true);

    // Wrap the field pointer in a result location so the initializer expression
    // can write directly into it.
    ResultLocInstruction *field_result_loc = heap::c_allocator.create<ResultLocInstruction>();
    field_result_loc->base.id = ResultLocIdInstruction;
    field_result_loc->base.source_instruction = field_ptr;
    ir_ref_instruction(field_ptr, irb->current_basic_block);
    ir_build_reset_result(irb, scope, expr_node, &field_result_loc->base);

    IrInstSrc *field_init = ir_gen_node_extra(irb, expr_node, scope, LValNone,
            &field_result_loc->base);
    if (field_init == irb->codegen->invalid_inst_src)
        return field_init;

    // Record which field was initialized so analysis can set the union tag.
    IrInstSrc *union_value = ir_build_union_init_named_field(irb, scope, source_node, union_type,
            field_name, field_ptr, container_ptr);

    return ir_lval_wrap(irb, scope, union_value, lval, parent_result_loc);
}
|
|
|
|
// Lower a container initialization expression: either struct syntax
// `T{ .a = x }` or array/list syntax `T{ x, y, z }`. Values are generated
// directly into the (resolved) result location; when an explicit type is
// given, the result flows through a cast result location so the value is
// implicitly cast to that type.
static IrInstSrc *ir_gen_container_init_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *parent_result_loc)
{
    assert(node->type == NodeTypeContainerInitExpr);

    AstNodeContainerInitExpr *container_init_expr = &node->data.container_init_expr;
    ContainerInitKind kind = container_init_expr->kind;

    // When an explicit container type is present, values are routed through
    // result_loc_cast; otherwise straight into the parent result location.
    ResultLocCast *result_loc_cast = nullptr;
    ResultLoc *child_result_loc;
    AstNode *init_array_type_source_node;
    if (container_init_expr->type != nullptr) {
        IrInstSrc *container_type;
        if (container_init_expr->type->type == NodeTypeInferredArrayType) {
            // `[_]T{...}` — the array length is inferred from the entry count,
            // so struct syntax makes no sense here.
            if (kind == ContainerInitKindStruct) {
                add_node_error(irb->codegen, container_init_expr->type,
                        buf_sprintf("initializing array with struct syntax"));
                return irb->codegen->invalid_inst_src;
            }
            // Optional sentinel expression, e.g. `[_:0]u8{...}`.
            IrInstSrc *sentinel;
            if (container_init_expr->type->data.inferred_array_type.sentinel != nullptr) {
                sentinel = ir_gen_node(irb, container_init_expr->type->data.inferred_array_type.sentinel, scope);
                if (sentinel == irb->codegen->invalid_inst_src)
                    return sentinel;
            } else {
                sentinel = nullptr;
            }

            IrInstSrc *elem_type = ir_gen_node(irb,
                    container_init_expr->type->data.inferred_array_type.child_type, scope);
            if (elem_type == irb->codegen->invalid_inst_src)
                return elem_type;
            // The inferred length is simply the number of initializer entries.
            size_t item_count = container_init_expr->entries.length;
            IrInstSrc *item_count_inst = ir_build_const_usize(irb, scope, node, item_count);
            container_type = ir_build_array_type(irb, scope, node, item_count_inst, sentinel, elem_type);
        } else {
            // Explicit container type expression.
            container_type = ir_gen_node(irb, container_init_expr->type, scope);
            if (container_type == irb->codegen->invalid_inst_src)
                return container_type;
        }

        result_loc_cast = ir_build_cast_result_loc(irb, container_type, parent_result_loc);
        child_result_loc = &result_loc_cast->base;
        init_array_type_source_node = container_type->base.source_node;
    } else {
        // Anonymous literal `.{...}` — the type comes from the result location.
        child_result_loc = parent_result_loc;
        if (parent_result_loc->source_instruction != nullptr) {
            init_array_type_source_node = parent_result_loc->source_instruction->base.source_node;
        } else {
            init_array_type_source_node = node;
        }
    }

    switch (kind) {
        case ContainerInitKindStruct: {
            // Materialize a pointer to the destination aggregate.
            IrInstSrc *container_ptr = ir_build_resolve_result(irb, scope, node, child_result_loc,
                    nullptr);

            size_t field_count = container_init_expr->entries.length;
            IrInstSrcContainerInitFieldsField *fields = heap::c_allocator.allocate<IrInstSrcContainerInitFieldsField>(field_count);
            for (size_t i = 0; i < field_count; i += 1) {
                AstNode *entry_node = container_init_expr->entries.at(i);
                assert(entry_node->type == NodeTypeStructValueField);

                Buf *name = entry_node->data.struct_val_field.name;
                AstNode *expr_node = entry_node->data.struct_val_field.expr;

                // Each field initializer writes straight into its field pointer.
                IrInstSrc *field_ptr = ir_build_field_ptr(irb, scope, entry_node, container_ptr, name, true);
                ResultLocInstruction *result_loc_inst = heap::c_allocator.create<ResultLocInstruction>();
                result_loc_inst->base.id = ResultLocIdInstruction;
                result_loc_inst->base.source_instruction = field_ptr;
                result_loc_inst->base.allow_write_through_const = true;
                ir_ref_instruction(field_ptr, irb->current_basic_block);
                ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base);

                IrInstSrc *expr_value = ir_gen_node_extra(irb, expr_node, scope, LValNone,
                        &result_loc_inst->base);
                if (expr_value == irb->codegen->invalid_inst_src)
                    return expr_value;

                fields[i].name = name;
                fields[i].source_node = entry_node;
                fields[i].result_loc = field_ptr;
            }
            IrInstSrc *result = ir_build_container_init_fields(irb, scope, node, field_count,
                    fields, container_ptr);

            // If an explicit type was given, cast the initialized value to it.
            if (result_loc_cast != nullptr) {
                result = ir_build_implicit_cast(irb, scope, node, result, result_loc_cast);
            }
            return ir_lval_wrap(irb, scope, result, lval, parent_result_loc);
        }
        case ContainerInitKindArray: {
            size_t item_count = container_init_expr->entries.length;

            IrInstSrc *container_ptr = ir_build_resolve_result(irb, scope, node, child_result_loc,
                    nullptr);

            IrInstSrc **result_locs = heap::c_allocator.allocate<IrInstSrc *>(item_count);
            for (size_t i = 0; i < item_count; i += 1) {
                AstNode *expr_node = container_init_expr->entries.at(i);

                // Each element initializer writes straight into its element pointer.
                IrInstSrc *elem_index = ir_build_const_usize(irb, scope, expr_node, i);
                IrInstSrc *elem_ptr = ir_build_elem_ptr(irb, scope, expr_node, container_ptr,
                        elem_index, false, PtrLenSingle, init_array_type_source_node);
                ResultLocInstruction *result_loc_inst = heap::c_allocator.create<ResultLocInstruction>();
                result_loc_inst->base.id = ResultLocIdInstruction;
                result_loc_inst->base.source_instruction = elem_ptr;
                result_loc_inst->base.allow_write_through_const = true;
                ir_ref_instruction(elem_ptr, irb->current_basic_block);
                ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base);

                IrInstSrc *expr_value = ir_gen_node_extra(irb, expr_node, scope, LValNone,
                        &result_loc_inst->base);
                if (expr_value == irb->codegen->invalid_inst_src)
                    return expr_value;

                result_locs[i] = elem_ptr;
            }
            IrInstSrc *result = ir_build_container_init_list(irb, scope, node, item_count,
                    result_locs, container_ptr, init_array_type_source_node);
            if (result_loc_cast != nullptr) {
                result = ir_build_implicit_cast(irb, scope, node, result, result_loc_cast);
            }
            return ir_lval_wrap(irb, scope, result, lval, parent_result_loc);
        }
    }
    zig_unreachable();
}
|
|
|
|
// Create a variable result location backed by `alloca`, and emit a
// reset-result instruction for it at the alloca's source position.
static ResultLocVar *ir_build_var_result_loc(IrBuilderSrc *irb, IrInstSrc *alloca, ZigVar *var) {
    ResultLocVar *loc = heap::c_allocator.create<ResultLocVar>();
    loc->var = var;
    loc->base.id = ResultLocIdVar;
    loc->base.source_instruction = alloca;
    loc->base.allow_write_through_const = true;

    Scope *scope = alloca->base.scope;
    AstNode *source_node = alloca->base.source_node;
    ir_build_reset_result(irb, scope, source_node, &loc->base);

    return loc;
}
|
|
|
|
// Create a cast result location: values written through it are implicitly
// cast to `dest_type` before flowing into `parent_result_loc`.
static ResultLocCast *ir_build_cast_result_loc(IrBuilderSrc *irb, IrInstSrc *dest_type,
        ResultLoc *parent_result_loc)
{
    ResultLocCast *cast_loc = heap::c_allocator.create<ResultLocCast>();
    cast_loc->base.id = ResultLocIdCast;
    cast_loc->base.source_instruction = dest_type;
    // Inherit write-through-const permission from the parent location.
    cast_loc->base.allow_write_through_const = parent_result_loc->allow_write_through_const;
    ir_ref_instruction(dest_type, irb->current_basic_block);
    cast_loc->parent = parent_result_loc;

    Scope *scope = dest_type->base.scope;
    AstNode *source_node = dest_type->base.source_node;
    ir_build_reset_result(irb, scope, source_node, &cast_loc->base);

    return cast_loc;
}
|
|
|
|
// Declare `var` with storage from a fresh alloca and initialize it with
// `init`. Emission order matters: alloca, result-loc setup, end-expr (which
// routes `init` into the location), then the declaration itself.
static void build_decl_var_and_init(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var,
        IrInstSrc *init, const char *name_hint, IrInstSrc *is_comptime)
{
    IrInstSrc *storage = ir_build_alloca_src(irb, scope, source_node, nullptr, name_hint, is_comptime);
    ResultLocVar *storage_loc = ir_build_var_result_loc(irb, storage, var);
    ir_build_end_expr(irb, scope, source_node, init, &storage_loc->base);
    ir_build_var_decl_src(irb, scope, source_node, var, nullptr, storage);
}
|
|
|
|
// Lower a local variable declaration (`var`/`const x [: T] [align(a)] = expr`).
// Allocates storage, wires the initializer into a variable result location
// (optionally through a cast location when an explicit type is present), and
// emits the final var-decl instruction.
static IrInstSrc *ir_gen_var_decl(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeVariableDeclaration);

    AstNodeVariableDeclaration *variable_declaration = &node->data.variable_declaration;

    // `_` is reserved for discarding values; it cannot name a variable.
    if (buf_eql_str(variable_declaration->symbol, "_")) {
        add_node_error(irb->codegen, node, buf_sprintf("`_` is not a declarable symbol"));
        return irb->codegen->invalid_inst_src;
    }

    // Used for the type expr and the align expr
    Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope);

    // Explicit type annotation, if any, is evaluated at comptime.
    IrInstSrc *type_instruction;
    if (variable_declaration->type != nullptr) {
        type_instruction = ir_gen_node(irb, variable_declaration->type, comptime_scope);
        if (type_instruction == irb->codegen->invalid_inst_src)
            return type_instruction;
    } else {
        type_instruction = nullptr;
    }

    bool is_shadowable = false;
    bool is_const = variable_declaration->is_const;
    bool is_extern = variable_declaration->is_extern;

    // Comptime if explicitly requested or if the enclosing code is inlined.
    bool is_comptime_scalar = ir_should_inline(irb->exec, scope) || variable_declaration->is_comptime;
    IrInstSrc *is_comptime = ir_build_const_bool(irb, scope, node, is_comptime_scalar);
    ZigVar *var = ir_create_var(irb, node, scope, variable_declaration->symbol,
        is_const, is_const, is_shadowable, is_comptime);
    // we detect IrInstSrcDeclVar in gen_block to make sure the next node
    // is inside var->child_scope

    // Non-extern variables must have an initializer.
    if (!is_extern && !variable_declaration->expr) {
        var->var_type = irb->codegen->builtin_types.entry_invalid;
        add_node_error(irb->codegen, node, buf_sprintf("variables must be initialized"));
        return irb->codegen->invalid_inst_src;
    }

    // Optional `align(...)` expression, also evaluated at comptime.
    IrInstSrc *align_value = nullptr;
    if (variable_declaration->align_expr != nullptr) {
        align_value = ir_gen_node(irb, variable_declaration->align_expr, comptime_scope);
        if (align_value == irb->codegen->invalid_inst_src)
            return align_value;
    }

    // `linksection(...)` is only meaningful for globals; report but continue.
    if (variable_declaration->section_expr != nullptr) {
        add_node_error(irb->codegen, variable_declaration->section_expr,
            buf_sprintf("cannot set section of local variable '%s'", buf_ptr(variable_declaration->symbol)));
    }

    // Parser should ensure that this never happens
    assert(variable_declaration->threadlocal_tok == nullptr);

    IrInstSrc *alloca = ir_build_alloca_src(irb, scope, node, align_value,
            buf_ptr(variable_declaration->symbol), is_comptime);

    // Create a result location for the initialization expression.
    ResultLocVar *result_loc_var = ir_build_var_result_loc(irb, alloca, var);
    ResultLoc *init_result_loc;
    ResultLocCast *result_loc_cast;
    if (type_instruction != nullptr) {
        // Explicit type: the initializer is cast to it on the way in.
        result_loc_cast = ir_build_cast_result_loc(irb, type_instruction, &result_loc_var->base);
        init_result_loc = &result_loc_cast->base;
    } else {
        result_loc_cast = nullptr;
        init_result_loc = &result_loc_var->base;
    }

    // A comptime variable's initializer is evaluated in a comptime scope.
    Scope *init_scope = is_comptime_scalar ?
        create_comptime_scope(irb->codegen, variable_declaration->expr, scope) : scope;

    // Temporarily set the name of the IrExecutableSrc to the VariableDeclaration
    // so that the struct or enum from the init expression inherits the name.
    Buf *old_exec_name = irb->exec->name;
    irb->exec->name = variable_declaration->symbol;
    IrInstSrc *init_value = ir_gen_node_extra(irb, variable_declaration->expr, init_scope,
            LValNone, init_result_loc);
    irb->exec->name = old_exec_name;

    if (init_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // With an explicit type, insert the implicit cast and close out the
    // variable result location with the cast value.
    if (result_loc_cast != nullptr) {
        IrInstSrc *implicit_cast = ir_build_implicit_cast(irb, scope, init_value->base.source_node,
                init_value, result_loc_cast);
        ir_build_end_expr(irb, scope, node, implicit_cast, &result_loc_var->base);
    }

    return ir_build_var_decl_src(irb, scope, node, var, align_value, alloca);
}
|
|
|
|
// Lower a `while` expression. Three variants share the same CFG skeleton
// (cond -> body -> continue -> cond, with else/end exits and a phi merging
// break/else values):
//   1. `while (cond) |x| ... else |err| ...`  — error-union condition
//   2. `while (cond) |x| ...`                 — optional condition
//   3. `while (cond) ...`                     — plain boolean condition
static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeWhileExpr);

    AstNode *continue_expr_node = node->data.while_expr.continue_expr;
    AstNode *else_node = node->data.while_expr.else_node;

    // Basic blocks; continue/else fold into cond/end when absent.
    IrBasicBlockSrc *cond_block = ir_create_basic_block(irb, scope, "WhileCond");
    IrBasicBlockSrc *body_block = ir_create_basic_block(irb, scope, "WhileBody");
    IrBasicBlockSrc *continue_block = continue_expr_node ?
        ir_create_basic_block(irb, scope, "WhileContinue") : cond_block;
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, scope, "WhileEnd");
    IrBasicBlockSrc *else_block = else_node ?
        ir_create_basic_block(irb, scope, "WhileElse") : end_block;

    // Loop is comptime if written `inline while` or if the surrounding code
    // is being inlined.
    IrInstSrc *is_comptime = ir_build_const_bool(irb, scope, node,
        ir_should_inline(irb->exec, scope) || node->data.while_expr.is_inline);
    ir_build_br(irb, scope, node, cond_block, is_comptime);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    Buf *var_symbol = node->data.while_expr.var_symbol;
    Buf *err_symbol = node->data.while_expr.err_symbol;
    if (err_symbol != nullptr) {
        // Variant 1: error-union condition with `else |err|` capture.
        ir_set_cursor_at_end_and_append_block(irb, cond_block);

        Scope *payload_scope;
        AstNode *symbol_node = node; // TODO make more accurate
        ZigVar *payload_var;
        if (var_symbol) {
            // TODO make it an error to write to payload variable
            payload_var = ir_create_var(irb, symbol_node, subexpr_scope, var_symbol,
                    true, false, false, is_comptime);
            payload_scope = payload_var->child_scope;
        } else {
            payload_scope = subexpr_scope;
        }
        ScopeExpr *spill_scope = create_expr_scope(irb->codegen, node, payload_scope);
        // Condition is evaluated as a pointer so the payload/error can be
        // unwrapped in place each iteration.
        IrInstSrc *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope,
                LValPtr, nullptr);
        if (err_val_ptr == irb->codegen->invalid_inst_src)
            return err_val_ptr;
        IrInstSrc *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr,
                true, false);
        IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
        // Without an else clause the loop's value is void when the condition fails.
        IrInstSrc *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
        IrInstSrc *cond_br_inst;
        if (!instr_is_unreachable(is_err)) {
            // Error present -> else block; otherwise run the body.
            cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_err,
                    else_block, body_block, is_comptime);
            cond_br_inst->is_gen = true;
        } else {
            // for the purposes of the source instruction to ir_build_result_peers
            cond_br_inst = irb->current_basic_block->instruction_list.last();
        }

        ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc,
                is_comptime);

        ir_set_cursor_at_end_and_append_block(irb, body_block);
        if (var_symbol) {
            // Bind the payload capture for this iteration (by value or by
            // pointer, per `|*x|`).
            IrInstSrc *payload_ptr = ir_build_unwrap_err_payload_src(irb, &spill_scope->base, symbol_node,
                    err_val_ptr, false, false);
            IrInstSrc *var_value = node->data.while_expr.var_is_ptr ?
                payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, symbol_node, payload_ptr);
            build_decl_var_and_init(irb, payload_scope, symbol_node, payload_var, var_value, buf_ptr(var_symbol), is_comptime);
        }

        // Filled in by `break` statements inside the body (via loop_scope).
        ZigList<IrInstSrc *> incoming_values = {0};
        ZigList<IrBasicBlockSrc *> incoming_blocks = {0};

        if (is_duplicate_label(irb->codegen, payload_scope, node, node->data.while_expr.name))
            return irb->codegen->invalid_inst_src;

        ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, payload_scope);
        loop_scope->break_block = end_block;
        loop_scope->continue_block = continue_block;
        loop_scope->is_comptime = is_comptime;
        loop_scope->incoming_blocks = &incoming_blocks;
        loop_scope->incoming_values = &incoming_values;
        loop_scope->lval = lval;
        loop_scope->peer_parent = peer_parent;
        loop_scope->spill_scope = spill_scope;

        // Note the body block of the loop is not the place that lval and result_loc are used -
        // it's actually in break statements, handled similarly to return statements.
        // That is why we set those values in loop_scope above and not in this ir_gen_node call.
        IrInstSrc *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base);
        if (body_result == irb->codegen->invalid_inst_src)
            return body_result;

        if (loop_scope->name != nullptr && loop_scope->name_used == false) {
            add_node_error(irb->codegen, node, buf_sprintf("unused while label"));
        }

        // Fall off the end of the body -> go to continue (or cond) block.
        if (!instr_is_unreachable(body_result)) {
            ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, node->data.while_expr.body, body_result));
            ir_mark_gen(ir_build_br(irb, payload_scope, node, continue_block, is_comptime));
        }

        if (continue_expr_node) {
            // `while (cond) : (continue_expr)` — run it, then re-test.
            ir_set_cursor_at_end_and_append_block(irb, continue_block);
            IrInstSrc *expr_result = ir_gen_node(irb, continue_expr_node, payload_scope);
            if (expr_result == irb->codegen->invalid_inst_src)
                return expr_result;
            if (!instr_is_unreachable(expr_result)) {
                ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, continue_expr_node, expr_result));
                ir_mark_gen(ir_build_br(irb, payload_scope, node, cond_block, is_comptime));
            }
        }

        // The `else |err|` clause is mandatory for this variant.
        ir_set_cursor_at_end_and_append_block(irb, else_block);
        assert(else_node != nullptr);

        // TODO make it an error to write to error variable
        AstNode *err_symbol_node = else_node; // TODO make more accurate
        ZigVar *err_var = ir_create_var(irb, err_symbol_node, scope, err_symbol,
                true, false, false, is_comptime);
        Scope *err_scope = err_var->child_scope;
        IrInstSrc *err_ptr = ir_build_unwrap_err_code_src(irb, err_scope, err_symbol_node, err_val_ptr);
        IrInstSrc *err_value = ir_build_load_ptr(irb, err_scope, err_symbol_node, err_ptr);
        build_decl_var_and_init(irb, err_scope, err_symbol_node, err_var, err_value, buf_ptr(err_symbol), is_comptime);

        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = else_block;
        }
        ResultLocPeer *peer_result = create_peer_result(peer_parent);
        peer_parent->peers.append(peer_result);
        IrInstSrc *else_result = ir_gen_node_extra(irb, else_node, err_scope, lval, &peer_result->base);
        if (else_result == irb->codegen->invalid_inst_src)
            return else_result;
        if (!instr_is_unreachable(else_result))
            ir_mark_gen(ir_build_br(irb, scope, node, end_block, is_comptime));
        IrBasicBlockSrc *after_else_block = irb->current_basic_block;
        // Merge break values with the else (or implicit void) value.
        ir_set_cursor_at_end_and_append_block(irb, end_block);
        if (else_result) {
            incoming_blocks.append(after_else_block);
            incoming_values.append(else_result);
        } else {
            incoming_blocks.append(after_cond_block);
            incoming_values.append(void_else_result);
        }
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }

        IrInstSrc *phi = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
        return ir_expr_wrap(irb, scope, phi, result_loc);
    } else if (var_symbol != nullptr) {
        // Variant 2: optional condition with payload capture `|x|`.
        ir_set_cursor_at_end_and_append_block(irb, cond_block);
        Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
        // TODO make it an error to write to payload variable
        AstNode *symbol_node = node; // TODO make more accurate

        ZigVar *payload_var = ir_create_var(irb, symbol_node, subexpr_scope, var_symbol,
                true, false, false, is_comptime);
        Scope *child_scope = payload_var->child_scope;
        ScopeExpr *spill_scope = create_expr_scope(irb->codegen, node, child_scope);
        // Condition evaluated as a pointer so the payload can be unwrapped in place.
        IrInstSrc *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope,
                LValPtr, nullptr);
        if (maybe_val_ptr == irb->codegen->invalid_inst_src)
            return maybe_val_ptr;
        IrInstSrc *maybe_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, maybe_val_ptr);
        IrInstSrc *is_non_null = ir_build_test_non_null_src(irb, scope, node->data.while_expr.condition, maybe_val);
        IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
        IrInstSrc *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
        IrInstSrc *cond_br_inst;
        if (!instr_is_unreachable(is_non_null)) {
            // Non-null -> body; null -> else (or end).
            cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_non_null,
                    body_block, else_block, is_comptime);
            cond_br_inst->is_gen = true;
        } else {
            // for the purposes of the source instruction to ir_build_result_peers
            cond_br_inst = irb->current_basic_block->instruction_list.last();
        }

        ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc,
                is_comptime);

        // Body: bind the capture (value or pointer, per `|*x|`).
        ir_set_cursor_at_end_and_append_block(irb, body_block);
        IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, &spill_scope->base, symbol_node, maybe_val_ptr, false);
        IrInstSrc *var_value = node->data.while_expr.var_is_ptr ?
            payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, symbol_node, payload_ptr);
        build_decl_var_and_init(irb, child_scope, symbol_node, payload_var, var_value, buf_ptr(var_symbol), is_comptime);

        // Filled in by `break` statements inside the body (via loop_scope).
        ZigList<IrInstSrc *> incoming_values = {0};
        ZigList<IrBasicBlockSrc *> incoming_blocks = {0};

        if (is_duplicate_label(irb->codegen, child_scope, node, node->data.while_expr.name))
            return irb->codegen->invalid_inst_src;

        ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, child_scope);
        loop_scope->break_block = end_block;
        loop_scope->continue_block = continue_block;
        loop_scope->is_comptime = is_comptime;
        loop_scope->incoming_blocks = &incoming_blocks;
        loop_scope->incoming_values = &incoming_values;
        loop_scope->lval = lval;
        loop_scope->peer_parent = peer_parent;
        loop_scope->spill_scope = spill_scope;

        // Note the body block of the loop is not the place that lval and result_loc are used -
        // it's actually in break statements, handled similarly to return statements.
        // That is why we set those values in loop_scope above and not in this ir_gen_node call.
        IrInstSrc *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base);
        if (body_result == irb->codegen->invalid_inst_src)
            return body_result;

        if (loop_scope->name != nullptr && loop_scope->name_used == false) {
            add_node_error(irb->codegen, node, buf_sprintf("unused while label"));
        }

        // Fall off the end of the body -> go to continue (or cond) block.
        if (!instr_is_unreachable(body_result)) {
            ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.while_expr.body, body_result));
            ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
        }

        if (continue_expr_node) {
            // `while (cond) : (continue_expr)` — run it, then re-test.
            ir_set_cursor_at_end_and_append_block(irb, continue_block);
            IrInstSrc *expr_result = ir_gen_node(irb, continue_expr_node, child_scope);
            if (expr_result == irb->codegen->invalid_inst_src)
                return expr_result;
            if (!instr_is_unreachable(expr_result)) {
                ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, continue_expr_node, expr_result));
                ir_mark_gen(ir_build_br(irb, child_scope, node, cond_block, is_comptime));
            }
        }

        // Optional else clause runs when the condition is null.
        IrInstSrc *else_result = nullptr;
        if (else_node) {
            ir_set_cursor_at_end_and_append_block(irb, else_block);

            if (peer_parent->peers.length != 0) {
                peer_parent->peers.last()->next_bb = else_block;
            }
            ResultLocPeer *peer_result = create_peer_result(peer_parent);
            peer_parent->peers.append(peer_result);
            else_result = ir_gen_node_extra(irb, else_node, scope, lval, &peer_result->base);
            if (else_result == irb->codegen->invalid_inst_src)
                return else_result;
            if (!instr_is_unreachable(else_result))
                ir_mark_gen(ir_build_br(irb, scope, node, end_block, is_comptime));
        }
        IrBasicBlockSrc *after_else_block = irb->current_basic_block;
        // Merge break values with the else (or implicit void) value.
        ir_set_cursor_at_end_and_append_block(irb, end_block);
        if (else_result) {
            incoming_blocks.append(after_else_block);
            incoming_values.append(else_result);
        } else {
            incoming_blocks.append(after_cond_block);
            incoming_values.append(void_else_result);
        }
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }

        IrInstSrc *phi = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
        return ir_expr_wrap(irb, scope, phi, result_loc);
    } else {
        // Variant 3: plain boolean condition, no capture.
        ir_set_cursor_at_end_and_append_block(irb, cond_block);
        IrInstSrc *cond_val = ir_gen_node(irb, node->data.while_expr.condition, scope);
        if (cond_val == irb->codegen->invalid_inst_src)
            return cond_val;
        IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
        IrInstSrc *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
        IrInstSrc *cond_br_inst;
        if (!instr_is_unreachable(cond_val)) {
            cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, cond_val,
                    body_block, else_block, is_comptime);
            cond_br_inst->is_gen = true;
        } else {
            // for the purposes of the source instruction to ir_build_result_peers
            cond_br_inst = irb->current_basic_block->instruction_list.last();
        }

        ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc,
                is_comptime);
        ir_set_cursor_at_end_and_append_block(irb, body_block);

        // Filled in by `break` statements inside the body (via loop_scope).
        ZigList<IrInstSrc *> incoming_values = {0};
        ZigList<IrBasicBlockSrc *> incoming_blocks = {0};

        Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);

        if (is_duplicate_label(irb->codegen, subexpr_scope, node, node->data.while_expr.name))
            return irb->codegen->invalid_inst_src;

        ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, subexpr_scope);
        loop_scope->break_block = end_block;
        loop_scope->continue_block = continue_block;
        loop_scope->is_comptime = is_comptime;
        loop_scope->incoming_blocks = &incoming_blocks;
        loop_scope->incoming_values = &incoming_values;
        loop_scope->lval = lval;
        loop_scope->peer_parent = peer_parent;

        // Note the body block of the loop is not the place that lval and result_loc are used -
        // it's actually in break statements, handled similarly to return statements.
        // That is why we set those values in loop_scope above and not in this ir_gen_node call.
        IrInstSrc *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base);
        if (body_result == irb->codegen->invalid_inst_src)
            return body_result;

        if (loop_scope->name != nullptr && loop_scope->name_used == false) {
            add_node_error(irb->codegen, node, buf_sprintf("unused while label"));
        }

        // Fall off the end of the body -> go to continue (or cond) block.
        if (!instr_is_unreachable(body_result)) {
            ir_mark_gen(ir_build_check_statement_is_void(irb, scope, node->data.while_expr.body, body_result));
            ir_mark_gen(ir_build_br(irb, scope, node, continue_block, is_comptime));
        }

        if (continue_expr_node) {
            // `while (cond) : (continue_expr)` — run it, then re-test.
            ir_set_cursor_at_end_and_append_block(irb, continue_block);
            IrInstSrc *expr_result = ir_gen_node(irb, continue_expr_node, subexpr_scope);
            if (expr_result == irb->codegen->invalid_inst_src)
                return expr_result;
            if (!instr_is_unreachable(expr_result)) {
                ir_mark_gen(ir_build_check_statement_is_void(irb, scope, continue_expr_node, expr_result));
                ir_mark_gen(ir_build_br(irb, scope, node, cond_block, is_comptime));
            }
        }

        // Optional else clause runs when the condition is false.
        IrInstSrc *else_result = nullptr;
        if (else_node) {
            ir_set_cursor_at_end_and_append_block(irb, else_block);

            if (peer_parent->peers.length != 0) {
                peer_parent->peers.last()->next_bb = else_block;
            }
            ResultLocPeer *peer_result = create_peer_result(peer_parent);
            peer_parent->peers.append(peer_result);

            else_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_result->base);
            if (else_result == irb->codegen->invalid_inst_src)
                return else_result;
            if (!instr_is_unreachable(else_result))
                ir_mark_gen(ir_build_br(irb, scope, node, end_block, is_comptime));
        }
        IrBasicBlockSrc *after_else_block = irb->current_basic_block;
        // Merge break values with the else (or implicit void) value.
        ir_set_cursor_at_end_and_append_block(irb, end_block);
        if (else_result) {
            incoming_blocks.append(after_else_block);
            incoming_values.append(else_result);
        } else {
            incoming_blocks.append(after_cond_block);
            incoming_values.append(void_else_result);
        }
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }

        IrInstSrc *phi = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
        return ir_expr_wrap(irb, scope, phi, result_loc);
    }
}
|
|
|
|
// Lowers a `for` expression (NodeTypeForExpr) to source IR.
//
// Emits a counted loop over `array_expr` using a hidden (or user-named) usize
// index variable:
//
//   entry:    index = 0; len = array.len; br ForCond
//   ForCond:  cond = index < len; condbr cond, ForBody, ForElse/ForEnd
//   ForBody:  elem = &array[index] (loaded unless |*elem|); <body>; br ForContinue
//   ForContinue: index += 1; br ForCond
//   ForElse:  <else expr> (only when an else clause exists); br ForEnd
//   ForEnd:   phi of break values / else result
//
// The basic-block emission order below is the function's contract; callers of
// `break` feed `incoming_blocks`/`incoming_values` through `loop_scope`.
static IrInstSrc *ir_gen_for_expr(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeForExpr);

    AstNode *array_node = node->data.for_expr.array_expr;
    AstNode *elem_node = node->data.for_expr.elem_node;
    AstNode *index_node = node->data.for_expr.index_node;
    AstNode *body_node = node->data.for_expr.body;
    AstNode *else_node = node->data.for_expr.else_node;

    // `for (x) |elem| ...` requires the element capture; reject `for (x) ...`.
    if (!elem_node) {
        add_node_error(irb->codegen, node, buf_sprintf("for loop expression missing element parameter"));
        return irb->codegen->invalid_inst_src;
    }
    assert(elem_node->type == NodeTypeSymbol);

    // Scope used to spill values that must survive suspension points inside the loop.
    ScopeExpr *spill_scope = create_expr_scope(irb->codegen, node, parent_scope);

    // Evaluate the array operand once, as a pointer (LValPtr) so elements can
    // be addressed without copying the aggregate.
    IrInstSrc *array_val_ptr = ir_gen_node_extra(irb, array_node, &spill_scope->base, LValPtr, nullptr);
    if (array_val_ptr == irb->codegen->invalid_inst_src)
        return array_val_ptr;

    // The loop is comptime if we are inlining or the user wrote `inline for`.
    IrInstSrc *is_comptime = ir_build_const_bool(irb, parent_scope, node,
        ir_should_inline(irb->exec, parent_scope) || node->data.for_expr.is_inline);

    // Create the index variable: either the user's `|elem, i|` capture, or a
    // hidden shadowable variable named "i".
    AstNode *index_var_source_node;
    ZigVar *index_var;
    const char *index_var_name;
    if (index_node) {
        index_var_source_node = index_node;
        Buf *index_var_name_buf = index_node->data.symbol_expr.symbol;
        index_var = ir_create_var(irb, index_node, parent_scope, index_var_name_buf, true, false, false, is_comptime);
        index_var_name = buf_ptr(index_var_name_buf);
    } else {
        index_var_source_node = node;
        index_var = ir_create_var(irb, node, parent_scope, nullptr, true, false, true, is_comptime);
        index_var_name = "i";
    }

    // index = 0; subsequent code runs in the scope that contains the index var.
    IrInstSrc *zero = ir_build_const_usize(irb, parent_scope, node, 0);
    build_decl_var_and_init(irb, parent_scope, index_var_source_node, index_var, zero, index_var_name, is_comptime);
    parent_scope = index_var->child_scope;

    IrInstSrc *one = ir_build_const_usize(irb, parent_scope, node, 1);
    IrInstSrc *index_ptr = ir_build_var_ptr(irb, parent_scope, node, index_var);

    IrBasicBlockSrc *cond_block = ir_create_basic_block(irb, parent_scope, "ForCond");
    IrBasicBlockSrc *body_block = ir_create_basic_block(irb, parent_scope, "ForBody");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, parent_scope, "ForEnd");
    // Without an else clause, a false condition jumps straight to the end block.
    IrBasicBlockSrc *else_block = else_node ? ir_create_basic_block(irb, parent_scope, "ForElse") : end_block;
    IrBasicBlockSrc *continue_block = ir_create_basic_block(irb, parent_scope, "ForContinue");

    // Hoist the length load out of the loop; `.len` works for arrays and slices.
    Buf *len_field_name = buf_create_from_str("len");
    IrInstSrc *len_ref = ir_build_field_ptr(irb, parent_scope, node, array_val_ptr, len_field_name, false);
    IrInstSrc *len_val = ir_build_load_ptr(irb, &spill_scope->base, node, len_ref);
    ir_build_br(irb, parent_scope, node, cond_block, is_comptime);

    // ForCond: index < len ?
    ir_set_cursor_at_end_and_append_block(irb, cond_block);
    IrInstSrc *index_val = ir_build_load_ptr(irb, &spill_scope->base, node, index_ptr);
    IrInstSrc *cond = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpLessThan, index_val, len_val, false);
    IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
    // With no else clause the loop's value is void, produced here in the cond block.
    IrInstSrc *void_else_value = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, parent_scope, node));
    IrInstSrc *cond_br_inst = ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, cond,
                body_block, else_block, is_comptime));

    ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc, is_comptime);

    // ForBody: bind the element capture (by value or by pointer for |*elem|).
    ir_set_cursor_at_end_and_append_block(irb, body_block);
    IrInstSrc *elem_ptr = ir_build_elem_ptr(irb, &spill_scope->base, node, array_val_ptr, index_val,
            false, PtrLenSingle, nullptr);
    // TODO make it an error to write to element variable or i variable.
    Buf *elem_var_name = elem_node->data.symbol_expr.symbol;
    ZigVar *elem_var = ir_create_var(irb, elem_node, parent_scope, elem_var_name, true, false, false, is_comptime);
    Scope *child_scope = elem_var->child_scope;

    IrInstSrc *elem_value = node->data.for_expr.elem_is_ptr ?
        elem_ptr : ir_build_load_ptr(irb, &spill_scope->base, elem_node, elem_ptr);
    build_decl_var_and_init(irb, parent_scope, elem_node, elem_var, elem_value, buf_ptr(elem_var_name), is_comptime);

    // `blk: for ...` label must not shadow an enclosing loop label.
    if (is_duplicate_label(irb->codegen, child_scope, node, node->data.for_expr.name))
        return irb->codegen->invalid_inst_src;

    // Loop scope collects break targets/values; see note below about lval/result_loc.
    ZigList<IrInstSrc *> incoming_values = {0};
    ZigList<IrBasicBlockSrc *> incoming_blocks = {0};
    ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, child_scope);
    loop_scope->break_block = end_block;
    loop_scope->continue_block = continue_block;
    loop_scope->is_comptime = is_comptime;
    loop_scope->incoming_blocks = &incoming_blocks;
    loop_scope->incoming_values = &incoming_values;
    loop_scope->lval = LValNone;
    loop_scope->peer_parent = peer_parent;
    loop_scope->spill_scope = spill_scope;

    // Note the body block of the loop is not the place that lval and result_loc are used -
    // it's actually in break statements, handled similarly to return statements.
    // That is why we set those values in loop_scope above and not in this ir_gen_node call.
    IrInstSrc *body_result = ir_gen_node(irb, body_node, &loop_scope->base);
    if (body_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // The label was declared but no break/continue referenced it.
    if (loop_scope->name != nullptr && loop_scope->name_used == false) {
        add_node_error(irb->codegen, node, buf_sprintf("unused for label"));
    }

    if (!instr_is_unreachable(body_result)) {
        ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.for_expr.body, body_result));
        ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
    }

    // ForContinue: index += 1; back-edge to the condition.
    ir_set_cursor_at_end_and_append_block(irb, continue_block);
    IrInstSrc *new_index_val = ir_build_bin_op(irb, child_scope, node, IrBinOpAdd, index_val, one, false);
    // The index var is const from the user's perspective; this store is the
    // loop machinery itself, so it is allowed to write through the const.
    ir_build_store_ptr(irb, child_scope, node, index_ptr, new_index_val)->allow_write_through_const = true;
    ir_build_br(irb, child_scope, node, cond_block, is_comptime);

    IrInstSrc *else_result = nullptr;
    if (else_node) {
        ir_set_cursor_at_end_and_append_block(irb, else_block);

        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = else_block;
        }
        ResultLocPeer *peer_result = create_peer_result(peer_parent);
        peer_parent->peers.append(peer_result);
        else_result = ir_gen_node_extra(irb, else_node, parent_scope, LValNone, &peer_result->base);
        if (else_result == irb->codegen->invalid_inst_src)
            return else_result;
        if (!instr_is_unreachable(else_result))
            ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime));
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    ir_set_cursor_at_end_and_append_block(irb, end_block);

    // ForEnd: the fall-through edge contributes either the else result or void.
    if (else_result) {
        incoming_blocks.append(after_else_block);
        incoming_values.append(else_result);
    } else {
        incoming_blocks.append(after_cond_block);
        incoming_values.append(void_else_value);
    }
    if (peer_parent->peers.length != 0) {
        peer_parent->peers.last()->next_bb = end_block;
    }

    IrInstSrc *phi = ir_build_phi(irb, parent_scope, node, incoming_blocks.length,
            incoming_blocks.items, incoming_values.items, peer_parent);
    return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc);
}
|
|
|
|
// Lowers `true` / `false` (NodeTypeBoolLiteral) to a constant bool instruction.
static IrInstSrc *ir_gen_bool_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeBoolLiteral);
    const bool literal_value = node->data.bool_literal.value;
    return ir_build_const_bool(irb, scope, node, literal_value);
}
|
|
|
|
// Lowers an enum literal such as `.foo` (NodeTypeEnumLiteral) to a constant
// enum-literal instruction carrying the field name.
static IrInstSrc *ir_gen_enum_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeEnumLiteral);
    Buf *field_name = &node->data.enum_literal.identifier->data.str_lit.str;
    return ir_build_const_enum_literal(irb, scope, node, field_name);
}
|
|
|
|
// Lowers a string literal (NodeTypeStringLiteral) to a constant string instruction.
static IrInstSrc *ir_gen_string_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeStringLiteral);
    Buf *str_contents = node->data.string_literal.buf;
    return ir_build_const_str_lit(irb, scope, node, str_contents);
}
|
|
|
|
// Lowers an array/slice type expression (NodeTypeArrayType) to IR.
//
// `[N:s]T` (size present) becomes an array-type instruction; `[:s]T` with
// optional const/volatile/allowzero/align qualifiers becomes a slice-type
// instruction. Qualifiers are rejected on array types, checked in the order
// const, volatile, allowzero, align (first offender reported).
static IrInstSrc *ir_gen_array_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeArrayType);

    AstNode *size_node = node->data.array_type.size;
    AstNode *child_type_node = node->data.array_type.child_type;
    AstNode *sentinel_expr = node->data.array_type.sentinel;
    AstNode *align_expr = node->data.array_type.align_expr;
    bool is_const = node->data.array_type.is_const;
    bool is_volatile = node->data.array_type.is_volatile;
    bool is_allow_zero = node->data.array_type.allow_zero_token != nullptr;

    // All operands of a type expression are evaluated at comptime.
    Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope);

    IrInstSrc *sentinel = nullptr;
    if (sentinel_expr != nullptr) {
        sentinel = ir_gen_node(irb, sentinel_expr, comptime_scope);
        if (sentinel == irb->codegen->invalid_inst_src)
            return sentinel;
    }

    if (size_node == nullptr) {
        // Slice type: align expression first, then element type.
        IrInstSrc *align_value = nullptr;
        if (align_expr != nullptr) {
            align_value = ir_gen_node(irb, align_expr, comptime_scope);
            if (align_value == irb->codegen->invalid_inst_src)
                return align_value;
        }

        IrInstSrc *child_type = ir_gen_node(irb, child_type_node, comptime_scope);
        if (child_type == irb->codegen->invalid_inst_src)
            return child_type;

        return ir_build_slice_type(irb, scope, node, child_type, is_const, is_volatile, sentinel,
                align_value, is_allow_zero);
    }

    // Array type: pointer-style qualifiers are invalid here. Report only the
    // first offending qualifier, matching the check order above.
    const char *bad_qualifier_msg = nullptr;
    if (is_const) {
        bad_qualifier_msg = "const qualifier invalid on array type";
    } else if (is_volatile) {
        bad_qualifier_msg = "volatile qualifier invalid on array type";
    } else if (is_allow_zero) {
        bad_qualifier_msg = "allowzero qualifier invalid on array type";
    } else if (align_expr != nullptr) {
        bad_qualifier_msg = "align qualifier invalid on array type";
    }
    if (bad_qualifier_msg != nullptr) {
        add_node_error(irb->codegen, node, buf_create_from_str(bad_qualifier_msg));
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *size_value = ir_gen_node(irb, size_node, comptime_scope);
    if (size_value == irb->codegen->invalid_inst_src)
        return size_value;

    IrInstSrc *child_type = ir_gen_node(irb, child_type_node, comptime_scope);
    if (child_type == irb->codegen->invalid_inst_src)
        return child_type;

    return ir_build_array_type(irb, scope, node, size_value, sentinel, child_type);
}
|
|
|
|
// Lowers an `anyframe` / `anyframe->T` type expression (NodeTypeAnyFrameType).
// A bare `anyframe` has no payload type node, in which case nullptr is passed
// through to the builder.
static IrInstSrc *ir_gen_anyframe_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeAnyFrameType);

    IrInstSrc *payload_type_value = nullptr;
    AstNode *payload_type_node = node->data.anyframe_type.payload_type;
    if (payload_type_node != nullptr) {
        payload_type_value = ir_gen_node(irb, payload_type_node, scope);
        if (payload_type_value == irb->codegen->invalid_inst_src)
            return payload_type_value;
    }

    return ir_build_anyframe_type(irb, scope, node, payload_type_value);
}
|
|
|
|
// Lowers the `undefined` literal (NodeTypeUndefinedLiteral) to a constant
// undefined instruction.
static IrInstSrc *ir_gen_undefined_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeUndefinedLiteral);

    return ir_build_const_undefined(irb, scope, node);
}
|
|
|
|
// Tokenizes an inline-assembly template string into `tok_list`.
//
// Recognized token kinds:
//   - Template:  a run of literal bytes
//   - Percent:   the escape "%%" (a literal '%')
//   - Var:       "%[name]" (operand reference; name may use a-z, 0-9, '_')
//   - UniqueId:  "%=" (number unique to the asm block)
//
// Token start/end are byte offsets into `asm_template`. Returns
// ErrorSemanticAnalyzeFail after reporting a diagnostic on malformed input.
static Error parse_asm_template(IrAnalyze *ira, AstNode *source_node, Buf *asm_template,
        ZigList<AsmToken> *tok_list)
{
    // TODO Connect the errors in this function back up to the actual source location
    // rather than just the token. https://github.com/ziglang/zig/issues/2080
    enum State {
        StateStart,
        StatePercent,
        StateTemplate,
        StateVar,
    };

    assert(tok_list->length == 0);

    // Appends a fresh token starting at byte offset `pos` and returns it.
    auto start_token = [&](size_t pos) -> AsmToken * {
        tok_list->add_one();
        AsmToken *tok = &tok_list->last();
        tok->start = pos;
        return tok;
    };

    AsmToken *cur_tok = nullptr;
    enum State state = StateStart;

    for (size_t i = 0; i < buf_len(asm_template); i += 1) {
        uint8_t c = *((uint8_t*)buf_ptr(asm_template) + i);
        switch (state) {
            case StateStart:
                cur_tok = start_token(i);
                if (c == '%') {
                    cur_tok->id = AsmTokenIdPercent;
                    state = StatePercent;
                } else {
                    cur_tok->id = AsmTokenIdTemplate;
                    state = StateTemplate;
                }
                break;
            case StatePercent:
                if (c == '%') {
                    // "%%" - escaped literal percent sign.
                    cur_tok->end = i;
                    state = StateStart;
                } else if (c == '[') {
                    // "%[" - beginning of a named operand reference.
                    cur_tok->id = AsmTokenIdVar;
                    state = StateVar;
                } else if (c == '=') {
                    // "%=" - unique-id substitution.
                    cur_tok->id = AsmTokenIdUniqueId;
                    cur_tok->end = i;
                    state = StateStart;
                } else {
                    add_node_error(ira->codegen, source_node,
                            buf_create_from_str("expected a '%' or '['"));
                    return ErrorSemanticAnalyzeFail;
                }
                break;
            case StateTemplate:
                if (c == '%') {
                    // Close the literal run and re-process this byte from StateStart.
                    cur_tok->end = i;
                    i -= 1;
                    cur_tok = nullptr;
                    state = StateStart;
                }
                break;
            case StateVar:
                if (c == ']') {
                    cur_tok->end = i;
                    state = StateStart;
                } else if ((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || (c == '_')) {
                    // Valid symbolic-name character; keep scanning.
                } else {
                    add_node_error(ira->codegen, source_node,
                            buf_sprintf("invalid substitution character: '%c'", c));
                    return ErrorSemanticAnalyzeFail;
                }
                break;
        }
    }

    // End-of-template handling: a dangling '%' or an unclosed "%[name" is an
    // error; a trailing literal run is closed at the template's end.
    switch (state) {
        case StateStart:
            break;
        case StatePercent:
        case StateVar:
            add_node_error(ira->codegen, source_node, buf_sprintf("unexpected end of assembly template"));
            return ErrorSemanticAnalyzeFail;
        case StateTemplate:
            cur_tok->end = buf_len(asm_template);
            break;
    }
    return ErrorNone;
}
|
|
|
|
// Resolves a "%[name]" token to its operand index within the asm expression.
// Outputs are numbered before inputs; returns SIZE_MAX when no operand's
// symbolic name matches.
static size_t find_asm_index(CodeGen *g, AstNode *node, AsmToken *tok, Buf *src_template) {
    // The token spans "%[name]"; skip the leading "%[" (the trailing ']' is
    // excluded by subtracting 2 from the span length).
    const char *name_ptr = buf_ptr(src_template) + tok->start + 2;
    size_t name_len = tok->end - tok->start - 2;

    size_t operand_index = 0;
    for (size_t i = 0; i < node->data.asm_expr.output_list.length; i += 1, operand_index += 1) {
        AsmOutput *asm_output = node->data.asm_expr.output_list.at(i);
        if (buf_eql_mem(asm_output->asm_symbolic_name, name_ptr, name_len))
            return operand_index;
    }
    for (size_t i = 0; i < node->data.asm_expr.input_list.length; i += 1, operand_index += 1) {
        AsmInput *asm_input = node->data.asm_expr.input_list.at(i);
        if (buf_eql_mem(asm_input->asm_symbolic_name, name_ptr, name_len))
            return operand_index;
    }
    return SIZE_MAX;
}
|
|
|
|
// Lowers an `asm` expression (NodeTypeAsmExpr) to IR.
//
// Two forms exist:
//   - Global assembly (outside any function): must not be volatile and must
//     have no inputs, outputs, or clobbers; emitted with is_global = true.
//   - Function-level assembly: operand lists are resolved against the current
//     scope; at most one "-> T" output (the expression's result) is allowed,
//     and non-volatile assembly must have at least one output.
static IrInstSrc *ir_gen_asm_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeAsmExpr);
    AstNodeAsmExpr *asm_expr = &node->data.asm_expr;

    IrInstSrc *asm_template = ir_gen_node(irb, asm_expr->asm_template, scope);
    if (asm_template == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    bool is_volatile = asm_expr->volatile_token != nullptr;
    bool in_fn_scope = (scope_fn_entry(scope) != nullptr);

    if (!in_fn_scope) {
        // Module-level (global) assembly.
        if (is_volatile) {
            add_token_error(irb->codegen, node->owner, asm_expr->volatile_token,
                    buf_sprintf("volatile is meaningless on global assembly"));
            return irb->codegen->invalid_inst_src;
        }

        if (asm_expr->output_list.length != 0 || asm_expr->input_list.length != 0 ||
            asm_expr->clobber_list.length != 0)
        {
            add_node_error(irb->codegen, node,
                buf_sprintf("global assembly cannot have inputs, outputs, or clobbers"));
            return irb->codegen->invalid_inst_src;
        }

        return ir_build_asm_src(irb, scope, node, asm_template, nullptr, nullptr,
                                nullptr, 0, is_volatile, true);
    }

    // Parallel operand arrays handed to the asm instruction. For each output
    // slot exactly one of output_types[i] ("-> T" result) or output_vars[i]
    // (bound variable) is filled in.
    IrInstSrc **input_list = heap::c_allocator.allocate<IrInstSrc *>(asm_expr->input_list.length);
    IrInstSrc **output_types = heap::c_allocator.allocate<IrInstSrc *>(asm_expr->output_list.length);
    ZigVar **output_vars = heap::c_allocator.allocate<ZigVar *>(asm_expr->output_list.length);
    size_t return_count = 0;
    if (!is_volatile && asm_expr->output_list.length == 0) {
        add_node_error(irb->codegen, node,
                buf_sprintf("assembly expression with no output must be marked volatile"));
        return irb->codegen->invalid_inst_src;
    }
    for (size_t i = 0; i < asm_expr->output_list.length; i += 1) {
        AsmOutput *asm_output = asm_expr->output_list.at(i);
        if (asm_output->return_type) {
            // "[name] "constraint" (-> T)": the asm expression's result value.
            return_count += 1;

            IrInstSrc *return_type = ir_gen_node(irb, asm_output->return_type, scope);
            if (return_type == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;
            if (return_count > 1) {
                add_node_error(irb->codegen, node,
                        buf_sprintf("inline assembly allows up to one output value"));
                return irb->codegen->invalid_inst_src;
            }
            output_types[i] = return_type;
        } else {
            // Output bound to an existing variable in scope.
            Buf *variable_name = asm_output->variable_name;
            // TODO there is some duplication here with ir_gen_symbol. I need to do a full audit of how
            // inline assembly works. https://github.com/ziglang/zig/issues/215
            ZigVar *var = find_variable(irb->codegen, scope, variable_name, nullptr);
            if (var) {
                output_vars[i] = var;
            } else {
                add_node_error(irb->codegen, node,
                        buf_sprintf("use of undeclared identifier '%s'", buf_ptr(variable_name)));
                return irb->codegen->invalid_inst_src;
            }
        }

        // Only the '=' (write-only) output constraint modifier is supported.
        const char modifier = *buf_ptr(asm_output->constraint);
        if (modifier != '=') {
            add_node_error(irb->codegen, node,
                buf_sprintf("invalid modifier starting output constraint for '%s': '%c', only '=' is supported."
                    " Compiler TODO: see https://github.com/ziglang/zig/issues/215",
                    buf_ptr(asm_output->asm_symbolic_name), modifier));
            return irb->codegen->invalid_inst_src;
        }
    }
    // Evaluate input operand expressions in order.
    for (size_t i = 0; i < asm_expr->input_list.length; i += 1) {
        AsmInput *asm_input = asm_expr->input_list.at(i);
        IrInstSrc *input_value = ir_gen_node(irb, asm_input->expr, scope);
        if (input_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;

        input_list[i] = input_value;
    }

    return ir_build_asm_src(irb, scope, node, asm_template, input_list, output_types,
            output_vars, return_count, is_volatile, false);
}
|
|
|
|
// Lowers `if (optional) |payload| then else other` (NodeTypeIfOptional) to IR.
//
// Evaluates the target as a pointer, tests it for non-null, and branches to
// OptionalThen / OptionalElse; both arms feed a phi at OptionalEndIf. The
// optional capture (`|payload|` or `|*payload|`) is declared as a const
// variable inside the then arm.
static IrInstSrc *ir_gen_if_optional_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeIfOptional);

    Buf *var_symbol = node->data.test_expr.var_symbol;
    AstNode *expr_node = node->data.test_expr.target_node;
    AstNode *then_node = node->data.test_expr.then_node;
    AstNode *else_node = node->data.test_expr.else_node;
    bool var_is_ptr = node->data.test_expr.var_is_ptr;

    // Spill scope so the optional value survives suspension points.
    ScopeExpr *spill_scope = create_expr_scope(irb->codegen, expr_node, scope);
    spill_scope->spill_harder = true;

    // Evaluate the condition operand once, as a pointer.
    IrInstSrc *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, &spill_scope->base, LValPtr, nullptr);
    if (maybe_val_ptr == irb->codegen->invalid_inst_src)
        return maybe_val_ptr;

    IrInstSrc *maybe_val = ir_build_load_ptr(irb, scope, node, maybe_val_ptr);
    IrInstSrc *is_non_null = ir_build_test_non_null_src(irb, scope, node, maybe_val);

    IrBasicBlockSrc *then_block = ir_create_basic_block(irb, scope, "OptionalThen");
    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "OptionalElse");
    IrBasicBlockSrc *endif_block = ir_create_basic_block(irb, scope, "OptionalEndIf");

    // Comptime branch if we are inlining; otherwise decided by whether the
    // condition itself is comptime-known.
    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, is_non_null);
    }
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, scope, node, is_non_null,
            then_block, else_block, is_comptime);

    // peers.at(0) receives the then result, peers.at(1) the else result.
    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block,
            result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, then_block);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, &spill_scope->base, is_comptime);
    Scope *var_scope;
    if (var_symbol) {
        // Declare the const payload capture; |*payload| binds the pointer,
        // |payload| loads the value.
        bool is_shadowable = false;
        bool is_const = true;
        ZigVar *var = ir_create_var(irb, node, subexpr_scope,
                var_symbol, is_const, is_const, is_shadowable, is_comptime);

        IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, subexpr_scope, node, maybe_val_ptr, false);
        IrInstSrc *var_value = var_is_ptr ?
            payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, node, payload_ptr);
        build_decl_var_and_init(irb, subexpr_scope, node, var, var_value, buf_ptr(var_symbol), is_comptime);
        var_scope = var->child_scope;
    } else {
        var_scope = subexpr_scope;
    }
    IrInstSrc *then_expr_result = ir_gen_node_extra(irb, then_node, var_scope, lval,
            &peer_parent->peers.at(0)->base);
    if (then_expr_result == irb->codegen->invalid_inst_src)
        return then_expr_result;
    IrBasicBlockSrc *after_then_block = irb->current_basic_block;
    if (!instr_is_unreachable(then_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, else_block);
    IrInstSrc *else_expr_result;
    if (else_node) {
        else_expr_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_parent->peers.at(1)->base);
        if (else_expr_result == irb->codegen->invalid_inst_src)
            return else_expr_result;
    } else {
        // No else clause: the else arm's value is void.
        else_expr_result = ir_build_const_void(irb, scope, node);
        ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base);
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    if (!instr_is_unreachable(else_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    // Merge both arms in a two-way phi.
    ir_set_cursor_at_end_and_append_block(irb, endif_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = then_expr_result;
    incoming_values[1] = else_expr_result;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_then_block;
    incoming_blocks[1] = after_else_block;

    IrInstSrc *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_expr_wrap(irb, scope, phi, result_loc);
}
|
|
|
|
// Lowers `if (expr) |payload| then else |err| other` (NodeTypeIfErrorExpr)
// to IR.
//
// Evaluates the target as a pointer, tests it for error, and branches to
// TryElse (error path) / TryOk (payload path); both arms feed a phi at
// TryEnd. Note the cond_br takes the *else* block on true since the
// condition is "is error". The payload capture is declared in the ok arm,
// the error capture (if any) in the else arm.
static IrInstSrc *ir_gen_if_err_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeIfErrorExpr);

    AstNode *target_node = node->data.if_err_expr.target_node;
    AstNode *then_node = node->data.if_err_expr.then_node;
    AstNode *else_node = node->data.if_err_expr.else_node;
    bool var_is_ptr = node->data.if_err_expr.var_is_ptr;
    bool var_is_const = true;
    Buf *var_symbol = node->data.if_err_expr.var_symbol;
    Buf *err_symbol = node->data.if_err_expr.err_symbol;

    // Evaluate the condition operand once, as a pointer.
    IrInstSrc *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr, nullptr);
    if (err_val_ptr == irb->codegen->invalid_inst_src)
        return err_val_ptr;

    IrInstSrc *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
    IrInstSrc *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true, false);

    IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, scope, "TryOk");
    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "TryElse");
    IrBasicBlockSrc *endif_block = ir_create_basic_block(irb, scope, "TryEnd");

    bool force_comptime = ir_should_inline(irb->exec, scope);
    IrInstSrc *is_comptime = force_comptime ? ir_build_const_bool(irb, scope, node, true) : ir_build_test_comptime(irb, scope, node, is_err);
    // true => error => else_block; false => payload => ok_block.
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, scope, node, is_err, else_block, ok_block, is_comptime);

    // peers.at(0) receives the then (ok) result, peers.at(1) the else result.
    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block,
            result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, ok_block);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    Scope *var_scope;
    if (var_symbol) {
        // Declare the const payload capture; |*payload| binds the pointer,
        // |payload| loads the value. The var's comptime-ness is keyed off the
        // loaded error-union value rather than the branch condition.
        bool is_shadowable = false;
        IrInstSrc *var_is_comptime = force_comptime ? ir_build_const_bool(irb, subexpr_scope, node, true) : ir_build_test_comptime(irb, subexpr_scope, node, err_val);
        ZigVar *var = ir_create_var(irb, node, subexpr_scope,
                var_symbol, var_is_const, var_is_const, is_shadowable, var_is_comptime);

        IrInstSrc *payload_ptr = ir_build_unwrap_err_payload_src(irb, subexpr_scope, node, err_val_ptr, false, false);
        IrInstSrc *var_value = var_is_ptr ?
            payload_ptr : ir_build_load_ptr(irb, subexpr_scope, node, payload_ptr);
        build_decl_var_and_init(irb, subexpr_scope, node, var, var_value, buf_ptr(var_symbol), var_is_comptime);
        var_scope = var->child_scope;
    } else {
        var_scope = subexpr_scope;
    }
    IrInstSrc *then_expr_result = ir_gen_node_extra(irb, then_node, var_scope, lval,
            &peer_parent->peers.at(0)->base);
    if (then_expr_result == irb->codegen->invalid_inst_src)
        return then_expr_result;
    IrBasicBlockSrc *after_then_block = irb->current_basic_block;
    if (!instr_is_unreachable(then_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, else_block);

    IrInstSrc *else_expr_result;
    if (else_node) {
        // Optional `|err|` capture for the else arm.
        Scope *err_var_scope;
        if (err_symbol) {
            bool is_shadowable = false;
            bool is_const = true;
            ZigVar *var = ir_create_var(irb, node, subexpr_scope,
                    err_symbol, is_const, is_const, is_shadowable, is_comptime);

            IrInstSrc *err_ptr = ir_build_unwrap_err_code_src(irb, subexpr_scope, node, err_val_ptr);
            IrInstSrc *err_value = ir_build_load_ptr(irb, subexpr_scope, node, err_ptr);
            build_decl_var_and_init(irb, subexpr_scope, node, var, err_value, buf_ptr(err_symbol), is_comptime);
            err_var_scope = var->child_scope;
        } else {
            err_var_scope = subexpr_scope;
        }
        else_expr_result = ir_gen_node_extra(irb, else_node, err_var_scope, lval, &peer_parent->peers.at(1)->base);
        if (else_expr_result == irb->codegen->invalid_inst_src)
            return else_expr_result;
    } else {
        // No else clause: the else arm's value is void.
        else_expr_result = ir_build_const_void(irb, scope, node);
        ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base);
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    if (!instr_is_unreachable(else_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    // Merge both arms in a two-way phi.
    ir_set_cursor_at_end_and_append_block(irb, endif_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = then_expr_result;
    incoming_values[1] = else_expr_result;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_then_block;
    incoming_blocks[1] = after_else_block;

    IrInstSrc *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_expr_wrap(irb, scope, phi, result_loc);
}
|
|
|
|
// Lowers one switch prong's expression, including its optional capture
// variable, and records the prong's result for the switch's end-block phi.
//
// The capture's value source depends on the prong kind:
//   - else prong (out_switch_else_var != nullptr): a SwitchElseVar
//     instruction; the created instruction is also returned to the caller
//     through *out_switch_else_var.
//   - multi-item prong (prong_values != nullptr): a SwitchVar instruction
//     over the prong's comptime item values.
//   - single-item prong: the target value pointer itself.
// In each case `var_is_ptr` selects pointer capture (|*x|) vs loaded value.
//
// Appends the current block / result to incoming_blocks / incoming_values and
// branches to end_block when the prong falls through. Returns false when
// generating the prong expression failed.
static bool ir_gen_switch_prong_expr(IrBuilderSrc *irb, Scope *scope, AstNode *switch_node, AstNode *prong_node,
        IrBasicBlockSrc *end_block, IrInstSrc *is_comptime, IrInstSrc *var_is_comptime,
        IrInstSrc *target_value_ptr, IrInstSrc **prong_values, size_t prong_values_len,
        ZigList<IrBasicBlockSrc *> *incoming_blocks, ZigList<IrInstSrc *> *incoming_values,
        IrInstSrcSwitchElseVar **out_switch_else_var, LVal lval, ResultLoc *result_loc)
{
    assert(switch_node->type == NodeTypeSwitchExpr);
    assert(prong_node->type == NodeTypeSwitchProng);

    AstNode *expr_node = prong_node->data.switch_prong.expr;
    AstNode *var_symbol_node = prong_node->data.switch_prong.var_symbol;
    Scope *child_scope;
    if (var_symbol_node) {
        // Declare the const capture variable for this prong.
        assert(var_symbol_node->type == NodeTypeSymbol);
        Buf *var_name = var_symbol_node->data.symbol_expr.symbol;
        bool var_is_ptr = prong_node->data.switch_prong.var_is_ptr;

        bool is_shadowable = false;
        bool is_const = true;
        ZigVar *var = ir_create_var(irb, var_symbol_node, scope,
                var_name, is_const, is_const, is_shadowable, var_is_comptime);
        child_scope = var->child_scope;
        IrInstSrc *var_value;
        if (out_switch_else_var != nullptr) {
            // Else prong: payload pointer comes from a SwitchElseVar instruction.
            IrInstSrcSwitchElseVar *switch_else_var = ir_build_switch_else_var(irb, scope, var_symbol_node,
                    target_value_ptr);
            *out_switch_else_var = switch_else_var;
            IrInstSrc *payload_ptr = &switch_else_var->base;
            var_value = var_is_ptr ?
                payload_ptr : ir_build_load_ptr(irb, scope, var_symbol_node, payload_ptr);
        } else if (prong_values != nullptr) {
            // Multi-item prong: payload pointer selected by the matched value.
            IrInstSrc *payload_ptr = ir_build_switch_var(irb, scope, var_symbol_node, target_value_ptr,
                    prong_values, prong_values_len);
            var_value = var_is_ptr ?
                payload_ptr : ir_build_load_ptr(irb, scope, var_symbol_node, payload_ptr);
        } else {
            // Single-item prong: capture the switch target directly.
            var_value = var_is_ptr ?
                target_value_ptr : ir_build_load_ptr(irb, scope, var_symbol_node, target_value_ptr);
        }
        build_decl_var_and_init(irb, scope, var_symbol_node, var, var_value, buf_ptr(var_name), var_is_comptime);
    } else {
        child_scope = scope;
    }

    IrInstSrc *expr_result = ir_gen_node_extra(irb, expr_node, child_scope, lval, result_loc);
    if (expr_result == irb->codegen->invalid_inst_src)
        return false;
    if (!instr_is_unreachable(expr_result))
        ir_mark_gen(ir_build_br(irb, scope, switch_node, end_block, is_comptime));
    // Record this prong's edge for the phi at the switch's end block.
    incoming_blocks->append(irb->current_basic_block);
    incoming_values->append(expr_result);
    return true;
}
|
|
|
|
// Lowers a `switch` expression (NodeTypeSwitchExpr) to source IR.
//
// Shape of the generated IR:
//   * The target is evaluated once as a pointer (so payload captures can
//     refer to it) and the value is extracted with SwitchTarget.
//   * Prongs containing ranges cannot go through SwitchBr; they are lowered
//     first as a chain of compare + cond-br "SwitchRangeYes/No" blocks.
//     The else / '_' prong is also handled in this first pass.
//   * Plain single-item prongs are collected into `cases` and dispatched
//     with one SwitchBr instruction in a second pass.
//   * Every prong body branches to the shared "SwitchEnd" block, where the
//     results merge in a phi. All prongs share one ResultLocPeerParent so
//     they write through the same result location.
//   * Every item/range is recorded in `check_ranges` and fed to a
//     CheckSwitchProngs instruction so analysis can verify exhaustiveness
//     and reject duplicate prongs.
static IrInstSrc *ir_gen_switch_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeSwitchExpr);

    AstNode *target_node = node->data.switch_expr.expr;
    // Generate the target by pointer; prong payload captures need an lvalue.
    IrInstSrc *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr, nullptr);
    if (target_value_ptr == irb->codegen->invalid_inst_src)
        return target_value_ptr;
    IrInstSrc *target_value = ir_build_switch_target(irb, scope, node, target_value_ptr);

    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "SwitchElse");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, scope, "SwitchEnd");

    size_t prong_count = node->data.switch_expr.prongs.length;
    ZigList<IrInstSrcSwitchBrCase> cases = {0};

    // When inlining, everything is comptime; otherwise comptime-ness of the
    // dispatch (and of prong capture variables) is decided during analysis.
    IrInstSrc *is_comptime;
    IrInstSrc *var_is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
        var_is_comptime = is_comptime;
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, target_value);
        var_is_comptime = ir_build_test_comptime(irb, scope, node, target_value_ptr);
    }

    ZigList<IrInstSrc *> incoming_values = {0};
    ZigList<IrBasicBlockSrc *> incoming_blocks = {0};
    // Accumulates every covered item/range for the exhaustiveness check.
    ZigList<IrInstSrcCheckSwitchProngsRange> check_ranges = {0};

    IrInstSrcSwitchElseVar *switch_else_var = nullptr;

    // Shared result-location parent; each prong body gets its own peer.
    ResultLocPeerParent *peer_parent = heap::c_allocator.create<ResultLocPeerParent>();
    peer_parent->base.id = ResultLocIdPeerParent;
    peer_parent->base.allow_write_through_const = result_loc->allow_write_through_const;
    peer_parent->end_bb = end_block;
    peer_parent->is_comptime = is_comptime;
    peer_parent->parent = result_loc;

    ir_build_reset_result(irb, scope, node, &peer_parent->base);

    // First do the else and the ranges
    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    // Switch items must be comptime-known, so item expressions are generated
    // in a comptime scope.
    Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope);
    AstNode *else_prong = nullptr;
    AstNode *underscore_prong = nullptr;
    for (size_t prong_i = 0; prong_i < prong_count; prong_i += 1) {
        AstNode *prong_node = node->data.switch_expr.prongs.at(prong_i);
        size_t prong_item_count = prong_node->data.switch_prong.items.length;
        if (prong_node->data.switch_prong.any_items_are_range) {
            ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent);

            // Build `ok_bit`: an OR over this prong's items, where a range
            // contributes (target >= start) AND (target <= end) and a plain
            // item contributes (item == target).
            IrInstSrc *ok_bit = nullptr;
            AstNode *last_item_node = nullptr;
            for (size_t item_i = 0; item_i < prong_item_count; item_i += 1) {
                AstNode *item_node = prong_node->data.switch_prong.items.at(item_i);
                last_item_node = item_node;
                if (item_node->type == NodeTypeSwitchRange) {
                    AstNode *start_node = item_node->data.switch_range.start;
                    AstNode *end_node = item_node->data.switch_range.end;

                    IrInstSrc *start_value = ir_gen_node(irb, start_node, comptime_scope);
                    if (start_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;

                    IrInstSrc *end_value = ir_gen_node(irb, end_node, comptime_scope);
                    if (end_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;

                    IrInstSrcCheckSwitchProngsRange *check_range = check_ranges.add_one();
                    check_range->start = start_value;
                    check_range->end = end_value;

                    IrInstSrc *lower_range_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpCmpGreaterOrEq,
                            target_value, start_value, false);
                    IrInstSrc *upper_range_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpCmpLessOrEq,
                            target_value, end_value, false);
                    IrInstSrc *both_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpBoolAnd,
                            lower_range_ok, upper_range_ok, false);
                    if (ok_bit) {
                        ok_bit = ir_build_bin_op(irb, scope, item_node, IrBinOpBoolOr, both_ok, ok_bit, false);
                    } else {
                        ok_bit = both_ok;
                    }
                } else {
                    IrInstSrc *item_value = ir_gen_node(irb, item_node, comptime_scope);
                    if (item_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;

                    // A single item is recorded as the degenerate range
                    // [item, item] for the prong check.
                    IrInstSrcCheckSwitchProngsRange *check_range = check_ranges.add_one();
                    check_range->start = item_value;
                    check_range->end = item_value;

                    IrInstSrc *cmp_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpCmpEq,
                            item_value, target_value, false);
                    if (ok_bit) {
                        ok_bit = ir_build_bin_op(irb, scope, item_node, IrBinOpBoolOr, cmp_ok, ok_bit, false);
                    } else {
                        ok_bit = cmp_ok;
                    }
                }
            }

            IrBasicBlockSrc *range_block_yes = ir_create_basic_block(irb, scope, "SwitchRangeYes");
            IrBasicBlockSrc *range_block_no = ir_create_basic_block(irb, scope, "SwitchRangeNo");

            assert(ok_bit);
            assert(last_item_node);
            IrInstSrc *br_inst = ir_mark_gen(ir_build_cond_br(irb, scope, last_item_node, ok_bit,
                    range_block_yes, range_block_no, is_comptime));
            // The first branch emitted doubles as the source instruction of
            // the shared result location.
            if (peer_parent->base.source_instruction == nullptr) {
                peer_parent->base.source_instruction = br_inst;
            }

            if (peer_parent->peers.length > 0) {
                peer_parent->peers.last()->next_bb = range_block_yes;
            }
            peer_parent->peers.append(this_peer_result_loc);
            ir_set_cursor_at_end_and_append_block(irb, range_block_yes);
            if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block,
                is_comptime, var_is_comptime, target_value_ptr, nullptr, 0,
                &incoming_blocks, &incoming_values, nullptr, LValNone, &this_peer_result_loc->base))
            {
                return irb->codegen->invalid_inst_src;
            }

            // When none of this prong's ranges matched, fall through to
            // test the next range prong (and eventually the SwitchBr).
            ir_set_cursor_at_end_and_append_block(irb, range_block_no);
        } else {
            if (prong_item_count == 0) {
                if (else_prong) {
                    ErrorMsg *msg = add_node_error(irb->codegen, prong_node,
                            buf_sprintf("multiple else prongs in switch expression"));
                    add_error_note(irb->codegen, msg, else_prong,
                            buf_sprintf("previous else prong is here"));
                    return irb->codegen->invalid_inst_src;
                }
                else_prong = prong_node;
            } else if (prong_item_count == 1 &&
                    prong_node->data.switch_prong.items.at(0)->type == NodeTypeSymbol &&
                    buf_eql_str(prong_node->data.switch_prong.items.at(0)->data.symbol_expr.symbol, "_")) {
                // `_` prong: like else, but participates in exhaustiveness
                // checking differently (see CheckSwitchProngs below).
                if (underscore_prong) {
                    ErrorMsg *msg = add_node_error(irb->codegen, prong_node,
                            buf_sprintf("multiple '_' prongs in switch expression"));
                    add_error_note(irb->codegen, msg, underscore_prong,
                            buf_sprintf("previous '_' prong is here"));
                    return irb->codegen->invalid_inst_src;
                }
                underscore_prong = prong_node;
            } else {
                // Plain item prong; handled by the second pass below.
                continue;
            }
            // else and '_' are mutually exclusive.
            if (underscore_prong && else_prong) {
                ErrorMsg *msg = add_node_error(irb->codegen, prong_node,
                        buf_sprintf("else and '_' prong in switch expression"));
                if (underscore_prong == prong_node)
                    add_error_note(irb->codegen, msg, else_prong,
                            buf_sprintf("else prong is here"));
                else
                    add_error_note(irb->codegen, msg, underscore_prong,
                            buf_sprintf("'_' prong is here"));
                return irb->codegen->invalid_inst_src;
            }
            ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent);

            // Generate the else/'_' body into else_block, then restore the
            // cursor so subsequent prongs continue from where we were.
            IrBasicBlockSrc *prev_block = irb->current_basic_block;
            if (peer_parent->peers.length > 0) {
                peer_parent->peers.last()->next_bb = else_block;
            }
            peer_parent->peers.append(this_peer_result_loc);
            ir_set_cursor_at_end_and_append_block(irb, else_block);
            if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block,
                is_comptime, var_is_comptime, target_value_ptr, nullptr, 0, &incoming_blocks, &incoming_values,
                &switch_else_var, LValNone, &this_peer_result_loc->base))
            {
                return irb->codegen->invalid_inst_src;
            }
            ir_set_cursor_at_end(irb, prev_block);
        }
    }

    // next do the non-else non-ranges
    for (size_t prong_i = 0; prong_i < prong_count; prong_i += 1) {
        AstNode *prong_node = node->data.switch_expr.prongs.at(prong_i);
        size_t prong_item_count = prong_node->data.switch_prong.items.length;
        if (prong_item_count == 0)
            continue;
        if (prong_node->data.switch_prong.any_items_are_range)
            continue;
        if (underscore_prong == prong_node)
            continue;

        ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent);

        IrBasicBlockSrc *prong_block = ir_create_basic_block(irb, scope, "SwitchProng");
        // Items are passed to the prong body so multi-item captures can be
        // resolved; all items of one prong share one block.
        IrInstSrc **items = heap::c_allocator.allocate<IrInstSrc *>(prong_item_count);

        for (size_t item_i = 0; item_i < prong_item_count; item_i += 1) {
            AstNode *item_node = prong_node->data.switch_prong.items.at(item_i);
            assert(item_node->type != NodeTypeSwitchRange);

            IrInstSrc *item_value = ir_gen_node(irb, item_node, comptime_scope);
            if (item_value == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;

            IrInstSrcCheckSwitchProngsRange *check_range = check_ranges.add_one();
            check_range->start = item_value;
            check_range->end = item_value;

            IrInstSrcSwitchBrCase *this_case = cases.add_one();
            this_case->value = item_value;
            this_case->block = prong_block;

            items[item_i] = item_value;
        }

        IrBasicBlockSrc *prev_block = irb->current_basic_block;
        if (peer_parent->peers.length > 0) {
            peer_parent->peers.last()->next_bb = prong_block;
        }
        peer_parent->peers.append(this_peer_result_loc);
        ir_set_cursor_at_end_and_append_block(irb, prong_block);
        if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block,
            is_comptime, var_is_comptime, target_value_ptr, items, prong_item_count,
            &incoming_blocks, &incoming_values, nullptr, LValNone, &this_peer_result_loc->base))
        {
            return irb->codegen->invalid_inst_src;
        }

        ir_set_cursor_at_end(irb, prev_block);

    }

    // Exhaustiveness / duplicate-prong verification happens in analysis.
    IrInstSrc *switch_prongs_void = ir_build_check_switch_prongs(irb, scope, node, target_value,
            check_ranges.items, check_ranges.length, else_prong, underscore_prong != nullptr);

    IrInstSrc *br_instruction;
    if (cases.length == 0) {
        // Only range/else prongs: control already threaded through the
        // compare chain; just jump to the else block.
        br_instruction = ir_build_br(irb, scope, node, else_block, is_comptime);
    } else {
        IrInstSrcSwitchBr *switch_br = ir_build_switch_br_src(irb, scope, node, target_value, else_block,
                cases.length, cases.items, is_comptime, switch_prongs_void);
        if (switch_else_var != nullptr) {
            // Lets the else capture variable see the SwitchBr it belongs to.
            switch_else_var->switch_br = switch_br;
        }
        br_instruction = &switch_br->base;
    }
    if (peer_parent->base.source_instruction == nullptr) {
        peer_parent->base.source_instruction = br_instruction;
    }
    // All peers share the parent's source instruction.
    for (size_t i = 0; i < peer_parent->peers.length; i += 1) {
        peer_parent->peers.at(i)->base.source_instruction = peer_parent->base.source_instruction;
    }

    if (!else_prong && !underscore_prong) {
        // Exhaustive switch: reaching the else block is impossible.
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = else_block;
        }
        ir_set_cursor_at_end_and_append_block(irb, else_block);
        ir_build_unreachable(irb, scope, node);
    } else {
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }
    }

    // Join block: merge the results of every prong that fell through.
    ir_set_cursor_at_end_and_append_block(irb, end_block);
    assert(incoming_blocks.length == incoming_values.length);
    IrInstSrc *result_instruction;
    if (incoming_blocks.length == 0) {
        result_instruction = ir_build_const_void(irb, scope, node);
    } else {
        result_instruction = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
    }
    return ir_lval_wrap(irb, scope, result_instruction, lval, result_loc);
}
|
|
|
|
// Lowers a `comptime` expression: the child expression is simply generated
// inside a comptime scope; no dedicated instruction is needed.
static IrInstSrc *ir_gen_comptime(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval) {
    assert(node->type == NodeTypeCompTime);

    Scope *comptime_scope = create_comptime_scope(irb->codegen, node, parent_scope);
    // result_loc is intentionally null; EndExpr handles the result location.
    return ir_gen_node_extra(irb, node->data.comptime_expr.expr, comptime_scope, lval, nullptr);
}
|
|
|
|
// Lowers a `nosuspend` expression by generating the child expression inside
// a nosuspend scope.
// NOTE(review): the child is read from node->data.comptime_expr.expr —
// presumably the nosuspend AST node shares that union member; confirm
// against the parser.
static IrInstSrc *ir_gen_nosuspend(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval) {
    assert(node->type == NodeTypeNoSuspend);

    Scope *nosuspend_scope = create_nosuspend_scope(irb->codegen, node, parent_scope);
    // result_loc is intentionally null; EndExpr handles the result location.
    return ir_gen_node_extra(irb, node->data.comptime_expr.expr, nosuspend_scope, lval, nullptr);
}
|
|
|
|
// Lowers a labeled `break :label value` out of a labeled block: evaluates
// the (optional) break operand into a new result-location peer of the block,
// runs the defers between the break site and the block, and branches to the
// block's end block, registering this edge for the block's result phi.
static IrInstSrc *ir_gen_return_from_block(IrBuilderSrc *irb, Scope *break_scope, AstNode *node, ScopeBlock *block_scope) {
    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, break_scope)) {
        is_comptime = ir_build_const_bool(irb, break_scope, node, true);
    } else {
        is_comptime = block_scope->is_comptime;
    }

    IrInstSrc *result_value;
    if (node->data.break_expr.expr) {
        // Each break site contributes its own peer to the block's shared
        // result location.
        ResultLocPeer *peer_result = create_peer_result(block_scope->peer_parent);
        block_scope->peer_parent->peers.append(peer_result);

        result_value = ir_gen_node_extra(irb, node->data.break_expr.expr, break_scope, block_scope->lval,
                &peer_result->base);
        if (result_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        // `break :label;` with no operand yields void.
        result_value = ir_build_const_void(irb, break_scope, node);
    }

    IrBasicBlockSrc *dest_block = block_scope->end_block;
    // Run defers declared between the break site and the target block.
    if (!ir_gen_defers_for_block(irb, break_scope, dest_block->scope, nullptr, nullptr))
        return irb->codegen->invalid_inst_src;

    // Register this predecessor edge for the block's result phi. The
    // current basic block may have changed while generating defers.
    block_scope->incoming_blocks->append(irb->current_basic_block);
    block_scope->incoming_values->append(result_value);
    return ir_build_br(irb, break_scope, node, dest_block, is_comptime);
}
|
|
|
|
// Lowers `break` / `break :label value`: resolves which loop (or labeled
// block) is being broken out of, evaluates the optional operand, runs the
// intervening defers, and branches to the loop's break block.
static IrInstSrc *ir_gen_break(IrBuilderSrc *irb, Scope *break_scope, AstNode *node) {
    assert(node->type == NodeTypeBreak);

    // Search up the scope. We'll find one of these things first:
    // * function definition scope or global scope => error, break outside loop
    // * defer expression scope => error, cannot break out of defer expression
    // * loop scope => OK
    // * (if it's a labeled break) labeled block => OK

    Scope *search_scope = break_scope;
    // Set exactly when the loop below `break`s (every other path returns),
    // so it is always initialized when read afterwards.
    ScopeLoop *loop_scope;
    for (;;) {
        if (search_scope == nullptr || search_scope->id == ScopeIdFnDef) {
            if (node->data.break_expr.name != nullptr) {
                add_node_error(irb->codegen, node, buf_sprintf("label not found: '%s'", buf_ptr(node->data.break_expr.name)));
                return irb->codegen->invalid_inst_src;
            } else {
                add_node_error(irb->codegen, node, buf_sprintf("break expression outside loop"));
                return irb->codegen->invalid_inst_src;
            }
        } else if (search_scope->id == ScopeIdDeferExpr) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot break out of defer expression"));
            return irb->codegen->invalid_inst_src;
        } else if (search_scope->id == ScopeIdLoop) {
            ScopeLoop *this_loop_scope = (ScopeLoop *)search_scope;
            // An unlabeled break matches the innermost loop; a labeled one
            // must match the loop's label.
            if (node->data.break_expr.name == nullptr ||
                (this_loop_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_loop_scope->name)))
            {
                this_loop_scope->name_used = true;
                loop_scope = this_loop_scope;
                break;
            }
        } else if (search_scope->id == ScopeIdBlock) {
            ScopeBlock *this_block_scope = (ScopeBlock *)search_scope;
            // Only a labeled break can target a labeled block.
            if (node->data.break_expr.name != nullptr &&
                (this_block_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_block_scope->name)))
            {
                assert(this_block_scope->end_block != nullptr);
                this_block_scope->name_used = true;
                // Breaking out of a labeled block is handled separately.
                return ir_gen_return_from_block(irb, break_scope, node, this_block_scope);
            }
        } else if (search_scope->id == ScopeIdSuspend) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot break out of suspend block"));
            return irb->codegen->invalid_inst_src;
        }
        search_scope = search_scope->parent;
    }

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, break_scope)) {
        is_comptime = ir_build_const_bool(irb, break_scope, node, true);
    } else {
        is_comptime = loop_scope->is_comptime;
    }

    IrInstSrc *result_value;
    if (node->data.break_expr.expr) {
        // Each break site contributes its own peer to the loop's shared
        // result location.
        ResultLocPeer *peer_result = create_peer_result(loop_scope->peer_parent);
        loop_scope->peer_parent->peers.append(peer_result);

        result_value = ir_gen_node_extra(irb, node->data.break_expr.expr, break_scope,
                loop_scope->lval, &peer_result->base);
        if (result_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        // Plain `break;` yields void.
        result_value = ir_build_const_void(irb, break_scope, node);
    }

    IrBasicBlockSrc *dest_block = loop_scope->break_block;
    // Run defers declared between the break site and the loop.
    if (!ir_gen_defers_for_block(irb, break_scope, dest_block->scope, nullptr, nullptr))
        return irb->codegen->invalid_inst_src;

    // Register this predecessor edge for the loop's result phi. The current
    // basic block may have changed while generating defers.
    loop_scope->incoming_blocks->append(irb->current_basic_block);
    loop_scope->incoming_values->append(result_value);
    return ir_build_br(irb, break_scope, node, dest_block, is_comptime);
}
|
|
|
|
// Lowers `continue` / `continue :label`: resolves which loop is being
// continued, emits CheckRuntimeScope for every runtime scope crossed on the
// way (a comptime-known continue must not cross a runtime condition), runs
// the intervening defers, and branches to the loop's continue block.
//
// Fix: `runtime_scopes` heap storage was leaked on the early error returns;
// it is now deinitialized on every exit path that follows its creation.
static IrInstSrc *ir_gen_continue(IrBuilderSrc *irb, Scope *continue_scope, AstNode *node) {
    assert(node->type == NodeTypeContinue);

    // Search up the scope. We'll find one of these things first:
    // * function definition scope or global scope => error, break outside loop
    // * defer expression scope => error, cannot break out of defer expression
    // * loop scope => OK

    // Runtime scopes crossed between the continue site and the loop; each
    // one gets a CheckRuntimeScope instruction below.
    ZigList<ScopeRuntime *> runtime_scopes = {};

    Scope *search_scope = continue_scope;
    // Set exactly when the loop below `break`s (every other path returns),
    // so it is always initialized when read afterwards.
    ScopeLoop *loop_scope;
    for (;;) {
        if (search_scope == nullptr || search_scope->id == ScopeIdFnDef) {
            if (node->data.continue_expr.name != nullptr) {
                add_node_error(irb->codegen, node, buf_sprintf("labeled loop not found: '%s'", buf_ptr(node->data.continue_expr.name)));
                runtime_scopes.deinit();
                return irb->codegen->invalid_inst_src;
            } else {
                add_node_error(irb->codegen, node, buf_sprintf("continue expression outside loop"));
                runtime_scopes.deinit();
                return irb->codegen->invalid_inst_src;
            }
        } else if (search_scope->id == ScopeIdDeferExpr) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot continue out of defer expression"));
            runtime_scopes.deinit();
            return irb->codegen->invalid_inst_src;
        } else if (search_scope->id == ScopeIdLoop) {
            ScopeLoop *this_loop_scope = (ScopeLoop *)search_scope;
            // An unlabeled continue matches the innermost loop; a labeled
            // one must match the loop's label.
            if (node->data.continue_expr.name == nullptr ||
                (this_loop_scope->name != nullptr && buf_eql_buf(node->data.continue_expr.name, this_loop_scope->name)))
            {
                this_loop_scope->name_used = true;
                loop_scope = this_loop_scope;
                break;
            }
        } else if (search_scope->id == ScopeIdRuntime) {
            ScopeRuntime *scope_runtime = (ScopeRuntime *)search_scope;
            runtime_scopes.append(scope_runtime);
        }
        search_scope = search_scope->parent;
    }

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, continue_scope)) {
        is_comptime = ir_build_const_bool(irb, continue_scope, node, true);
    } else {
        is_comptime = loop_scope->is_comptime;
    }

    // A comptime continue crossing a runtime condition is a compile error;
    // these checks are evaluated during analysis.
    for (size_t i = 0; i < runtime_scopes.length; i += 1) {
        ScopeRuntime *scope_runtime = runtime_scopes.at(i);
        ir_mark_gen(ir_build_check_runtime_scope(irb, continue_scope, node, scope_runtime->is_comptime, is_comptime));
    }
    runtime_scopes.deinit();

    IrBasicBlockSrc *dest_block = loop_scope->continue_block;
    // Run defers declared between the continue site and the loop.
    if (!ir_gen_defers_for_block(irb, continue_scope, dest_block->scope, nullptr, nullptr))
        return irb->codegen->invalid_inst_src;
    return ir_mark_gen(ir_build_br(irb, continue_scope, node, dest_block, is_comptime));
}
|
|
|
|
// Lowers the `anyerror` keyword: it is simply a constant reference to the
// global error set type.
static IrInstSrc *ir_gen_error_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeErrorType);
    ZigType *global_err_set = irb->codegen->builtin_types.entry_global_error_set;
    return ir_build_const_type(irb, scope, node, global_err_set);
}
|
|
|
|
// Lowers a `defer` statement. No code is emitted at the declaration site;
// the statement only records two scopes on the AST node, which scope-exit
// generation (ir_gen_defers_for_block) consults later. The statement itself
// evaluates to void.
static IrInstSrc *ir_gen_defer(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeDefer);

    // Scope for code following the defer (so later statements see it).
    ScopeDefer *child = create_defer_scope(irb->codegen, node, parent_scope);
    node->data.defer.child_scope = &child->base;

    // Scope in which the deferred expression itself will be generated.
    ScopeDeferExpr *expr = create_defer_expr_scope(irb->codegen, node, parent_scope);
    node->data.defer.expr_scope = &expr->base;

    return ir_build_const_void(irb, parent_scope, node);
}
|
|
|
|
// Lowers a slice expression `array[start .. end :sentinel]`, where `end`
// and `sentinel` are optional (passed through as null when absent).
static IrInstSrc *ir_gen_slice(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
    assert(node->type == NodeTypeSliceExpr);

    AstNodeSliceExpr *slice_expr = &node->data.slice_expr;

    // The array operand is generated by pointer so the slice can alias it.
    IrInstSrc *ptr_value = ir_gen_node_extra(irb, slice_expr->array_ref_expr, scope, LValPtr, nullptr);
    if (ptr_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *start_value = ir_gen_node(irb, slice_expr->start, scope);
    if (start_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // Upper bound is optional; null means "to the end".
    IrInstSrc *end_value = nullptr;
    if (slice_expr->end != nullptr) {
        end_value = ir_gen_node(irb, slice_expr->end, scope);
        if (end_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    }

    // Sentinel is optional as well.
    IrInstSrc *sentinel_value = nullptr;
    if (slice_expr->sentinel != nullptr) {
        sentinel_value = ir_gen_node(irb, slice_expr->sentinel, scope);
        if (sentinel_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *slice = ir_build_slice_src(irb, scope, node, ptr_value, start_value, end_value,
            sentinel_value, true, result_loc);
    return ir_lval_wrap(irb, scope, slice, lval, result_loc);
}
|
|
|
|
// Lowers `op1 catch |err| op2` (NodeTypeCatchExpr).
//
// `op1 catch unreachable` is special-cased into a plain unwrap. Otherwise
// the error union is evaluated once by pointer (in a spill-harder scope),
// tested with TestErr, and control forks into an error block — which may
// bind |err| and evaluates op2 — and an ok block which loads the payload;
// the two arms join in a phi.
static IrInstSrc *ir_gen_catch(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeCatchExpr);

    AstNode *op1_node = node->data.unwrap_err_expr.op1;     // error union operand
    AstNode *op2_node = node->data.unwrap_err_expr.op2;     // fallback expression
    AstNode *var_node = node->data.unwrap_err_expr.symbol;  // optional |err| capture

    if (op2_node->type == NodeTypeUnreachable) {
        // `x catch unreachable`: a |err| capture could never be used.
        if (var_node != nullptr) {
            assert(var_node->type == NodeTypeSymbol);
            Buf *var_name = var_node->data.symbol_expr.symbol;
            add_node_error(irb->codegen, var_node, buf_sprintf("unused variable: '%s'", buf_ptr(var_name)));
            return irb->codegen->invalid_inst_src;
        }
        return ir_gen_catch_unreachable(irb, parent_scope, node, op1_node, lval, result_loc);
    }


    // spill_harder: the error union pointer is used by both arms after the
    // branch, so it must be kept alive across the control-flow split.
    ScopeExpr *spill_scope = create_expr_scope(irb->codegen, op1_node, parent_scope);
    spill_scope->spill_harder = true;

    IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, op1_node, &spill_scope->base, LValPtr, nullptr);
    if (err_union_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true, false);

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, parent_scope)) {
        is_comptime = ir_build_const_bool(irb, parent_scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_err);
    }

    IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrOk");
    IrBasicBlockSrc *err_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrError");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrEnd");
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, parent_scope, node, is_err, err_block, ok_block, is_comptime);

    // Two result-location peers: peers.at(0) is the error arm, peers.at(1)
    // the ok arm; both write through the caller's result_loc.
    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, ok_block, end_block, result_loc,
            is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, err_block);
    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, &spill_scope->base, is_comptime);
    Scope *err_scope;
    if (var_node) {
        // Bind |err| as a const variable holding the error code, and
        // generate op2 in the variable's child scope.
        assert(var_node->type == NodeTypeSymbol);
        Buf *var_name = var_node->data.symbol_expr.symbol;
        bool is_const = true;
        bool is_shadowable = false;
        ZigVar *var = ir_create_var(irb, node, subexpr_scope, var_name,
            is_const, is_const, is_shadowable, is_comptime);
        err_scope = var->child_scope;
        IrInstSrc *err_ptr = ir_build_unwrap_err_code_src(irb, err_scope, node, err_union_ptr);
        IrInstSrc *err_value = ir_build_load_ptr(irb, err_scope, var_node, err_ptr);
        build_decl_var_and_init(irb, err_scope, var_node, var, err_value, buf_ptr(var_name), is_comptime);
    } else {
        err_scope = subexpr_scope;
    }
    IrInstSrc *err_result = ir_gen_node_extra(irb, op2_node, err_scope, LValNone, &peer_parent->peers.at(0)->base);
    if (err_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *after_err_block = irb->current_basic_block;
    // op2 may be noreturn (e.g. `return err`); only branch to the join
    // block when control can actually continue.
    if (!instr_is_unreachable(err_result))
        ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, ok_block);
    IrInstSrc *unwrapped_ptr = ir_build_unwrap_err_payload_src(irb, parent_scope, node, err_union_ptr, false, false);
    IrInstSrc *unwrapped_payload = ir_build_load_ptr(irb, parent_scope, node, unwrapped_ptr);
    ir_build_end_expr(irb, parent_scope, node, unwrapped_payload, &peer_parent->peers.at(1)->base);
    IrBasicBlockSrc *after_ok_block = irb->current_basic_block;
    ir_build_br(irb, parent_scope, node, end_block, is_comptime);

    // Join: merge the fallback value and the unwrapped payload in a phi.
    ir_set_cursor_at_end_and_append_block(irb, end_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = err_result;
    incoming_values[1] = unwrapped_payload;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_err_block;
    incoming_blocks[1] = after_ok_block;
    IrInstSrc *phi = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc);
}
|
|
|
|
// Renders, outermost-first, the const value of every var-decl scope between
// outer_scope (exclusive) and inner_scope (inclusive) into `name`, comma
// separated. Returns whether anything has been written so far, which tells
// the caller/recursion whether a separating comma is needed.
static bool render_instance_name_recursive(CodeGen *codegen, Buf *name, Scope *outer_scope, Scope *inner_scope) {
    if (inner_scope == nullptr || inner_scope == outer_scope)
        return false;
    // Recurse first so outer scopes are rendered before inner ones.
    const bool wrote_something = render_instance_name_recursive(codegen, name, outer_scope, inner_scope->parent);
    if (inner_scope->id != ScopeIdVarDecl)
        return wrote_something;

    if (wrote_something)
        buf_append_char(name, ',');
    // TODO: const ptr reinterpret here to make the var type agree with the value?
    render_const_value(codegen, name, ((ScopeVarDecl *)inner_scope)->var->const_value);
    return true;
}
|
|
|
|
// Chooses a fully-qualified display name for an anonymous type (container
// or error set), writing the unqualified name into out_bare_name. Three
// naming strategies, in priority order:
//   1. the executable has a declaration name (e.g. `const Foo = struct{}`)
//      => that name, qualified by its importing namespace;
//   2. the executable belongs to a generic function instantiation
//      => "fn(comptime-arg-values...)";
//   3. otherwise => "kind:line:col" qualified by the importing namespace.
static Buf *get_anon_type_name(CodeGen *codegen, IrExecutableSrc *exec, const char *kind_name,
        Scope *scope, AstNode *source_node, Buf *out_bare_name)
{
    if (exec != nullptr && exec->name) {
        ZigType *import = get_scope_import(scope);
        Buf *namespace_name = buf_alloc();
        append_namespace_qualification(codegen, namespace_name, import);
        buf_append_buf(namespace_name, exec->name);
        buf_init_from_buf(out_bare_name, exec->name);
        return namespace_name;
    } else if (exec != nullptr && exec->name_fn != nullptr) {
        Buf *name = buf_alloc();
        buf_append_buf(name, &exec->name_fn->symbol_name);
        buf_appendf(name, "(");
        // Render the comptime values bound between the function definition
        // scope and this executable's begin scope, i.e. the instantiation's
        // arguments.
        render_instance_name_recursive(codegen, name, &exec->name_fn->fndef_scope->base, exec->begin_scope);
        buf_appendf(name, ")");
        buf_init_from_buf(out_bare_name, name);
        return name;
    } else {
        ZigType *import = get_scope_import(scope);
        Buf *namespace_name = buf_alloc();
        append_namespace_qualification(codegen, namespace_name, import);
        // 1-based line:column of the declaration site keeps the name unique
        // within the file.
        buf_appendf(namespace_name, "%s:%" ZIG_PRI_usize ":%" ZIG_PRI_usize, kind_name,
                source_node->line + 1, source_node->column + 1);
        buf_init_from_buf(out_bare_name, namespace_name);
        return namespace_name;
    }
}
|
|
|
|
// Lowers an inline container declaration (struct/union/enum/opaque literal)
// to a constant type value. Creates a partial container type, scans its
// member declarations into the container scope, and queues a TldContainer
// for full resolution later.
static IrInstSrc *ir_gen_container_decl(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeContainerDecl);

    ContainerKind kind = node->data.container_decl.kind;
    // Anonymous containers get a synthesized name (decl name, generic
    // instantiation, or kind:line:col).
    Buf *bare_name = buf_alloc();
    Buf *name = get_anon_type_name(irb->codegen, irb->exec, container_string(kind), parent_scope, node, bare_name);

    ContainerLayout layout = node->data.container_decl.layout;
    ZigType *container_type = get_partial_container_type(irb->codegen, parent_scope,
            kind, node, buf_ptr(name), bare_name, layout);
    ScopeDecls *child_scope = get_container_scope(container_type);

    // Register all member declarations so they are visible before the
    // container is fully resolved.
    for (size_t i = 0; i < node->data.container_decl.decls.length; i += 1) {
        AstNode *child_node = node->data.container_decl.decls.at(i);
        scan_decls(irb->codegen, child_scope, child_node);
    }

    // Queue the container for (lazy) full resolution.
    TldContainer *tld_container = heap::c_allocator.create<TldContainer>();
    init_tld(&tld_container->base, TldIdContainer, bare_name, VisibModPub, node, parent_scope);
    tld_container->type_entry = container_type;
    tld_container->decls_scope = child_scope;
    irb->codegen->resolve_queue.append(&tld_container->base);

    // Add this to the list to mark as invalid if analyzing this exec fails.
    irb->exec->tld_list.append(&tld_container->base);

    return ir_build_const_type(irb, parent_scope, node, container_type);
}
|
|
|
|
// errors should be populated with set1's values
|
|
// Builds a new error set type that is the union of set1 and set2.
//
// Precondition (per the comment above): `errors` is a lookup table indexed
// by error value, already populated with set1's entries; it is used to skip
// set2 entries that duplicate set1, and is updated in place with the set2
// entries that get added. When type_name is null the name is synthesized as
// "error{a,b,...}"; otherwise the given name is used verbatim.
static ZigType *get_error_set_union(CodeGen *g, ErrorTableEntry **errors, ZigType *set1, ZigType *set2,
        Buf *type_name)
{
    assert(set1->id == ZigTypeIdErrorSet);
    assert(set2->id == ZigTypeIdErrorSet);

    ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
    // All error set types share the ABI of the global error set.
    err_set_type->size_in_bits = g->builtin_types.entry_global_error_set->size_in_bits;
    err_set_type->abi_align = g->builtin_types.entry_global_error_set->abi_align;
    err_set_type->abi_size = g->builtin_types.entry_global_error_set->abi_size;
    if (type_name == nullptr) {
        buf_resize(&err_set_type->name, 0);
        buf_appendf(&err_set_type->name, "error{");
    } else {
        buf_init_from_buf(&err_set_type->name, type_name);
    }

    // Debug check of the precondition: `errors` must already contain
    // exactly set1's entries at their value indices.
    for (uint32_t i = 0, count = set1->data.error_set.err_count; i < count; i += 1) {
        assert(errors[set1->data.error_set.errors[i]->value] == set1->data.error_set.errors[i]);
    }

    // First pass over set2: count how many entries are not already in set1,
    // so the result array can be allocated exactly.
    uint32_t count = set1->data.error_set.err_count;
    for (uint32_t i = 0; i < set2->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set2->data.error_set.errors[i];
        if (errors[error_entry->value] == nullptr) {
            count += 1;
        }
    }

    err_set_type->data.error_set.err_count = count;
    err_set_type->data.error_set.errors = heap::c_allocator.allocate<ErrorTableEntry *>(count);

    // Copy set1's entries, rendering names as we go (only for synthesized
    // type names).
    bool need_comma = false;
    for (uint32_t i = 0; i < set1->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set1->data.error_set.errors[i];
        if (type_name == nullptr) {
            const char *comma = need_comma ? "," : "";
            need_comma = true;
            buf_appendf(&err_set_type->name, "%s%s", comma, buf_ptr(&error_entry->name));
        }
        err_set_type->data.error_set.errors[i] = error_entry;
    }

    // Second pass over set2: append entries not already present, updating
    // the `errors` table so duplicates within set2 are also skipped.
    uint32_t index = set1->data.error_set.err_count;
    for (uint32_t i = 0; i < set2->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set2->data.error_set.errors[i];
        if (errors[error_entry->value] == nullptr) {
            errors[error_entry->value] = error_entry;
            if (type_name == nullptr) {
                const char *comma = need_comma ? "," : "";
                need_comma = true;
                buf_appendf(&err_set_type->name, "%s%s", comma, buf_ptr(&error_entry->name));
            }
            err_set_type->data.error_set.errors[index] = error_entry;
            index += 1;
        }
    }
    // The two passes must agree on the number of new entries.
    assert(index == count);

    if (type_name == nullptr) {
        buf_appendf(&err_set_type->name, "}");
    }

    return err_set_type;

}
|
|
|
|
// Builds a fresh single-member error set type named "error{Name}" wrapping
// err_entry. Error set types share the ABI of the global error set.
static ZigType *make_err_set_with_one_item(CodeGen *g, Scope *parent_scope, AstNode *node,
        ErrorTableEntry *err_entry)
{
    ZigType *result = new_type_table_entry(ZigTypeIdErrorSet);
    buf_resize(&result->name, 0);
    buf_appendf(&result->name, "error{%s}", buf_ptr(&err_entry->name));

    ZigType *global_set = g->builtin_types.entry_global_error_set;
    result->size_in_bits = global_set->size_in_bits;
    result->abi_align = global_set->abi_align;
    result->abi_size = global_set->abi_size;

    result->data.error_set.err_count = 1;
    result->data.error_set.errors = heap::c_allocator.create<ErrorTableEntry *>();
    result->data.error_set.errors[0] = err_entry;

    return result;
}
|
|
|
|
// Returns the symbol node naming an error-set declaration entry. An entry
// is either a bare symbol or an ErrorSetField wrapping one; any other node
// kind is passed through unchanged so the caller reports diagnostics at the
// right location.
static AstNode *ast_field_to_symbol_node(AstNode *err_set_field_node) {
    if (err_set_field_node->type == NodeTypeErrorSetField) {
        AstNode *field_name = err_set_field_node->data.err_set_field.field_name;
        assert(field_name->type == NodeTypeSymbol);
        return field_name;
    }
    // Bare symbols (the common case) and anything unexpected: the node
    // itself is the name node.
    return err_set_field_node;
}
|
|
|
|
// Lower an `error { ... }` declaration to a constant error-set type.
// Each declared error is interned in the global error table (reusing the
// numeric value of an identically-named error when one already exists),
// and duplicate names within this one declaration are diagnosed.
static IrInstSrc *ir_gen_err_set_decl(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeErrorSetDecl);

    uint32_t err_count = node->data.err_set_decl.decls.length;

    Buf bare_name = BUF_INIT;
    Buf *type_name = get_anon_type_name(irb->codegen, irb->exec, "error", parent_scope, node, &bare_name);
    ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
    buf_init_from_buf(&err_set_type->name, type_name);
    err_set_type->data.error_set.err_count = err_count;
    // Every error set shares the ABI layout of the global error set.
    err_set_type->size_in_bits = irb->codegen->builtin_types.entry_global_error_set->size_in_bits;
    err_set_type->abi_align = irb->codegen->builtin_types.entry_global_error_set->abi_align;
    err_set_type->abi_size = irb->codegen->builtin_types.entry_global_error_set->abi_size;
    err_set_type->data.error_set.errors = heap::c_allocator.allocate<ErrorTableEntry *>(err_count);

    // Scratch table indexed by global error value, used only to detect
    // duplicate errors inside this declaration.
    size_t errors_count = irb->codegen->errors_by_index.length + err_count;
    ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(errors_count);

    for (uint32_t i = 0; i < err_count; i += 1) {
        AstNode *field_node = node->data.err_set_decl.decls.at(i);
        AstNode *symbol_node = ast_field_to_symbol_node(field_node);
        Buf *err_name = symbol_node->data.symbol_expr.symbol;
        ErrorTableEntry *err = heap::c_allocator.create<ErrorTableEntry>();
        err->decl_node = field_node;
        buf_init_from_buf(&err->name, err_name);

        auto existing_entry = irb->codegen->error_table.put_unique(err_name, err);
        if (existing_entry) {
            // Same-named error declared elsewhere: share its global value.
            err->value = existing_entry->value->value;
        } else {
            size_t error_value_count = irb->codegen->errors_by_index.length;
            assert((uint32_t)error_value_count < (((uint32_t)1) << (uint32_t)irb->codegen->err_tag_type->data.integral.bit_count));
            err->value = error_value_count;
            irb->codegen->errors_by_index.append(err);
        }
        err_set_type->data.error_set.errors[i] = err;

        ErrorTableEntry *prev_err = errors[err->value];
        if (prev_err != nullptr) {
            ErrorMsg *msg = add_node_error(irb->codegen, ast_field_to_symbol_node(err->decl_node),
                    buf_sprintf("duplicate error: '%s'", buf_ptr(&err->name)));
            add_error_note(irb->codegen, msg, ast_field_to_symbol_node(prev_err->decl_node),
                    buf_sprintf("other error here"));
            // Fix: the scratch table was previously leaked on this early
            // return; release it here just like on the success path.
            heap::c_allocator.deallocate(errors, errors_count);
            return irb->codegen->invalid_inst_src;
        }
        errors[err->value] = err;
    }
    heap::c_allocator.deallocate(errors, errors_count);
    return ir_build_const_type(irb, parent_scope, node, err_set_type);
}
|
|
|
|
// Lower a function prototype (`fn (...) align(...) callconv(...) T`) to an
// IR fn-proto instruction. Parameter type expressions are lowered eagerly;
// `anytype` parameters get a null slot. Returns invalid_inst_src on error.
static IrInstSrc *ir_gen_fn_proto(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeFnProto);

    size_t param_count = node->data.fn_proto.params.length;
    IrInstSrc **param_types = heap::c_allocator.allocate<IrInstSrc*>(param_count);

    bool is_var_args = false;
    for (size_t i = 0; i < param_count; i += 1) {
        AstNode *param_node = node->data.fn_proto.params.at(i);
        if (param_node->data.param_decl.is_var_args) {
            // Slots after the varargs marker stay unused.
            is_var_args = true;
            break;
        }
        if (param_node->data.param_decl.anytype_token == nullptr) {
            AstNode *type_node = param_node->data.param_decl.type;
            IrInstSrc *type_value = ir_gen_node(irb, type_node, parent_scope);
            if (type_value == irb->codegen->invalid_inst_src) {
                // Fix: don't leak the parameter-type array on error.
                heap::c_allocator.deallocate(param_types, param_count);
                return irb->codegen->invalid_inst_src;
            }
            param_types[i] = type_value;
        } else {
            // `anytype` parameter: type is inferred at each call site.
            param_types[i] = nullptr;
        }
    }

    IrInstSrc *align_value = nullptr;
    if (node->data.fn_proto.align_expr != nullptr) {
        align_value = ir_gen_node(irb, node->data.fn_proto.align_expr, parent_scope);
        if (align_value == irb->codegen->invalid_inst_src) {
            heap::c_allocator.deallocate(param_types, param_count);
            return irb->codegen->invalid_inst_src;
        }
    }

    IrInstSrc *callconv_value = nullptr;
    if (node->data.fn_proto.callconv_expr != nullptr) {
        callconv_value = ir_gen_node(irb, node->data.fn_proto.callconv_expr, parent_scope);
        if (callconv_value == irb->codegen->invalid_inst_src) {
            heap::c_allocator.deallocate(param_types, param_count);
            return irb->codegen->invalid_inst_src;
        }
    }

    IrInstSrc *return_type;
    if (node->data.fn_proto.return_anytype_token == nullptr) {
        if (node->data.fn_proto.return_type == nullptr) {
            // Missing return type defaults to void.
            return_type = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_void);
        } else {
            return_type = ir_gen_node(irb, node->data.fn_proto.return_type, parent_scope);
            if (return_type == irb->codegen->invalid_inst_src) {
                heap::c_allocator.deallocate(param_types, param_count);
                return irb->codegen->invalid_inst_src;
            }
        }
    } else {
        add_node_error(irb->codegen, node,
            buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"));
        heap::c_allocator.deallocate(param_types, param_count);
        return irb->codegen->invalid_inst_src;
        //return_type = nullptr;
    }

    return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, callconv_value, return_type, is_var_args);
}
|
|
|
|
// Lower a `resume` expression: take the target frame by pointer and emit a
// resume instruction. Illegal inside a nosuspend scope.
static IrInstSrc *ir_gen_resume(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeResume);

    if (get_scope_nosuspend(scope) != nullptr) {
        add_node_error(irb->codegen, node, buf_sprintf("resume in nosuspend scope"));
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *frame_ptr = ir_gen_node_extra(irb, node->data.resume_expr.expr, scope, LValPtr, nullptr);
    if (frame_ptr == irb->codegen->invalid_inst_src)
        return frame_ptr;

    return ir_build_resume_src(irb, scope, node, frame_ptr);
}
|
|
|
|
// Lower an `await` expression. An awaited `@asyncCall(...)` gets dedicated
// handling; otherwise the awaited frame is taken by pointer and an await
// instruction is emitted (flagged nosuspend when inside a nosuspend scope).
// Await is rejected outside functions and inside suspend blocks.
static IrInstSrc *ir_gen_await_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeAwaitExpr);

    bool is_nosuspend = get_scope_nosuspend(scope) != nullptr;

    AstNode *awaited = node->data.await_expr.expr;
    bool is_builtin_call = awaited->type == NodeTypeFnCallExpr &&
        awaited->data.fn_call_expr.modifier == CallModifierBuiltin;
    if (is_builtin_call) {
        Buf *builtin_name = awaited->data.fn_call_expr.fn_ref_expr->data.symbol_expr.symbol;
        auto entry = irb->codegen->builtin_fn_table.maybe_get(builtin_name);
        if (entry != nullptr && entry->value->id == BuiltinFnIdAsyncCall) {
            return ir_gen_async_call(irb, scope, node, awaited, lval, result_loc);
        }
    }

    if (exec_fn_entry(irb->exec) == nullptr) {
        add_node_error(irb->codegen, node, buf_sprintf("await outside function definition"));
        return irb->codegen->invalid_inst_src;
    }
    ScopeSuspend *suspend_scope = get_scope_suspend(scope);
    if (suspend_scope != nullptr) {
        // Report at most one error per suspend block.
        if (!suspend_scope->reported_err) {
            ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot await inside suspend block"));
            add_error_note(irb->codegen, msg, suspend_scope->base.source_node, buf_sprintf("suspend block here"));
            suspend_scope->reported_err = true;
        }
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *frame_ptr = ir_gen_node_extra(irb, awaited, scope, LValPtr, nullptr);
    if (frame_ptr == irb->codegen->invalid_inst_src)
        return frame_ptr;

    IrInstSrc *await_inst = ir_build_await_src(irb, scope, node, frame_ptr, result_loc, is_nosuspend);
    return ir_lval_wrap(irb, scope, await_inst, lval, result_loc);
}
|
|
|
|
// Lower a `suspend` statement (optionally with a block). Emits a
// suspend-begin / suspend-finish pair, generating the block body (which
// must be void) in between. Rejected outside functions, in nosuspend
// scopes, and when nested inside another suspend block.
static IrInstSrc *ir_gen_suspend(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeSuspend);

    if (exec_fn_entry(irb->exec) == nullptr) {
        add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition"));
        return irb->codegen->invalid_inst_src;
    }
    if (get_scope_nosuspend(parent_scope) != nullptr) {
        add_node_error(irb->codegen, node, buf_sprintf("suspend in nosuspend scope"));
        return irb->codegen->invalid_inst_src;
    }

    // Nested suspend blocks are an error; report it only once per block.
    ScopeSuspend *outer_suspend = get_scope_suspend(parent_scope);
    if (outer_suspend != nullptr) {
        if (!outer_suspend->reported_err) {
            ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block"));
            add_error_note(irb->codegen, msg, outer_suspend->base.source_node, buf_sprintf("other suspend block here"));
            outer_suspend->reported_err = true;
        }
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrcSuspendBegin *begin = ir_build_suspend_begin_src(irb, parent_scope, node);
    if (node->data.suspend.block != nullptr) {
        ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
        Scope *child_scope = &suspend_scope->base;
        IrInstSrc *block_result = ir_gen_node(irb, node->data.suspend.block, child_scope);
        if (block_result == irb->codegen->invalid_inst_src)
            return block_result;
        ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, block_result));
    }

    return ir_mark_gen(ir_build_suspend_finish_src(irb, parent_scope, node, begin));
}
|
|
|
|
// Central AST-to-source-IR dispatch: lowers one AST node according to its
// kind. `lval` selects whether the caller wants a pointer
// (LValPtr/LValAssign) or a loaded value; `result_loc` describes where the
// value should be written, if anywhere. Returns invalid_inst_src on error.
static IrInstSrc *ir_gen_node_raw(IrBuilderSrc *irb, AstNode *node, Scope *scope,
        LVal lval, ResultLoc *result_loc)
{
    assert(scope);
    switch (node->type) {
        // These node kinds only occur nested inside other constructs and
        // are lowered by their parents, never dispatched here directly.
        case NodeTypeStructValueField:
        case NodeTypeParamDecl:
        case NodeTypeUsingNamespace:
        case NodeTypeSwitchProng:
        case NodeTypeSwitchRange:
        case NodeTypeStructField:
        case NodeTypeErrorSetField:
        case NodeTypeFnDef:
        case NodeTypeTestDecl:
            zig_unreachable();
        case NodeTypeBlock:
            return ir_gen_block(irb, scope, node, lval, result_loc);
        case NodeTypeGroupedExpr:
            // Parentheses are transparent; lower the inner expression.
            return ir_gen_node_raw(irb, node->data.grouped_expr, scope, lval, result_loc);
        case NodeTypeBinOpExpr:
            return ir_gen_bin_op(irb, scope, node, lval, result_loc);
        case NodeTypeIntLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_int_lit(irb, scope, node), lval, result_loc);
        case NodeTypeFloatLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_float_lit(irb, scope, node), lval, result_loc);
        case NodeTypeCharLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_char_lit(irb, scope, node), lval, result_loc);
        case NodeTypeSymbol:
            return ir_gen_symbol(irb, scope, node, lval, result_loc);
        case NodeTypeFnCallExpr:
            return ir_gen_fn_call(irb, scope, node, lval, result_loc);
        case NodeTypeIfBoolExpr:
            return ir_gen_if_bool_expr(irb, scope, node, lval, result_loc);
        case NodeTypePrefixOpExpr:
            return ir_gen_prefix_op_expr(irb, scope, node, lval, result_loc);
        case NodeTypeContainerInitExpr:
            return ir_gen_container_init_expr(irb, scope, node, lval, result_loc);
        case NodeTypeVariableDeclaration:
            return ir_gen_var_decl(irb, scope, node);
        case NodeTypeWhileExpr:
            return ir_gen_while_expr(irb, scope, node, lval, result_loc);
        case NodeTypeForExpr:
            return ir_gen_for_expr(irb, scope, node, lval, result_loc);
        case NodeTypeArrayAccessExpr:
            return ir_gen_array_access(irb, scope, node, lval, result_loc);
        case NodeTypeReturnExpr:
            return ir_gen_return(irb, scope, node, lval, result_loc);
        case NodeTypeFieldAccessExpr:
            {
                IrInstSrc *ptr_instruction = ir_gen_field_access(irb, scope, node);
                if (ptr_instruction == irb->codegen->invalid_inst_src)
                    return ptr_instruction;
                // Pointer/assignment contexts get the field pointer itself.
                if (lval == LValPtr || lval == LValAssign)
                    return ptr_instruction;

                // Otherwise load the value through the field pointer.
                IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction);
                return ir_expr_wrap(irb, scope, load_ptr, result_loc);
            }
        case NodeTypePtrDeref: {
            AstNode *expr_node = node->data.ptr_deref_expr.target;

            // An assignment target `x.* = ...` needs the pointer to x.
            LVal child_lval = lval;
            if (child_lval == LValAssign)
                child_lval = LValPtr;

            IrInstSrc *value = ir_gen_node_extra(irb, expr_node, scope, child_lval, nullptr);
            if (value == irb->codegen->invalid_inst_src)
                return value;

            // We essentially just converted any lvalue from &(x.*) to (&x).*;
            // this inhibits checking that x is a pointer later, so we directly
            // record whether the pointer check is needed
            IrInstSrc *un_op = ir_build_un_op_lval(irb, scope, node, IrUnOpDereference, value, lval, result_loc);
            return ir_expr_wrap(irb, scope, un_op, result_loc);
        }
        case NodeTypeUnwrapOptional: {
            AstNode *expr_node = node->data.unwrap_optional.expr;

            IrInstSrc *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
            if (maybe_ptr == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;

            IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, maybe_ptr, true );
            // Pointer/assignment contexts get the payload pointer directly.
            if (lval == LValPtr || lval == LValAssign)
                return unwrapped_ptr;

            IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
            return ir_expr_wrap(irb, scope, load_ptr, result_loc);
        }
        case NodeTypeBoolLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval, result_loc);
        case NodeTypeArrayType:
            return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc);
        case NodeTypePointerType:
            return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc);
        case NodeTypeAnyFrameType:
            return ir_lval_wrap(irb, scope, ir_gen_anyframe_type(irb, scope, node), lval, result_loc);
        case NodeTypeStringLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc);
        case NodeTypeUndefinedLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_undefined_literal(irb, scope, node), lval, result_loc);
        case NodeTypeAsmExpr:
            return ir_lval_wrap(irb, scope, ir_gen_asm_expr(irb, scope, node), lval, result_loc);
        case NodeTypeNullLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_null_literal(irb, scope, node), lval, result_loc);
        case NodeTypeIfErrorExpr:
            return ir_gen_if_err_expr(irb, scope, node, lval, result_loc);
        case NodeTypeIfOptional:
            return ir_gen_if_optional_expr(irb, scope, node, lval, result_loc);
        case NodeTypeSwitchExpr:
            return ir_gen_switch_expr(irb, scope, node, lval, result_loc);
        case NodeTypeCompTime:
            return ir_expr_wrap(irb, scope, ir_gen_comptime(irb, scope, node, lval), result_loc);
        case NodeTypeNoSuspend:
            return ir_expr_wrap(irb, scope, ir_gen_nosuspend(irb, scope, node, lval), result_loc);
        case NodeTypeErrorType:
            return ir_lval_wrap(irb, scope, ir_gen_error_type(irb, scope, node), lval, result_loc);
        case NodeTypeBreak:
            return ir_lval_wrap(irb, scope, ir_gen_break(irb, scope, node), lval, result_loc);
        case NodeTypeContinue:
            return ir_lval_wrap(irb, scope, ir_gen_continue(irb, scope, node), lval, result_loc);
        case NodeTypeUnreachable:
            return ir_build_unreachable(irb, scope, node);
        case NodeTypeDefer:
            return ir_lval_wrap(irb, scope, ir_gen_defer(irb, scope, node), lval, result_loc);
        case NodeTypeSliceExpr:
            return ir_gen_slice(irb, scope, node, lval, result_loc);
        case NodeTypeCatchExpr:
            return ir_gen_catch(irb, scope, node, lval, result_loc);
        case NodeTypeContainerDecl:
            return ir_lval_wrap(irb, scope, ir_gen_container_decl(irb, scope, node), lval, result_loc);
        case NodeTypeFnProto:
            return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
        case NodeTypeErrorSetDecl:
            return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
        case NodeTypeResume:
            return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
        case NodeTypeAwaitExpr:
            return ir_gen_await_expr(irb, scope, node, lval, result_loc);
        case NodeTypeSuspend:
            return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval, result_loc);
        case NodeTypeEnumLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_enum_literal(irb, scope, node), lval, result_loc);
        case NodeTypeInferredArrayType:
            // `[_]T` only makes sense where the length can be inferred.
            add_node_error(irb->codegen, node,
                    buf_sprintf("inferred array size invalid here"));
            return irb->codegen->invalid_inst_src;
        case NodeTypeAnyTypeField:
            return ir_lval_wrap(irb, scope,
                    ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_anytype), lval, result_loc);
    }
    zig_unreachable();
}
|
|
|
|
// Allocate a fresh "no result location" marker, used when an expression is
// generated without an explicit destination.
static ResultLoc *no_result_loc(void) {
    ResultLocNone *none = heap::c_allocator.create<ResultLocNone>();
    none->base.id = ResultLocIdNone;
    return &none->base;
}
|
|
|
|
// Wrapper around ir_gen_node_raw that (1) validates assignment targets when
// lval == LValAssign, (2) guarantees a result location exists, and
// (3) opens an expression scope unless this is the function's own top-level
// scope or a comptime (inline) executable. Records the first error trace
// message on failure.
static IrInstSrc *ir_gen_node_extra(IrBuilderSrc *irb, AstNode *node, Scope *scope, LVal lval,
        ResultLoc *result_loc)
{
    if (lval == LValAssign) {
        // Only a restricted set of node kinds may appear on the left-hand
        // side of an assignment; diagnose everything else up front.
        switch (node->type) {
            case NodeTypeStructValueField:
            case NodeTypeParamDecl:
            case NodeTypeUsingNamespace:
            case NodeTypeSwitchProng:
            case NodeTypeSwitchRange:
            case NodeTypeStructField:
            case NodeTypeErrorSetField:
            case NodeTypeFnDef:
            case NodeTypeTestDecl:
                zig_unreachable();

            // cannot be assigned to
            case NodeTypeBlock:
            case NodeTypeGroupedExpr:
            case NodeTypeBinOpExpr:
            case NodeTypeIntLiteral:
            case NodeTypeFloatLiteral:
            case NodeTypeCharLiteral:
            case NodeTypeIfBoolExpr:
            case NodeTypeContainerInitExpr:
            case NodeTypeVariableDeclaration:
            case NodeTypeWhileExpr:
            case NodeTypeForExpr:
            case NodeTypeReturnExpr:
            case NodeTypeBoolLiteral:
            case NodeTypeArrayType:
            case NodeTypePointerType:
            case NodeTypeAnyFrameType:
            case NodeTypeStringLiteral:
            case NodeTypeUndefinedLiteral:
            case NodeTypeAsmExpr:
            case NodeTypeNullLiteral:
            case NodeTypeIfErrorExpr:
            case NodeTypeIfOptional:
            case NodeTypeSwitchExpr:
            case NodeTypeCompTime:
            case NodeTypeNoSuspend:
            case NodeTypeErrorType:
            case NodeTypeBreak:
            case NodeTypeContinue:
            case NodeTypeUnreachable:
            case NodeTypeDefer:
            case NodeTypeSliceExpr:
            case NodeTypeCatchExpr:
            case NodeTypeContainerDecl:
            case NodeTypeFnProto:
            case NodeTypeErrorSetDecl:
            case NodeTypeResume:
            case NodeTypeAwaitExpr:
            case NodeTypeSuspend:
            case NodeTypeEnumLiteral:
            case NodeTypeInferredArrayType:
            case NodeTypeAnyTypeField:
            case NodeTypePrefixOpExpr:
                add_node_error(irb->codegen, node,
                    buf_sprintf("invalid left-hand side to assignment"));
                return irb->codegen->invalid_inst_src;

            // @field can be assigned to
            case NodeTypeFnCallExpr:
                if (node->data.fn_call_expr.modifier == CallModifierBuiltin) {
                    AstNode *fn_ref_expr = node->data.fn_call_expr.fn_ref_expr;
                    Buf *name = fn_ref_expr->data.symbol_expr.symbol;
                    auto entry = irb->codegen->builtin_fn_table.maybe_get(name);

                    if (!entry) {
                        add_node_error(irb->codegen, node,
                                buf_sprintf("invalid builtin function: '%s'", buf_ptr(name)));
                        return irb->codegen->invalid_inst_src;
                    }

                    // Only @field produces an assignable location.
                    if (entry->value->id == BuiltinFnIdField) {
                        break;
                    }
                }
                add_node_error(irb->codegen, node,
                    buf_sprintf("invalid left-hand side to assignment"));
                return irb->codegen->invalid_inst_src;


            // can be assigned to
            case NodeTypeUnwrapOptional:
            case NodeTypePtrDeref:
            case NodeTypeFieldAccessExpr:
            case NodeTypeArrayAccessExpr:
            case NodeTypeSymbol:
                break;
        }
    }
    if (result_loc == nullptr) {
        // Create a result location indicating there is none - but if one gets created
        // it will be properly distributed.
        result_loc = no_result_loc();
        ir_build_reset_result(irb, scope, node, result_loc);
    }
    // Comptime executables and the function's own top-level scope do not get
    // an extra expression scope; everything else does.
    Scope *child_scope;
    if (irb->exec->is_inline ||
        (irb->exec->fn_entry != nullptr && irb->exec->fn_entry->child_scope == scope))
    {
        child_scope = scope;
    } else {
        child_scope = &create_expr_scope(irb->codegen, node, scope)->base;
    }
    IrInstSrc *result = ir_gen_node_raw(irb, node, child_scope, lval, result_loc);
    if (result == irb->codegen->invalid_inst_src) {
        // Remember the first error so later diagnostics can reference it.
        if (irb->exec->first_err_trace_msg == nullptr) {
            irb->exec->first_err_trace_msg = irb->codegen->trace_err;
        }
    }
    return result;
}
|
|
|
|
// Convenience wrapper: lower `node` as a plain rvalue with no explicit
// result location.
static IrInstSrc *ir_gen_node(IrBuilderSrc *irb, AstNode *node, Scope *scope) {
    ResultLoc *no_result = nullptr;
    return ir_gen_node_extra(irb, node, scope, LValNone, no_result);
}
|
|
|
|
// Mark a source executable as failed: remember the first error message and
// invalidate every top-level declaration it resolved. Subsequent calls are
// no-ops so only the first error is recorded.
static void invalidate_exec(IrExecutableSrc *exec, ErrorMsg *msg) {
    if (exec->first_err_trace_msg != nullptr)
        return;
    exec->first_err_trace_msg = msg;
    for (size_t tld_i = 0; tld_i < exec->tld_list.length; tld_i += 1)
        exec->tld_list.items[tld_i]->resolution = TldResolutionInvalid;
}
|
|
|
|
// Gen-executable variant of invalidate_exec; additionally propagates the
// failure to the source executable this one was analyzed from.
static void invalidate_exec_gen(IrExecutableGen *exec, ErrorMsg *msg) {
    if (exec->first_err_trace_msg != nullptr)
        return;
    exec->first_err_trace_msg = msg;
    for (size_t tld_i = 0; tld_i < exec->tld_list.length; tld_i += 1)
        exec->tld_list.items[tld_i]->resolution = TldResolutionInvalid;
    if (exec->source_exec != nullptr)
        invalidate_exec(exec->source_exec, msg);
}
|
|
|
|
|
|
// Entry point for source-IR generation: lower `node` (a function body or
// other executable expression) into `ir_executable`, starting from a fresh
// entry basic block. Returns false when any error was emitted.
bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutableSrc *ir_executable) {
    assert(node->owner);

    IrBuilderSrc ir_builder = {0};
    IrBuilderSrc *irb = &ir_builder;

    irb->codegen = codegen;
    irb->exec = ir_executable;
    irb->main_block_node = node;

    IrBasicBlockSrc *entry_block = ir_create_basic_block(irb, scope, "Entry");
    ir_set_cursor_at_end_and_append_block(irb, entry_block);
    // Entry block gets a reference because we enter it to begin.
    ir_ref_bb(irb->current_basic_block);

    IrInstSrc *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr);

    if (result == irb->codegen->invalid_inst_src)
        return false;

    if (irb->exec->first_err_trace_msg != nullptr) {
        codegen->trace_err = irb->exec->first_err_trace_msg;
        return false;
    }

    // If control flow can fall off the end, synthesize an implicit return
    // of the final expression value.
    if (!instr_is_unreachable(result)) {
        ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->base.source_node, result, nullptr));
        // no need for save_err_ret_addr because this cannot return error
        ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
        result_loc_ret->base.id = ResultLocIdReturn;
        ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
        ir_mark_gen(ir_build_end_expr(irb, scope, node, result, &result_loc_ret->base));
        ir_mark_gen(ir_build_return_src(irb, scope, result->base.source_node, result));
    }

    return true;
}
|
|
|
|
// Generate source IR for a function body. Returns false if generation
// failed.
bool ir_gen_fn(CodeGen *codegen, ZigFn *fn_entry) {
    assert(fn_entry);
    assert(fn_entry->child_scope);
    return ir_gen(codegen, fn_entry->body_node, fn_entry->child_scope, fn_entry->ir_executable);
}
|
|
|
|
// Walk up the comptime call stack of a gen executable, attaching a
// "called from here" note for each frame, up to `limit + 1` frames.
static void ir_add_call_stack_errors_gen(CodeGen *codegen, IrExecutableGen *exec, ErrorMsg *err_msg, int limit) {
    for (; exec != nullptr && exec->source_node != nullptr && limit >= 0; limit -= 1) {
        add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here"));
        exec = exec->parent_exec;
    }
}
|
|
|
|
// Source-executable variant: annotate this frame, then continue through the
// parent gen executables.
static void ir_add_call_stack_errors(CodeGen *codegen, IrExecutableSrc *exec, ErrorMsg *err_msg, int limit) {
    bool have_frame = exec != nullptr && exec->source_node != nullptr && limit >= 0;
    if (!have_frame)
        return;
    add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here"));
    ir_add_call_stack_errors_gen(codegen, exec->parent_exec, err_msg, limit - 1);
}
|
|
|
|
// Report an error within a source executable: emit the message, invalidate
// the executable, and (for nested comptime calls) annotate the call stack.
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutableSrc *exec, AstNode *source_node, Buf *msg) {
    ErrorMsg *result = add_node_error(codegen, source_node, msg);
    invalidate_exec(exec, result);
    if (exec->parent_exec != nullptr)
        ir_add_call_stack_errors(codegen, exec, result, 10);
    return result;
}
|
|
|
|
// Gen-executable variant of exec_add_error_node.
static ErrorMsg *exec_add_error_node_gen(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node, Buf *msg) {
    ErrorMsg *result = add_node_error(codegen, source_node, msg);
    invalidate_exec_gen(exec, result);
    if (exec->parent_exec != nullptr)
        ir_add_call_stack_errors_gen(codegen, exec, result, 10);
    return result;
}
|
|
|
|
// Report an error at `source_node` within the executable currently being
// analyzed.
static ErrorMsg *ir_add_error_node(IrAnalyze *ira, AstNode *source_node, Buf *msg) {
    IrExecutableGen *cur_exec = ira->new_irb.exec;
    return exec_add_error_node_gen(ira->codegen, cur_exec, source_node, msg);
}
|
|
|
|
// Like ir_add_error_node, but tolerates a missing analysis context by
// falling back to a plain node error.
static ErrorMsg *opt_ir_add_error_node(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node, Buf *msg) {
    if (ira == nullptr)
        return add_node_error(codegen, source_node, msg);
    return exec_add_error_node_gen(codegen, ira->new_irb.exec, source_node, msg);
}
|
|
|
|
// Report an error at the source location of an IR instruction.
static ErrorMsg *ir_add_error(IrAnalyze *ira, IrInst *source_instruction, Buf *msg) {
    AstNode *where = source_instruction->source_node;
    return ir_add_error_node(ira, where, msg);
}
|
|
|
|
// Assert helper that reports failures at the instruction's source location.
static void ir_assert_impl(bool ok, IrInst *source_instruction, char const *file, unsigned int line) {
    if (!ok)
        src_assert_impl(ok, source_instruction->source_node, file, line);
}
|
|
|
|
// Gen-instruction variant of ir_assert_impl.
static void ir_assert_gen_impl(bool ok, IrInstGen *source_instruction, char const *file, unsigned int line) {
    if (!ok)
        src_assert_impl(ok, source_instruction->base.source_node, file, line);
}
|
|
|
|
// This function takes a comptime ptr and makes the child const value conform to the type
// described by the pointer.
static Error eval_comptime_ptr_reinterpret(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node,
        ZigValue *ptr_val)
{
    Error err;
    assert(ptr_val->type->id == ZigTypeIdPointer);
    assert(ptr_val->special == ConstValSpecialStatic);
    // Read the pointee through the pointer's declared child type into a
    // stack temporary...
    ZigValue tmp = {};
    tmp.special = ConstValSpecialStatic;
    tmp.type = ptr_val->type->data.pointer.child_type;
    if ((err = ir_read_const_ptr(ira, codegen, source_node, &tmp, ptr_val)))
        return err;
    // ...then overwrite the stored child value with the reinterpreted copy.
    ZigValue *child_val = const_ptr_pointee_unchecked(codegen, ptr_val);
    copy_const_val(codegen, child_val, &tmp);
    return ErrorNone;
}
|
|
|
|
// Resolve a comptime pointer to the value it points at, reinterpreting the
// stored value in place when its comptime representation does not match the
// pointer's declared child type. Returns nullptr on analysis failure.
ZigValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ZigValue *const_val,
        AstNode *source_node)
{
    Error err;
    ZigValue *val = const_ptr_pointee_unchecked(codegen, const_val);
    if (val == nullptr) return nullptr;
    assert(const_val->type->id == ZigTypeIdPointer);
    ZigType *expected_type = const_val->type->data.pointer.child_type;
    // `anytype` pointees are returned as-is; there is no concrete type to
    // conform to.
    if (expected_type == codegen->builtin_types.entry_anytype) {
        return val;
    }
    // Types with exactly one possible value need no stored representation.
    switch (type_has_one_possible_value(codegen, expected_type)) {
        case OnePossibleValueInvalid:
            return nullptr;
        case OnePossibleValueNo:
            break;
        case OnePossibleValueYes:
            return get_the_one_possible_value(codegen, expected_type);
    }
    // If the stored value's comptime representation differs from the
    // expected child type, rewrite it in place and re-fetch.
    if (!types_have_same_zig_comptime_repr(codegen, expected_type, val->type)) {
        if ((err = eval_comptime_ptr_reinterpret(ira, codegen, source_node, const_val)))
            return nullptr;
        return const_ptr_pointee_unchecked(codegen, const_val);
    }
    return val;
}
|
|
|
|
// Scan the first basic block of an analyzed executable for instructions
// with runtime side effects, which are not allowed in comptime evaluation.
// Succeeds upon reaching a return instruction; otherwise reports an error
// at the first offending instruction.
static Error ir_exec_scan_for_side_effects(CodeGen *codegen, IrExecutableGen *exec) {
    IrBasicBlockGen *bb = exec->basic_block_list.at(0);
    for (size_t i = 0; i < bb->instruction_list.length; i += 1) {
        IrInstGen *instruction = bb->instruction_list.at(i);
        if (instruction->id == IrInstGenIdReturn) {
            return ErrorNone;
        } else if (ir_inst_gen_has_side_effects(instruction)) {
            if (instr_is_comptime(instruction)) {
                // These unwrap-style instructions, when comptime-known, do
                // not count as runtime side effects here.
                switch (instruction->id) {
                    case IrInstGenIdUnwrapErrPayload:
                    case IrInstGenIdOptionalUnwrapPtr:
                    case IrInstGenIdUnionFieldPtr:
                        continue;
                    default:
                        break;
                }
            }
            if (get_scope_typeof(instruction->base.scope) != nullptr) {
                // doesn't count, it's inside a @TypeOf()
                continue;
            }
            exec_add_error_node_gen(codegen, exec, instruction->base.source_node,
                    buf_sprintf("unable to evaluate constant expression"));
            return ErrorSemanticAnalyzeFail;
        }
    }
    // A well-formed executable always ends in a return instruction.
    zig_unreachable();
}
|
|
|
|
// Returns true when a runtime side effect is permitted at this point;
// otherwise (inside a context that must be comptime-evaluated) reports an
// error and returns false.
static bool ir_emit_global_runtime_side_effect(IrAnalyze *ira, IrInst* source_instruction) {
    bool must_be_comptime = ir_should_inline(ira->old_irb.exec, source_instruction->scope);
    if (!must_be_comptime)
        return true;
    ir_add_error(ira, source_instruction, buf_sprintf("unable to evaluate constant expression"));
    return false;
}
|
|
|
|
// Reports whether a constant value can stand in for the given numeric
// literal type: float (or comptime float) values for comptime_float,
// integer (or comptime int) values for comptime_int.
static bool const_val_fits_in_num_lit(ZigValue *const_val, ZigType *num_lit_type) {
    ZigTypeId val_type_id = const_val->type->id;
    if (num_lit_type->id == ZigTypeIdComptimeFloat)
        return val_type_id == ZigTypeIdFloat || val_type_id == ZigTypeIdComptimeFloat;
    if (num_lit_type->id == ZigTypeIdComptimeInt)
        return val_type_id == ZigTypeIdInt || val_type_id == ZigTypeIdComptimeInt;
    return false;
}
|
|
|
|
// Reports whether a comptime-known float value has a nonzero fractional
// part, dispatching on the value's float width.
static bool float_has_fraction(ZigValue *const_val) {
    if (const_val->type->id == ZigTypeIdComptimeFloat)
        return bigfloat_has_fraction(&const_val->data.x_bigfloat);
    if (const_val->type->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (const_val->type->data.floating.bit_count) {
        case 16: {
            // Round toward zero and compare; inequality means a fractional
            // part was discarded.
            float16_t truncated = f16_roundToInt(const_val->data.x_f16, softfloat_round_minMag, false);
            return !f16_eq(truncated, const_val->data.x_f16);
        }
        case 32:
            return floorf(const_val->data.x_f32) != const_val->data.x_f32;
        case 64:
            return floor(const_val->data.x_f64) != const_val->data.x_f64;
        case 128: {
            float128_t truncated;
            f128M_roundToInt(&const_val->data.x_f128, softfloat_round_minMag, false, &truncated);
            return !f128M_eq(&truncated, &const_val->data.x_f128);
        }
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Append a human-readable decimal rendering of a comptime-known float value
// to `buf`.
static void float_append_buf(Buf *buf, ZigValue *const_val) {
    if (const_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_append_buf(buf, &const_val->data.x_bigfloat);
    } else if (const_val->type->id == ZigTypeIdFloat) {
        switch (const_val->type->data.floating.bit_count) {
            case 16:
                buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16));
                break;
            case 32:
                buf_appendf(buf, "%f", const_val->data.x_f32);
                break;
            case 64:
                buf_appendf(buf, "%f", const_val->data.x_f64);
                break;
            case 128:
                {
                    // TODO actual implementation
                    // f128 is approximated by narrowing to f64 and printing
                    // that; precision beyond f64 is lost.
                    const size_t extra_len = 100;
                    size_t old_len = buf_len(buf);
                    buf_resize(buf, old_len + extra_len);

                    float64_t f64_value = f128M_to_f64(&const_val->data.x_f128);
                    double double_value;
                    // Reinterpret the softfloat result as a native double.
                    memcpy(&double_value, &f64_value, sizeof(double));

                    int len = snprintf(buf_ptr(buf) + old_len, extra_len, "%f", double_value);
                    assert(len > 0);
                    // Trim the buffer back down to the printed length.
                    buf_resize(buf, old_len + len);
                    break;
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// Convert a comptime-known float value to a BigInt, truncating toward zero.
static void float_init_bigint(BigInt *bigint, ZigValue *const_val) {
    if (const_val->type->id == ZigTypeIdComptimeFloat) {
        bigint_init_bigfloat(bigint, &const_val->data.x_bigfloat);
    } else if (const_val->type->id == ZigTypeIdFloat) {
        switch (const_val->type->data.floating.bit_count) {
            case 16:
                {
                    double x = zig_f16_to_double(const_val->data.x_f16);
                    if (x >= 0) {
                        bigint_init_unsigned(bigint, (uint64_t)x);
                    } else {
                        // Negative values: store the magnitude, then flag
                        // the sign on the BigInt.
                        bigint_init_unsigned(bigint, (uint64_t)-x);
                        bigint->is_negative = true;
                    }
                    break;
                }
            case 32:
                if (const_val->data.x_f32 >= 0) {
                    bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f32));
                } else {
                    bigint_init_unsigned(bigint, (uint64_t)(-const_val->data.x_f32));
                    bigint->is_negative = true;
                }
                break;
            case 64:
                if (const_val->data.x_f64 >= 0) {
                    bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f64));
                } else {
                    bigint_init_unsigned(bigint, (uint64_t)(-const_val->data.x_f64));
                    bigint->is_negative = true;
                }
                break;
            case 128:
                {
                    // Route f128 through BigFloat, which handles the full
                    // value range.
                    BigFloat tmp_float;
                    bigfloat_init_128(&tmp_float, const_val->data.x_f128);
                    bigint_init_bigfloat(bigint, &tmp_float);
                }
                break;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// Store the value of `bigfloat` into `dest_val`, converting to the
// destination's float representation. `dest_val->type` must already be set
// to comptime_float or a runtime float type.
static void float_init_bigfloat(ZigValue *dest_val, BigFloat *bigfloat) {
    ZigType *ty = dest_val->type;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_bigfloat(&dest_val->data.x_bigfloat, bigfloat);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            dest_val->data.x_f16 = bigfloat_to_f16(bigfloat);
            return;
        case 32:
            dest_val->data.x_f32 = bigfloat_to_f32(bigfloat);
            return;
        case 64:
            dest_val->data.x_f64 = bigfloat_to_f64(bigfloat);
            return;
        case 80:
            zig_panic("TODO");
        case 128:
            dest_val->data.x_f128 = bigfloat_to_f128(bigfloat);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Store the f16 value `x` into `dest_val`, widening to the destination
// float type as needed.
static void float_init_f16(ZigValue *dest_val, float16_t x) {
    ZigType *ty = dest_val->type;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_16(&dest_val->data.x_bigfloat, x);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            dest_val->data.x_f16 = x;
            return;
        case 32:
            // Widening through double is exact: every f16 is representable.
            dest_val->data.x_f32 = zig_f16_to_double(x);
            return;
        case 64:
            dest_val->data.x_f64 = zig_f16_to_double(x);
            return;
        case 128:
            f16_to_f128M(x, &dest_val->data.x_f128);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Store the f32 value `x` into `dest_val`, converting to the destination
// float type as needed.
static void float_init_f32(ZigValue *dest_val, float x) {
    ZigType *ty = dest_val->type;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_32(&dest_val->data.x_bigfloat, x);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            dest_val->data.x_f16 = zig_double_to_f16(x);
            return;
        case 32:
            dest_val->data.x_f32 = x;
            return;
        case 64:
            dest_val->data.x_f64 = x;
            return;
        case 128: {
            // Reinterpret the native float as a softfloat float32_t (both are
            // 32-bit) before widening to f128.
            float32_t as_f32;
            memcpy(&as_f32, &x, sizeof(float));
            f32_to_f128M(as_f32, &dest_val->data.x_f128);
            return;
        }
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Store the f64 value `x` into `dest_val`, converting to the destination
// float type as needed (the 16/32-bit cases narrow).
static void float_init_f64(ZigValue *dest_val, double x) {
    ZigType *ty = dest_val->type;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_64(&dest_val->data.x_bigfloat, x);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            dest_val->data.x_f16 = zig_double_to_f16(x);
            return;
        case 32:
            dest_val->data.x_f32 = x;
            return;
        case 64:
            dest_val->data.x_f64 = x;
            return;
        case 128: {
            // Reinterpret the native double as a softfloat float64_t (both
            // are 64-bit) before widening to f128.
            float64_t as_f64;
            memcpy(&as_f64, &x, sizeof(double));
            f64_to_f128M(as_f64, &dest_val->data.x_f128);
            return;
        }
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Store the 128-bit softfloat value `x` into `dest_val`, converting to the
// destination float type (the 16/32/64-bit cases narrow via softfloat).
static void float_init_f128(ZigValue *dest_val, float128_t x) {
    if (dest_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_128(&dest_val->data.x_bigfloat, x);
    } else if (dest_val->type->id == ZigTypeIdFloat) {
        switch (dest_val->type->data.floating.bit_count) {
            case 16:
                dest_val->data.x_f16 = f128M_to_f16(&x);
                break;
            case 32:
            {
                // Narrow via softfloat, then reinterpret the 32-bit result as
                // a native float (same bit width).
                float32_t f32_val = f128M_to_f32(&x);
                memcpy(&dest_val->data.x_f32, &f32_val, sizeof(float));
                break;
            }
            case 64:
            {
                float64_t f64_val = f128M_to_f64(&x);
                memcpy(&dest_val->data.x_f64, &f64_val, sizeof(double));
                break;
            }
            case 128:
            {
                memcpy(&dest_val->data.x_f128, &x, sizeof(float128_t));
                break;
            }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// Copy the float value of `src_val` into `dest_val`, converting between
// float representations; dispatches on the SOURCE type and reuses the
// width-specific float_init_* helpers.
static void float_init_float(ZigValue *dest_val, ZigValue *src_val) {
    ZigType *src_ty = src_val->type;
    if (src_ty->id == ZigTypeIdComptimeFloat) {
        float_init_bigfloat(dest_val, &src_val->data.x_bigfloat);
        return;
    }
    if (src_ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (src_ty->data.floating.bit_count) {
        case 16:
            float_init_f16(dest_val, src_val->data.x_f16);
            return;
        case 32:
            float_init_f32(dest_val, src_val->data.x_f32);
            return;
        case 64:
            float_init_f64(dest_val, src_val->data.x_f64);
            return;
        case 128:
            float_init_f128(dest_val, src_val->data.x_f128);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Whether the float value held in `op` is NaN.
static bool float_is_nan(ZigValue *op) {
    ZigType *ty = op->type;
    if (ty->id == ZigTypeIdComptimeFloat)
        return bigfloat_is_nan(&op->data.x_bigfloat);
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            return zig_f16_isNaN(op->data.x_f16);
        case 32:
            // NaN is the only value that compares unequal to itself.
            return op->data.x_f32 != op->data.x_f32;
        case 64:
            return op->data.x_f64 != op->data.x_f64;
        case 128:
            return zig_f128_isNaN(&op->data.x_f128);
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Three-way comparison of two float values. Same-typed operands are compared
// directly; mixed types (e.g. f64 vs comptime_float) fall through to the
// bottom, where both are widened to BigFloat first.
// NOTE(review): NaN gets no special handling here -- for f16/f32/f64 an
// unordered pair falls through to CmpEQ, while the f128 case yields CmpGT;
// presumably callers check float_is_nan first (verify against callers).
static Cmp float_cmp(ZigValue *op1, ZigValue *op2) {
    if (op1->type == op2->type) {
        if (op1->type->id == ZigTypeIdComptimeFloat) {
            return bigfloat_cmp(&op1->data.x_bigfloat, &op2->data.x_bigfloat);
        } else if (op1->type->id == ZigTypeIdFloat) {
            switch (op1->type->data.floating.bit_count) {
                case 16:
                    // Derive the ordering from two less-than queries.
                    if (f16_lt(op1->data.x_f16, op2->data.x_f16)) {
                        return CmpLT;
                    } else if (f16_lt(op2->data.x_f16, op1->data.x_f16)) {
                        return CmpGT;
                    } else {
                        return CmpEQ;
                    }
                case 32:
                    if (op1->data.x_f32 > op2->data.x_f32) {
                        return CmpGT;
                    } else if (op1->data.x_f32 < op2->data.x_f32) {
                        return CmpLT;
                    } else {
                        return CmpEQ;
                    }
                case 64:
                    if (op1->data.x_f64 > op2->data.x_f64) {
                        return CmpGT;
                    } else if (op1->data.x_f64 < op2->data.x_f64) {
                        return CmpLT;
                    } else {
                        return CmpEQ;
                    }
                case 128:
                    if (f128M_lt(&op1->data.x_f128, &op2->data.x_f128)) {
                        return CmpLT;
                    } else if (f128M_eq(&op1->data.x_f128, &op2->data.x_f128)) {
                        return CmpEQ;
                    } else {
                        return CmpGT;
                    }
                default:
                    zig_unreachable();
            }
        } else {
            zig_unreachable();
        }
    }
    // Mixed-type comparison: widen both sides to arbitrary precision.
    BigFloat op1_big;
    BigFloat op2_big;
    value_to_bigfloat(&op1_big, op1);
    value_to_bigfloat(&op2_big, op2);
    return bigfloat_cmp(&op1_big, &op2_big);
}
|
|
|
|
// Three-way comparison of the float value in `op` against zero.
// This function cannot handle NaN.
static Cmp float_cmp_zero(ZigValue *op) {
    if (op->type->id == ZigTypeIdComptimeFloat) {
        return bigfloat_cmp_zero(&op->data.x_bigfloat);
    } else if (op->type->id == ZigTypeIdFloat) {
        switch (op->type->data.floating.bit_count) {
            case 16:
            {
                // softfloat f16 has no literals; build zero by conversion.
                const float16_t zero = zig_double_to_f16(0);
                if (f16_lt(op->data.x_f16, zero)) {
                    return CmpLT;
                } else if (f16_lt(zero, op->data.x_f16)) {
                    return CmpGT;
                } else {
                    return CmpEQ;
                }
            }
            case 32:
                if (op->data.x_f32 < 0.0) {
                    return CmpLT;
                } else if (op->data.x_f32 > 0.0) {
                    return CmpGT;
                } else {
                    return CmpEQ;
                }
            case 64:
                if (op->data.x_f64 < 0.0) {
                    return CmpLT;
                } else if (op->data.x_f64 > 0.0) {
                    return CmpGT;
                } else {
                    return CmpEQ;
                }
            case 128:
                float128_t zero_float;
                ui32_to_f128M(0, &zero_float);
                if (f128M_lt(&op->data.x_f128, &zero_float)) {
                    return CmpLT;
                } else if (f128M_eq(&op->data.x_f128, &zero_float)) {
                    return CmpEQ;
                } else {
                    return CmpGT;
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// out_val := op1 + op2. Operand types must match; the result takes the
// same type.
static void float_add(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    ZigType *ty = op1->type;
    assert(ty == op2->type);
    out_val->type = ty;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_add(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            out_val->data.x_f16 = f16_add(op1->data.x_f16, op2->data.x_f16);
            return;
        case 32:
            out_val->data.x_f32 = op1->data.x_f32 + op2->data.x_f32;
            return;
        case 64:
            out_val->data.x_f64 = op1->data.x_f64 + op2->data.x_f64;
            return;
        case 128:
            f128M_add(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// out_val := op1 - op2. Operand types must match; the result takes the
// same type.
static void float_sub(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    ZigType *ty = op1->type;
    assert(ty == op2->type);
    out_val->type = ty;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_sub(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            out_val->data.x_f16 = f16_sub(op1->data.x_f16, op2->data.x_f16);
            return;
        case 32:
            out_val->data.x_f32 = op1->data.x_f32 - op2->data.x_f32;
            return;
        case 64:
            out_val->data.x_f64 = op1->data.x_f64 - op2->data.x_f64;
            return;
        case 128:
            f128M_sub(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// out_val := op1 * op2. Operand types must match; the result takes the
// same type.
static void float_mul(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    ZigType *ty = op1->type;
    assert(ty == op2->type);
    out_val->type = ty;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_mul(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            out_val->data.x_f16 = f16_mul(op1->data.x_f16, op2->data.x_f16);
            return;
        case 32:
            out_val->data.x_f32 = op1->data.x_f32 * op2->data.x_f32;
            return;
        case 64:
            out_val->data.x_f64 = op1->data.x_f64 * op2->data.x_f64;
            return;
        case 128:
            f128M_mul(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// out_val := op1 / op2 (exact IEEE division, no rounding of the quotient
// to an integer). Operand types must match; the result takes the same type.
static void float_div(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    ZigType *ty = op1->type;
    assert(ty == op2->type);
    out_val->type = ty;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_div(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
            return;
        case 32:
            out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32;
            return;
        case 64:
            out_val->data.x_f64 = op1->data.x_f64 / op2->data.x_f64;
            return;
        case 128:
            f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// out_val := trunc(op1 / op2) -- division with the quotient rounded toward
// zero. Operand types must match; the result takes the same type.
static void float_div_trunc(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_div_trunc(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                // softfloat_round_minMag = round toward zero (truncate).
                out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
                out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_minMag, false);
                return;
            case 32:
                out_val->data.x_f32 = truncf(op1->data.x_f32 / op2->data.x_f32);
                return;
            case 64:
                out_val->data.x_f64 = trunc(op1->data.x_f64 / op2->data.x_f64);
                return;
            case 128:
                f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                f128M_roundToInt(&out_val->data.x_f128, softfloat_round_minMag, false, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// out_val := floor(op1 / op2) -- division with the quotient rounded toward
// negative infinity. Operand types must match; the result takes the same type.
static void float_div_floor(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_div_floor(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                // softfloat_round_min = round toward -inf (floor).
                out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
                out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_min, false);
                return;
            case 32:
                out_val->data.x_f32 = floorf(op1->data.x_f32 / op2->data.x_f32);
                return;
            case 64:
                out_val->data.x_f64 = floor(op1->data.x_f64 / op2->data.x_f64);
                return;
            case 128:
                f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                f128M_roundToInt(&out_val->data.x_f128, softfloat_round_min, false, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// out_val := IEEE remainder of op1 / op2 (fmod semantics: the result has
// the sign of op1). Operand types must match; the result takes the same type.
static void float_rem(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    ZigType *ty = op1->type;
    assert(ty == op2->type);
    out_val->type = ty;
    if (ty->id == ZigTypeIdComptimeFloat) {
        bigfloat_rem(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
        return;
    }
    if (ty->id != ZigTypeIdFloat)
        zig_unreachable();
    switch (ty->data.floating.bit_count) {
        case 16:
            out_val->data.x_f16 = f16_rem(op1->data.x_f16, op2->data.x_f16);
            return;
        case 32:
            out_val->data.x_f32 = fmodf(op1->data.x_f32, op2->data.x_f32);
            return;
        case 64:
            out_val->data.x_f64 = fmod(op1->data.x_f64, op2->data.x_f64);
            return;
        case 128:
            f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// c = a - b * floor(a / b)
// (softfloat_round_min rounds toward -inf, i.e. floor -- so this is the
// floored modulo, whose result has the sign of `b`, not the truncated
// remainder the old comment described.)
static float16_t zig_f16_mod(float16_t a, float16_t b) {
    float16_t c;
    c = f16_div(a, b);
    c = f16_roundToInt(c, softfloat_round_min, true);
    c = f16_mul(b, c);
    c = f16_sub(a, c);
    return c;
}
|
|
|
|
// c = a - b * floor(a / b)
// (softfloat_round_min rounds toward -inf, i.e. floor -- so this is the
// floored modulo, whose result has the sign of `b`, not the truncated
// remainder the old comment described.)
static void zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t* c) {
    f128M_div(a, b, c);
    f128M_roundToInt(c, softfloat_round_min, true, c);
    f128M_mul(b, c, c);
    f128M_sub(a, c, c);
}
|
|
|
|
// out_val := op1 modulo op2 with floored-division semantics (result has the
// sign of op2), as opposed to float_rem's fmod semantics. Operand types must
// match; the result takes the same type.
static void float_mod(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_mod(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = zig_f16_mod(op1->data.x_f16, op2->data.x_f16);
                return;
            case 32:
                // fmod(fmod(a,b)+b, b) shifts fmod's sign-of-dividend result
                // into the sign-of-divisor range.
                out_val->data.x_f32 = fmodf(fmodf(op1->data.x_f32, op2->data.x_f32) + op2->data.x_f32, op2->data.x_f32);
                return;
            case 64:
                out_val->data.x_f64 = fmod(fmod(op1->data.x_f64, op2->data.x_f64) + op2->data.x_f64, op2->data.x_f64);
                return;
            case 128:
                zig_f128M_mod(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// out_val := -op, preserving op's type.
static void float_negate(ZigValue *out_val, ZigValue *op) {
    out_val->type = op->type;
    if (op->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_negate(&out_val->data.x_bigfloat, &op->data.x_bigfloat);
    } else if (op->type->id == ZigTypeIdFloat) {
        switch (op->type->data.floating.bit_count) {
            case 16:
            {
                // softfloat has no unary negate entry point used here;
                // negation is computed as 0 - x.
                const float16_t zero = zig_double_to_f16(0);
                out_val->data.x_f16 = f16_sub(zero, op->data.x_f16);
                return;
            }
            case 32:
                out_val->data.x_f32 = -op->data.x_f32;
                return;
            case 64:
                out_val->data.x_f64 = -op->data.x_f64;
                return;
            case 128:
                float128_t zero_f128;
                ui32_to_f128M(0, &zero_f128);
                f128M_sub(&zero_f128, &op->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// Serialize the runtime-float value in `op` into `buf` as its raw in-memory
// byte representation, reversing the bytes when a big-endian layout is
// requested. `buf` must hold at least bit_count/8 (<= 16) bytes.
// NOTE(review): "ieee597" looks like a typo for IEEE 754; the name is kept
// because external callers reference it.
void float_write_ieee597(ZigValue *op, uint8_t *buf, bool is_big_endian) {
    if (op->type->id != ZigTypeIdFloat)
        zig_unreachable();

    const unsigned n = op->type->data.floating.bit_count / 8;
    assert(n <= 16);

    switch (op->type->data.floating.bit_count) {
        case 16:
            memcpy(buf, &op->data.x_f16, 2);
            break;
        case 32:
            memcpy(buf, &op->data.x_f32, 4);
            break;
        case 64:
            memcpy(buf, &op->data.x_f64, 8);
            break;
        case 128:
            memcpy(buf, &op->data.x_f128, 16);
            break;
        default:
            zig_unreachable();
    }

    if (is_big_endian) {
        // Byteswap in place if needed
        for (size_t i = 0; i < n / 2; i++) {
            uint8_t u = buf[i];
            buf[i] = buf[n - 1 - i];
            buf[n - 1 - i] = u;
        }
    }
}
|
|
|
|
// Deserialize a raw float byte representation from `buf` into `val`, whose
// type must already be set to a runtime float type; reverses the bytes first
// when a big-endian layout is given. Counterpart of float_write_ieee597.
void float_read_ieee597(ZigValue *val, uint8_t *buf, bool is_big_endian) {
    if (val->type->id != ZigTypeIdFloat)
        zig_unreachable();

    const unsigned n = val->type->data.floating.bit_count / 8;
    assert(n <= 16);

    // Swap into a scratch buffer so the caller's input is left untouched.
    uint8_t tmp[16];
    uint8_t *ptr = buf;

    if (is_big_endian) {
        memcpy(tmp, buf, n);

        // Byteswap if needed
        for (size_t i = 0; i < n / 2; i++) {
            uint8_t u = tmp[i];
            tmp[i] = tmp[n - 1 - i];
            tmp[n - 1 - i] = u;
        }

        ptr = tmp;
    }

    switch (val->type->data.floating.bit_count) {
        case 16:
            memcpy(&val->data.x_f16, ptr, 2);
            return;
        case 32:
            memcpy(&val->data.x_f32, ptr, 4);
            return;
        case 64:
            memcpy(&val->data.x_f64, ptr, 8);
            return;
        case 128:
            memcpy(&val->data.x_f128, ptr, 16);
            return;
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Widen any numeric comptime value -- integer (fixed or comptime) or float
// of any width -- into an arbitrary-precision BigFloat.
static void value_to_bigfloat(BigFloat *out, ZigValue *val) {
    switch (val->type->id) {
        case ZigTypeIdInt:
        case ZigTypeIdComptimeInt:
            bigfloat_init_bigint(out, &val->data.x_bigint);
            return;
        case ZigTypeIdComptimeFloat:
            // Already a BigFloat; plain struct copy.
            *out = val->data.x_bigfloat;
            return;
        case ZigTypeIdFloat: switch (val->type->data.floating.bit_count) {
            case 16:
                bigfloat_init_16(out, val->data.x_f16);
                return;
            case 32:
                bigfloat_init_32(out, val->data.x_f32);
                return;
            case 64:
                bigfloat_init_64(out, val->data.x_f64);
                return;
            case 80:
                zig_panic("TODO");
            case 128:
                bigfloat_init_128(out, val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Determine whether the comptime-known numeric value produced by
// `instruction` can be represented in `other_type` without losing
// information. Returns true when the coercion is lossless; otherwise emits a
// specific compile error on `ira` and returns false. `explicit_cast` widens
// what is accepted: a float with no fractional part may then convert to an
// integer type.
static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstGen *instruction, ZigType *other_type,
        bool explicit_cast)
{
    if (type_is_invalid(other_type)) {
        return false;
    }

    // First pass tolerates lazy values so @alignOf/@sizeOf results can be
    // accepted without forcing their evaluation.
    ZigValue *const_val = ir_resolve_const(ira, instruction, LazyOkNoUndef);
    if (const_val == nullptr)
        return false;

    if (const_val->special == ConstValSpecialLazy) {
        switch (const_val->data.x_lazy->id) {
            case LazyValueIdAlignOf: {
                // This is guaranteed to fit into a u29
                if (other_type->id == ZigTypeIdComptimeInt)
                    return true;
                size_t align_bits = get_align_amt_type(ira->codegen)->data.integral.bit_count;
                if (other_type->id == ZigTypeIdInt && !other_type->data.integral.is_signed &&
                    other_type->data.integral.bit_count >= align_bits)
                {
                    return true;
                }
                break;
            }
            case LazyValueIdSizeOf: {
                // This is guaranteed to fit into a usize
                if (other_type->id == ZigTypeIdComptimeInt)
                    return true;
                size_t usize_bits = ira->codegen->builtin_types.entry_usize->data.integral.bit_count;
                if (other_type->id == ZigTypeIdInt && !other_type->data.integral.is_signed &&
                    other_type->data.integral.bit_count >= usize_bits)
                {
                    return true;
                }
                break;
            }
            default:
                break;
        }
    }

    // Lazy shortcut did not apply; now force full resolution (undef rejected).
    const_val = ir_resolve_const(ira, instruction, UndefBad);
    if (const_val == nullptr)
        return false;

    bool const_val_is_int = (const_val->type->id == ZigTypeIdInt || const_val->type->id == ZigTypeIdComptimeInt);
    bool const_val_is_float = (const_val->type->id == ZigTypeIdFloat || const_val->type->id == ZigTypeIdComptimeFloat);
    assert(const_val_is_int || const_val_is_float);

    // Any integer fits in comptime_float (arbitrary precision).
    if (const_val_is_int && other_type->id == ZigTypeIdComptimeFloat) {
        return true;
    }
    if (other_type->id == ZigTypeIdFloat) {
        if (const_val->type->id == ZigTypeIdComptimeInt || const_val->type->id == ZigTypeIdComptimeFloat) {
            return true;
        }
        if (const_val->type->id == ZigTypeIdInt) {
            // Integer -> runtime float: round-trip through the target width
            // and accept only if the value survives unchanged.
            BigFloat tmp_bf;
            bigfloat_init_bigint(&tmp_bf, &const_val->data.x_bigint);
            BigFloat orig_bf;
            switch (other_type->data.floating.bit_count) {
                case 16: {
                    float16_t tmp = bigfloat_to_f16(&tmp_bf);
                    bigfloat_init_16(&orig_bf, tmp);
                    break;
                }
                case 32: {
                    float tmp = bigfloat_to_f32(&tmp_bf);
                    bigfloat_init_32(&orig_bf, tmp);
                    break;
                }
                case 64: {
                    double tmp = bigfloat_to_f64(&tmp_bf);
                    bigfloat_init_64(&orig_bf, tmp);
                    break;
                }
                case 80:
                    zig_panic("TODO");
                case 128: {
                    float128_t tmp = bigfloat_to_f128(&tmp_bf);
                    bigfloat_init_128(&orig_bf, tmp);
                    break;
                }
                default:
                    zig_unreachable();
            }
            BigInt orig_bi;
            bigint_init_bigfloat(&orig_bi, &orig_bf);
            if (bigint_cmp(&orig_bi, &const_val->data.x_bigint) == CmpEQ) {
                return true;
            }
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
            ir_add_error_node(ira, instruction->base.source_node,
                buf_sprintf("type %s cannot represent integer value %s",
                    buf_ptr(&other_type->name),
                    buf_ptr(val_buf)));
            return false;
        }
        // Float -> wider-or-equal float is always lossless.
        if (other_type->data.floating.bit_count >= const_val->type->data.floating.bit_count) {
            return true;
        }
        // Float -> narrower float: round-trip through the target width and
        // accept only exact values. Dispatch on (target width, source width).
        switch (other_type->data.floating.bit_count) {
            case 16:
                switch (const_val->type->data.floating.bit_count) {
                    case 32: {
                        float16_t tmp = zig_double_to_f16(const_val->data.x_f32);
                        float orig = zig_f16_to_double(tmp);
                        if (const_val->data.x_f32 == orig) {
                            return true;
                        }
                        break;
                    }
                    case 64: {
                        float16_t tmp = zig_double_to_f16(const_val->data.x_f64);
                        double orig = zig_f16_to_double(tmp);
                        if (const_val->data.x_f64 == orig) {
                            return true;
                        }
                        break;
                    }
                    case 80:
                        zig_panic("TODO");
                    case 128: {
                        float16_t tmp = f128M_to_f16(&const_val->data.x_f128);
                        float128_t orig;
                        f16_to_f128M(tmp, &orig);
                        if (f128M_eq(&orig, &const_val->data.x_f128)) {
                            return true;
                        }
                        break;
                    }
                    default:
                        zig_unreachable();
                }
                break;
            case 32:
                switch (const_val->type->data.floating.bit_count) {
                    case 64: {
                        // Narrow to float, widen back, compare.
                        float tmp = const_val->data.x_f64;
                        double orig = tmp;
                        if (const_val->data.x_f64 == orig) {
                            return true;
                        }
                        break;
                    }
                    case 80:
                        zig_panic("TODO");
                    case 128: {
                        float32_t tmp = f128M_to_f32(&const_val->data.x_f128);
                        float128_t orig;
                        f32_to_f128M(tmp, &orig);
                        if (f128M_eq(&orig, &const_val->data.x_f128)) {
                            return true;
                        }
                        break;
                    }
                    default:
                        zig_unreachable();
                }
                break;
            case 64:
                switch (const_val->type->data.floating.bit_count) {
                    case 80:
                        zig_panic("TODO");
                    case 128: {
                        float64_t tmp = f128M_to_f64(&const_val->data.x_f128);
                        float128_t orig;
                        f64_to_f128M(tmp, &orig);
                        if (f128M_eq(&orig, &const_val->data.x_f128)) {
                            return true;
                        }
                        break;
                    }
                    default:
                        zig_unreachable();
                }
                break;
            case 80:
                assert(const_val->type->data.floating.bit_count == 128);
                zig_panic("TODO");
            case 128:
                return true;
            default:
                zig_unreachable();
        }
        // Round-trip failed: narrowing would lose information.
        Buf *val_buf = buf_alloc();
        float_append_buf(val_buf, const_val);
        ir_add_error_node(ira, instruction->base.source_node,
            buf_sprintf("cast of value %s to type '%s' loses information",
                buf_ptr(val_buf),
                buf_ptr(&other_type->name)));
        return false;
    } else if (other_type->id == ZigTypeIdInt && const_val_is_int) {
        if (!other_type->data.integral.is_signed && const_val->data.x_bigint.is_negative) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
            ir_add_error_node(ira, instruction->base.source_node,
                buf_sprintf("cannot cast negative value %s to unsigned integer type '%s'",
                    buf_ptr(val_buf),
                    buf_ptr(&other_type->name)));
            return false;
        }
        if (bigint_fits_in_bits(&const_val->data.x_bigint, other_type->data.integral.bit_count,
            other_type->data.integral.is_signed))
        {
            return true;
        }
    } else if (const_val_fits_in_num_lit(const_val, other_type)) {
        return true;
    } else if (other_type->id == ZigTypeIdOptional) {
        // ?T: check against the payload type with the same rules as above.
        ZigType *child_type = other_type->data.maybe.child_type;
        if (const_val_fits_in_num_lit(const_val, child_type)) {
            return true;
        } else if (child_type->id == ZigTypeIdInt && const_val_is_int) {
            if (!child_type->data.integral.is_signed && const_val->data.x_bigint.is_negative) {
                Buf *val_buf = buf_alloc();
                bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
                ir_add_error_node(ira, instruction->base.source_node,
                    buf_sprintf("cannot cast negative value %s to unsigned integer type '%s'",
                        buf_ptr(val_buf),
                        buf_ptr(&child_type->name)));
                return false;
            }
            if (bigint_fits_in_bits(&const_val->data.x_bigint,
                child_type->data.integral.bit_count,
                child_type->data.integral.is_signed))
            {
                return true;
            }
        } else if (child_type->id == ZigTypeIdFloat && const_val_is_float) {
            return true;
        }
    }
    // Explicit casts additionally allow fraction-free floats into integers.
    if (explicit_cast && (other_type->id == ZigTypeIdInt || other_type->id == ZigTypeIdComptimeInt) &&
        const_val_is_float)
    {
        if (float_has_fraction(const_val)) {
            Buf *val_buf = buf_alloc();
            float_append_buf(val_buf, const_val);

            ir_add_error_node(ira, instruction->base.source_node,
                buf_sprintf("fractional component prevents float value %s from being casted to type '%s'",
                    buf_ptr(val_buf),
                    buf_ptr(&other_type->name)));
            return false;
        } else {
            if (other_type->id == ZigTypeIdComptimeInt) {
                return true;
            } else {
                BigInt bigint;
                float_init_bigint(&bigint, const_val);
                if (bigint_fits_in_bits(&bigint, other_type->data.integral.bit_count,
                    other_type->data.integral.is_signed))
                {
                    return true;
                }
            }
        }
    }

    // Nothing matched: emit the generic coercion error with the value rendered
    // in its own notation.
    const char *num_lit_str;
    Buf *val_buf = buf_alloc();
    if (const_val_is_float) {
        num_lit_str = "float";
        float_append_buf(val_buf, const_val);
    } else {
        num_lit_str = "integer";
        bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
    }

    ir_add_error_node(ira, instruction->base.source_node,
        buf_sprintf("%s value %s cannot be coerced to type '%s'",
            num_lit_str,
            buf_ptr(val_buf),
            buf_ptr(&other_type->name)));
    return false;
}
|
|
|
|
// Whether `type` is a union carrying a tag: either the declaration requested
// an auto-generated enum tag (auto_enum) or supplied an explicit tag type
// argument (init_arg_expr).
static bool is_tagged_union(ZigType *type) {
    if (type->id != ZigTypeIdUnion)
        return false;
    AstNode *decl_node = type->data.unionation.decl_node;
    if (decl_node->data.container_decl.auto_enum)
        return true;
    return decl_node->data.container_decl.init_arg_expr != nullptr;
}
|
|
|
|
// Fill `errors` (indexed by global error value) with the members of the
// error set `set`. Each slot must be empty beforehand: an entry appearing
// twice trips the assert.
static void populate_error_set_table(ErrorTableEntry **errors, ZigType *set) {
    assert(set->id == ZigTypeIdErrorSet);
    const uint32_t count = set->data.error_set.err_count;
    for (uint32_t idx = 0; idx < count; idx += 1) {
        ErrorTableEntry *entry = set->data.error_set.errors[idx];
        assert(errors[entry->value] == nullptr);
        errors[entry->value] = entry;
    }
}
|
|
|
|
// Pick between two entries for the same error, favoring the one declared as
// an explicit error-set field (which can carry docs). Ties go to `preferred`.
static ErrorTableEntry *better_documented_error(ErrorTableEntry *preferred, ErrorTableEntry *other) {
    bool preferred_is_field = preferred->decl_node->type == NodeTypeErrorSetField;
    bool other_is_field = other->decl_node->type == NodeTypeErrorSetField;
    return (!preferred_is_field && other_is_field) ? other : preferred;
}
|
|
|
|
// Compute the intersection of two error sets, producing a freshly named
// `error{...}` type. The global error set acts as an identity: intersecting
// with it returns the other set unchanged. Returns the invalid type when an
// inferred error set cannot be resolved.
static ZigType *get_error_set_intersection(IrAnalyze *ira, ZigType *set1, ZigType *set2,
        AstNode *source_node)
{
    assert(set1->id == ZigTypeIdErrorSet);
    assert(set2->id == ZigTypeIdErrorSet);

    // Inferred error sets must be fully resolved before we can enumerate
    // their members.
    if (!resolve_inferred_error_set(ira->codegen, set1, source_node)) {
        return ira->codegen->builtin_types.entry_invalid;
    }
    if (!resolve_inferred_error_set(ira->codegen, set2, source_node)) {
        return ira->codegen->builtin_types.entry_invalid;
    }
    if (type_is_global_error_set(set1)) {
        return set2;
    }
    if (type_is_global_error_set(set2)) {
        return set1;
    }
    // Build a table indexed by global error value, populated from set1; any
    // member of set2 with a non-null slot is in the intersection.
    size_t errors_count = ira->codegen->errors_by_index.length;
    ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(errors_count);
    populate_error_set_table(errors, set1);
    ZigList<ErrorTableEntry *> intersection_list = {};

    ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
    buf_resize(&err_set_type->name, 0);
    buf_appendf(&err_set_type->name, "error{");

    bool need_comma = false;
    for (uint32_t i = 0; i < set2->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set2->data.error_set.errors[i];
        ErrorTableEntry *existing_entry = errors[error_entry->value];
        if (existing_entry != nullptr) {
            // prefer the one with docs
            const char *comma = need_comma ? "," : "";
            need_comma = true;
            ErrorTableEntry *existing_entry_with_docs = better_documented_error(existing_entry, error_entry);
            intersection_list.append(existing_entry_with_docs);
            buf_appendf(&err_set_type->name, "%s%s", comma, buf_ptr(&existing_entry_with_docs->name));
        }
    }
    heap::c_allocator.deallocate(errors, errors_count);

    // Ownership of the list's backing storage is handed off to the type; the
    // list itself is not freed here.
    err_set_type->data.error_set.err_count = intersection_list.length;
    err_set_type->data.error_set.errors = intersection_list.items;
    // All error sets share the ABI layout of the global error set.
    err_set_type->size_in_bits = ira->codegen->builtin_types.entry_global_error_set->size_in_bits;
    err_set_type->abi_align = ira->codegen->builtin_types.entry_global_error_set->abi_align;
    err_set_type->abi_size = ira->codegen->builtin_types.entry_global_error_set->abi_size;

    buf_appendf(&err_set_type->name, "}");

    return err_set_type;
}
|
|
|
|
// Determines whether a value of `actual_type` may be used where `wanted_type`
// is expected with no runtime conversion at all ("const cast only"): the two
// types must have the same in-memory representation and may differ only in
// qualifiers that are legal to gain or drop. On success the returned
// ConstCastOnly has id ConstCastResultIdOk; otherwise the id (plus a payload
// allocated into result.data) describes the first incompatibility found, so
// the caller can produce a precise error note. `wanted_is_mutable` means the
// destination may be written through, which forbids some otherwise-allowed
// qualifier changes (e.g. gaining `allowzero`). The function recurses on
// child types (pointer/array/optional/error-union/fn/vector payloads).
static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted_type,
        ZigType *actual_type, AstNode *source_node, bool wanted_is_mutable)
{
    CodeGen *g = ira->codegen;
    ConstCastOnly result = {};
    result.id = ConstCastResultIdOk;

    Error err;

    // Identical types trivially match.
    if (wanted_type == actual_type)
        return result;

    // If pointers have the same representation in memory, they can be "const-casted".
    // `const` attribute can be gained
    // `volatile` attribute can be gained
    // `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer)
    // but only if !wanted_is_mutable
    // alignment can be decreased
    // bit offset attributes must match exactly
    // PtrLenSingle/PtrLenUnknown must match exactly, but PtrLenC matches either one
    // sentinel-terminated pointers can coerce into PtrLenUnknown
    ZigType *wanted_ptr_type = get_src_ptr_type(wanted_type);
    ZigType *actual_ptr_type = get_src_ptr_type(actual_type);
    bool wanted_allows_zero = ptr_allows_addr_zero(wanted_type);
    bool actual_allows_zero = ptr_allows_addr_zero(actual_type);
    bool wanted_is_c_ptr = wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenC;
    bool actual_is_c_ptr = actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenC;
    bool wanted_opt_or_ptr = wanted_ptr_type != nullptr && wanted_ptr_type->id == ZigTypeIdPointer;
    bool actual_opt_or_ptr = actual_ptr_type != nullptr && actual_ptr_type->id == ZigTypeIdPointer;
    if (wanted_opt_or_ptr && actual_opt_or_ptr) {
        // The wanted sentinel must be absent, equal to the actual sentinel,
        // or the actual pointer is a C pointer (whose sentinel rules are lax).
        bool ok_null_term_ptrs =
            wanted_ptr_type->data.pointer.sentinel == nullptr ||
            (actual_ptr_type->data.pointer.sentinel != nullptr &&
                const_values_equal(ira->codegen, wanted_ptr_type->data.pointer.sentinel,
                    actual_ptr_type->data.pointer.sentinel)) ||
            actual_ptr_type->data.pointer.ptr_len == PtrLenC;
        if (!ok_null_term_ptrs) {
            result.id = ConstCastResultIdPtrSentinel;
            result.data.bad_ptr_sentinel = heap::c_allocator.allocate_nonzero<ConstCastPtrSentinel>(1);
            result.data.bad_ptr_sentinel->wanted_type = wanted_ptr_type;
            result.data.bad_ptr_sentinel->actual_type = actual_ptr_type;
            return result;
        }
        // Pointer lengths must match exactly unless one side is a C pointer.
        bool ptr_lens_equal = actual_ptr_type->data.pointer.ptr_len == wanted_ptr_type->data.pointer.ptr_len;
        if (!(ptr_lens_equal || wanted_is_c_ptr || actual_is_c_ptr)) {
            result.id = ConstCastResultIdPtrLens;
            return result;
        }

        // const/volatile may be gained going actual -> wanted, never dropped.
        bool ok_cv_qualifiers =
            (!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) &&
            (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile);
        if (!ok_cv_qualifiers) {
            result.id = ConstCastResultIdCV;
            result.data.bad_cv = heap::c_allocator.allocate_nonzero<ConstCastBadCV>(1);
            result.data.bad_cv->wanted_type = wanted_ptr_type;
            result.data.bad_cv->actual_type = actual_ptr_type;
            return result;
        }

        // Recurse on the pointee; the pointee is considered mutable when the
        // wanted pointer is non-const.
        ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type,
                actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
        if (child.id == ConstCastResultIdInvalid)
            return child;
        if (child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdPointerChild;
            result.data.pointer_mismatch = heap::c_allocator.allocate_nonzero<ConstCastPointerMismatch>(1);
            result.data.pointer_mismatch->child = child;
            result.data.pointer_mismatch->wanted_child = wanted_ptr_type->data.pointer.child_type;
            result.data.pointer_mismatch->actual_child = actual_ptr_type->data.pointer.child_type;
            return result;
        }
        // `allowzero` may be gained only when the destination is not mutable.
        bool ok_allows_zero = (wanted_allows_zero &&
                (actual_allows_zero || !wanted_is_mutable)) ||
            (!wanted_allows_zero && !actual_allows_zero);
        if (!ok_allows_zero) {
            result.id = ConstCastResultIdBadAllowsZero;
            result.data.bad_allows_zero = heap::c_allocator.allocate_nonzero<ConstCastBadAllowsZero>(1);
            result.data.bad_allows_zero->wanted_type = wanted_type;
            result.data.bad_allows_zero->actual_type = actual_type;
            return result;
        }
        // Alignment and zero-bit knowledge must be resolved before comparing
        // ABI details below; a resolve failure poisons the whole query.
        if ((err = type_resolve(g, actual_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, wanted_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, wanted_type, ResolveStatusZeroBitsKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, actual_type, ResolveStatusZeroBitsKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        // Representation check: matching bit-ness, identical packed-host
        // layout, and alignment may only decrease (actual >= wanted).
        // If this fails we fall through to the generic type-mismatch result.
        if (type_has_bits(g, wanted_type) == type_has_bits(g, actual_type) &&
            actual_ptr_type->data.pointer.bit_offset_in_host == wanted_ptr_type->data.pointer.bit_offset_in_host &&
            actual_ptr_type->data.pointer.host_int_bytes == wanted_ptr_type->data.pointer.host_int_bytes &&
            get_ptr_align(ira->codegen, actual_ptr_type) >= get_ptr_align(ira->codegen, wanted_ptr_type))
        {
            return result;
        }
    }

    // arrays
    if (wanted_type->id == ZigTypeIdArray && actual_type->id == ZigTypeIdArray &&
        wanted_type->data.array.len == actual_type->data.array.len)
    {
        // Same length; element types must const-cast, propagating mutability.
        ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.array.child_type,
                actual_type->data.array.child_type, source_node, wanted_is_mutable);
        if (child.id == ConstCastResultIdInvalid)
            return child;
        if (child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdArrayChild;
            result.data.array_mismatch = heap::c_allocator.allocate_nonzero<ConstCastArrayMismatch>(1);
            result.data.array_mismatch->child = child;
            result.data.array_mismatch->wanted_child = wanted_type->data.array.child_type;
            result.data.array_mismatch->actual_child = actual_type->data.array.child_type;
            return result;
        }
        // Array sentinel: wanted may be absent, or must equal actual's.
        bool ok_null_terminated = (wanted_type->data.array.sentinel == nullptr) ||
            (actual_type->data.array.sentinel != nullptr &&
            const_values_equal(ira->codegen, wanted_type->data.array.sentinel, actual_type->data.array.sentinel));
        if (!ok_null_terminated) {
            result.id = ConstCastResultIdSentinelArrays;
            result.data.sentinel_arrays = heap::c_allocator.allocate_nonzero<ConstCastBadNullTermArrays>(1);
            result.data.sentinel_arrays->child = child;
            result.data.sentinel_arrays->wanted_type = wanted_type;
            result.data.sentinel_arrays->actual_type = actual_type;
            return result;
        }
        return result;
    }

    // slice const
    if (is_slice(wanted_type) && is_slice(actual_type)) {
        // Slices are structs; compare via their pointer fields.
        // NOTE: these locals intentionally shadow the outer
        // wanted_ptr_type/actual_ptr_type from the pointer section above.
        ZigType *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index]->type_entry;
        ZigType *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index]->type_entry;
        if ((err = type_resolve(g, actual_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, wanted_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        // Same sentinel rule as for bare pointers (minus the C-pointer escape).
        bool ok_sentinels =
            wanted_ptr_type->data.pointer.sentinel == nullptr ||
            (actual_ptr_type->data.pointer.sentinel != nullptr &&
                const_values_equal(ira->codegen, wanted_ptr_type->data.pointer.sentinel,
                    actual_ptr_type->data.pointer.sentinel));
        if (!ok_sentinels) {
            result.id = ConstCastResultIdPtrSentinel;
            result.data.bad_ptr_sentinel = heap::c_allocator.allocate_nonzero<ConstCastPtrSentinel>(1);
            result.data.bad_ptr_sentinel->wanted_type = wanted_ptr_type;
            result.data.bad_ptr_sentinel->actual_type = actual_ptr_type;
            return result;
        }
        // const/volatile may be gained; packed layout must match; alignment
        // may only decrease. If these don't hold we fall through to the
        // generic type-mismatch result at the bottom of the function.
        if ((!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) &&
            (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile) &&
            actual_ptr_type->data.pointer.bit_offset_in_host == wanted_ptr_type->data.pointer.bit_offset_in_host &&
            actual_ptr_type->data.pointer.host_int_bytes == wanted_ptr_type->data.pointer.host_int_bytes &&
            get_ptr_align(g, actual_ptr_type) >= get_ptr_align(g, wanted_ptr_type))
        {
            ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type,
                    actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
            if (child.id == ConstCastResultIdInvalid)
                return child;
            if (child.id != ConstCastResultIdOk) {
                result.id = ConstCastResultIdSliceChild;
                result.data.slice_mismatch = heap::c_allocator.allocate_nonzero<ConstCastSliceMismatch>(1);
                result.data.slice_mismatch->child = child;
                result.data.slice_mismatch->actual_child = actual_ptr_type->data.pointer.child_type;
                result.data.slice_mismatch->wanted_child = wanted_ptr_type->data.pointer.child_type;
            }
            // Falls through with result either Ok or SliceChild.
            return result;
        }
    }

    // optional types
    if (wanted_type->id == ZigTypeIdOptional && actual_type->id == ZigTypeIdOptional) {
        // Consider the case where the wanted type is ??[*]T and the actual one
        // is ?[*]T, we cannot turn the former into the latter even though the
        // child types are compatible (?[*]T and [*]T are both represented as a
        // pointer). The extra level of indirection in ??[*]T means it's
        // represented as a regular, fat, optional type and, as a consequence,
        // has a different shape than the one of ?[*]T.
        if ((wanted_ptr_type != nullptr) != (actual_ptr_type != nullptr)) {
            // The use of type_mismatch is intentional
            result.id = ConstCastResultIdOptionalShape;
            result.data.type_mismatch = heap::c_allocator.allocate_nonzero<ConstCastTypeMismatch>(1);
            result.data.type_mismatch->wanted_type = wanted_type;
            result.data.type_mismatch->actual_type = actual_type;
            return result;
        }
        ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.maybe.child_type,
                actual_type->data.maybe.child_type, source_node, wanted_is_mutable);
        if (child.id == ConstCastResultIdInvalid)
            return child;
        if (child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdOptionalChild;
            result.data.optional = heap::c_allocator.allocate_nonzero<ConstCastOptionalMismatch>(1);
            result.data.optional->child = child;
            result.data.optional->wanted_child = wanted_type->data.maybe.child_type;
            result.data.optional->actual_child = actual_type->data.maybe.child_type;
        }
        return result;
    }

    // error union
    if (wanted_type->id == ZigTypeIdErrorUnion && actual_type->id == ZigTypeIdErrorUnion) {
        // Both the payload type and the error set type must const-cast.
        ConstCastOnly payload_child = types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type,
                actual_type->data.error_union.payload_type, source_node, wanted_is_mutable);
        if (payload_child.id == ConstCastResultIdInvalid)
            return payload_child;
        if (payload_child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdErrorUnionPayload;
            result.data.error_union_payload = heap::c_allocator.allocate_nonzero<ConstCastErrUnionPayloadMismatch>(1);
            result.data.error_union_payload->child = payload_child;
            result.data.error_union_payload->wanted_payload = wanted_type->data.error_union.payload_type;
            result.data.error_union_payload->actual_payload = actual_type->data.error_union.payload_type;
            return result;
        }
        ConstCastOnly error_set_child = types_match_const_cast_only(ira, wanted_type->data.error_union.err_set_type,
                actual_type->data.error_union.err_set_type, source_node, wanted_is_mutable);
        if (error_set_child.id == ConstCastResultIdInvalid)
            return error_set_child;
        if (error_set_child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdErrorUnionErrorSet;
            result.data.error_union_error_set = heap::c_allocator.allocate_nonzero<ConstCastErrUnionErrSetMismatch>(1);
            result.data.error_union_error_set->child = error_set_child;
            result.data.error_union_error_set->wanted_err_set = wanted_type->data.error_union.err_set_type;
            result.data.error_union_error_set->actual_err_set = actual_type->data.error_union.err_set_type;
            return result;
        }
        return result;
    }

    // error set
    if (wanted_type->id == ZigTypeIdErrorSet && actual_type->id == ZigTypeIdErrorSet) {
        // The actual set must be contained in (a subset of) the wanted set.
        ZigType *contained_set = actual_type;
        ZigType *container_set = wanted_type;

        // if the container set is inferred, then this will always work.
        if (container_set->data.error_set.infer_fn != nullptr && container_set->data.error_set.incomplete) {
            return result;
        }
        // if the container set is the global one, it will always work.
        if (type_is_global_error_set(container_set)) {
            return result;
        }

        if (!resolve_inferred_error_set(ira->codegen, contained_set, source_node)) {
            result.id = ConstCastResultIdUnresolvedInferredErrSet;
            return result;
        }

        // The global set cannot fit into any named (finite) container set.
        if (type_is_global_error_set(contained_set)) {
            result.id = ConstCastResultIdErrSetGlobal;
            return result;
        }

        // Scratch table indexed by error value; allocate() (as opposed to
        // allocate_nonzero) yields zeroed memory, which the asserts below
        // rely on to detect duplicate entries.
        size_t errors_count = g->errors_by_index.length;
        ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(errors_count);
        for (uint32_t i = 0; i < container_set->data.error_set.err_count; i += 1) {
            ErrorTableEntry *error_entry = container_set->data.error_set.errors[i];
            assert(errors[error_entry->value] == nullptr);
            errors[error_entry->value] = error_entry;
        }
        // Collect every error in the contained set that the container lacks,
        // so the diagnostic can list all missing errors at once.
        for (uint32_t i = 0; i < contained_set->data.error_set.err_count; i += 1) {
            ErrorTableEntry *contained_error_entry = contained_set->data.error_set.errors[i];
            ErrorTableEntry *error_entry = errors[contained_error_entry->value];
            if (error_entry == nullptr) {
                if (result.id == ConstCastResultIdOk) {
                    result.id = ConstCastResultIdErrSet;
                    result.data.error_set_mismatch = heap::c_allocator.create<ConstCastErrSetMismatch>();
                }
                result.data.error_set_mismatch->missing_errors.append(contained_error_entry);
            }
        }
        heap::c_allocator.deallocate(errors, errors_count);
        return result;
    }

    // fn
    if (wanted_type->id == ZigTypeIdFn &&
        actual_type->id == ZigTypeIdFn)
    {
        // Function alignment may only increase going actual -> wanted.
        if (wanted_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
            result.id = ConstCastResultIdFnAlign;
            return result;
        }
        if (wanted_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
            result.id = ConstCastResultIdFnVarArgs;
            return result;
        }
        if (wanted_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
            result.id = ConstCastResultIdFnIsGeneric;
            return result;
        }
        // Return types compare covariantly; an actual noreturn return type is
        // compatible with anything and is skipped here.
        if (!wanted_type->data.fn.is_generic &&
            actual_type->data.fn.fn_type_id.return_type->id != ZigTypeIdUnreachable)
        {
            ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.fn.fn_type_id.return_type,
                    actual_type->data.fn.fn_type_id.return_type, source_node, false);
            if (child.id == ConstCastResultIdInvalid)
                return child;
            if (child.id != ConstCastResultIdOk) {
                result.id = ConstCastResultIdFnReturnType;
                result.data.return_type = heap::c_allocator.allocate_nonzero<ConstCastOnly>(1);
                *result.data.return_type = child;
                return result;
            }
        }
        if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
            result.id = ConstCastResultIdFnArgCount;
            return result;
        }
        if (wanted_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
            result.id = ConstCastResultIdFnGenericArgCount;
            return result;
        }
        assert(wanted_type->data.fn.is_generic ||
                wanted_type->data.fn.fn_type_id.next_param_index == wanted_type->data.fn.fn_type_id.param_count);
        for (size_t i = 0; i < wanted_type->data.fn.fn_type_id.param_count; i += 1) {
            // note it's reversed for parameters
            FnTypeParamInfo *actual_param_info = &actual_type->data.fn.fn_type_id.param_info[i];
            FnTypeParamInfo *expected_param_info = &wanted_type->data.fn.fn_type_id.param_info[i];

            // Parameters are contravariant: actual's param type is passed as
            // the "wanted" side of the recursive check.
            ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type,
                    expected_param_info->type, source_node, false);
            if (arg_child.id == ConstCastResultIdInvalid)
                return arg_child;
            if (arg_child.id != ConstCastResultIdOk) {
                result.id = ConstCastResultIdFnArg;
                result.data.fn_arg.arg_index = i;
                result.data.fn_arg.actual_param_type = actual_param_info->type;
                result.data.fn_arg.expected_param_type = expected_param_info->type;
                result.data.fn_arg.child = heap::c_allocator.allocate_nonzero<ConstCastOnly>(1);
                *result.data.fn_arg.child = arg_child;
                return result;
            }

            if (expected_param_info->is_noalias != actual_param_info->is_noalias) {
                result.id = ConstCastResultIdFnArgNoAlias;
                result.data.arg_no_alias.arg_index = i;
                return result;
            }
        }
        if (wanted_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
            // ConstCastResultIdFnCC is guaranteed to be the last one reported, meaning everything else is ok.
            result.id = ConstCastResultIdFnCC;
            return result;
        }
        return result;
    }

    if (wanted_type->id == ZigTypeIdInt && actual_type->id == ZigTypeIdInt) {
        // Integers must match in both signedness and width to share a
        // representation; anything else is reported as IntShorten.
        if (wanted_type->data.integral.is_signed != actual_type->data.integral.is_signed ||
            wanted_type->data.integral.bit_count != actual_type->data.integral.bit_count)
        {
            result.id = ConstCastResultIdIntShorten;
            result.data.int_shorten = heap::c_allocator.allocate_nonzero<ConstCastIntShorten>(1);
            result.data.int_shorten->wanted_type = wanted_type;
            result.data.int_shorten->actual_type = actual_type;
            return result;
        }
        return result;
    }

    if (wanted_type->id == ZigTypeIdVector && actual_type->id == ZigTypeIdVector) {
        // Vectors: same length, and element types must const-cast.
        if (actual_type->data.vector.len != wanted_type->data.vector.len) {
            result.id = ConstCastResultIdVectorLength;
            return result;
        }

        ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.vector.elem_type,
                actual_type->data.vector.elem_type, source_node, false);
        if (child.id == ConstCastResultIdInvalid)
            return child;
        if (child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdVectorChild;
            return result;
        }

        return result;
    }

    // No category matched (or a pointer/slice fell through the representation
    // checks above): generic type mismatch.
    result.id = ConstCastResultIdType;
    result.data.type_mismatch = heap::c_allocator.allocate_nonzero<ConstCastTypeMismatch>(1);
    result.data.type_mismatch->wanted_type = wanted_type;
    result.data.type_mismatch->actual_type = actual_type;
    return result;
}
|
|
|
|
static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t *errors_count) {
|
|
size_t old_errors_count = *errors_count;
|
|
*errors_count = g->errors_by_index.length;
|
|
*errors = heap::c_allocator.reallocate(*errors, old_errors_count, *errors_count);
|
|
}
|
|
|
|
static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigType *expected_type,
|
|
IrInstGen **instructions, size_t instruction_count)
|
|
{
|
|
Error err;
|
|
assert(instruction_count >= 1);
|
|
IrInstGen *prev_inst;
|
|
size_t i = 0;
|
|
for (;;) {
|
|
prev_inst = instructions[i];
|
|
if (type_is_invalid(prev_inst->value->type)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
if (prev_inst->value->type->id == ZigTypeIdUnreachable) {
|
|
i += 1;
|
|
if (i == instruction_count) {
|
|
return prev_inst->value->type;
|
|
}
|
|
continue;
|
|
}
|
|
break;
|
|
}
|
|
ErrorTableEntry **errors = nullptr;
|
|
size_t errors_count = 0;
|
|
ZigType *err_set_type = nullptr;
|
|
if (prev_inst->value->type->id == ZigTypeIdErrorSet) {
|
|
if (!resolve_inferred_error_set(ira->codegen, prev_inst->value->type, prev_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
if (type_is_global_error_set(prev_inst->value->type)) {
|
|
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
|
|
} else {
|
|
err_set_type = prev_inst->value->type;
|
|
update_errors_helper(ira->codegen, &errors, &errors_count);
|
|
|
|
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
|
|
assert(errors[error_entry->value] == nullptr);
|
|
errors[error_entry->value] = error_entry;
|
|
}
|
|
}
|
|
}
|
|
|
|
bool any_are_null = (prev_inst->value->type->id == ZigTypeIdNull);
|
|
bool convert_to_const_slice = false;
|
|
bool make_the_slice_const = false;
|
|
bool make_the_pointer_const = false;
|
|
for (; i < instruction_count; i += 1) {
|
|
IrInstGen *cur_inst = instructions[i];
|
|
ZigType *cur_type = cur_inst->value->type;
|
|
ZigType *prev_type = prev_inst->value->type;
|
|
|
|
if (type_is_invalid(cur_type)) {
|
|
return cur_type;
|
|
}
|
|
|
|
if (prev_type == cur_type) {
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdUnreachable) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdUnreachable) {
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdErrorSet) {
|
|
ir_assert_gen(err_set_type != nullptr, prev_inst);
|
|
if (cur_type->id == ZigTypeIdErrorSet) {
|
|
if (type_is_global_error_set(err_set_type)) {
|
|
continue;
|
|
}
|
|
bool allow_infer = cur_type->data.error_set.infer_fn != nullptr &&
|
|
cur_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
|
|
if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
if (!allow_infer && type_is_global_error_set(cur_type)) {
|
|
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
// number of declared errors might have increased now
|
|
update_errors_helper(ira->codegen, &errors, &errors_count);
|
|
|
|
// if err_set_type is a superset of cur_type, keep err_set_type.
|
|
// if cur_type is a superset of err_set_type, switch err_set_type to cur_type
|
|
bool prev_is_superset = true;
|
|
for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *contained_error_entry = cur_type->data.error_set.errors[i];
|
|
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
|
|
if (error_entry == nullptr) {
|
|
prev_is_superset = false;
|
|
break;
|
|
}
|
|
}
|
|
if (prev_is_superset) {
|
|
continue;
|
|
}
|
|
|
|
// unset everything in errors
|
|
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
|
|
errors[error_entry->value] = nullptr;
|
|
}
|
|
for (uint32_t i = 0, count = ira->codegen->errors_by_index.length; i < count; i += 1) {
|
|
assert(errors[i] == nullptr);
|
|
}
|
|
for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = cur_type->data.error_set.errors[i];
|
|
assert(errors[error_entry->value] == nullptr);
|
|
errors[error_entry->value] = error_entry;
|
|
}
|
|
bool cur_is_superset = true;
|
|
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *contained_error_entry = err_set_type->data.error_set.errors[i];
|
|
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
|
|
if (error_entry == nullptr) {
|
|
cur_is_superset = false;
|
|
break;
|
|
}
|
|
}
|
|
if (cur_is_superset) {
|
|
err_set_type = cur_type;
|
|
prev_inst = cur_inst;
|
|
assert(errors != nullptr);
|
|
continue;
|
|
}
|
|
|
|
// neither of them are supersets. so we invent a new error set type that is a union of both of them
|
|
err_set_type = get_error_set_union(ira->codegen, errors, cur_type, err_set_type, nullptr);
|
|
assert(errors != nullptr);
|
|
continue;
|
|
} else if (cur_type->id == ZigTypeIdErrorUnion) {
|
|
if (type_is_global_error_set(err_set_type)) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
|
|
bool allow_infer = cur_err_set_type->data.error_set.infer_fn != nullptr &&
|
|
cur_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
|
|
if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
if (!allow_infer && type_is_global_error_set(cur_err_set_type)) {
|
|
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
update_errors_helper(ira->codegen, &errors, &errors_count);
|
|
|
|
// test if err_set_type is a subset of cur_type's error set
|
|
// unset everything in errors
|
|
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
|
|
errors[error_entry->value] = nullptr;
|
|
}
|
|
for (uint32_t i = 0, count = ira->codegen->errors_by_index.length; i < count; i += 1) {
|
|
assert(errors[i] == nullptr);
|
|
}
|
|
for (uint32_t i = 0; i < cur_err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = cur_err_set_type->data.error_set.errors[i];
|
|
assert(errors[error_entry->value] == nullptr);
|
|
errors[error_entry->value] = error_entry;
|
|
}
|
|
bool cur_is_superset = true;
|
|
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *contained_error_entry = err_set_type->data.error_set.errors[i];
|
|
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
|
|
if (error_entry == nullptr) {
|
|
cur_is_superset = false;
|
|
break;
|
|
}
|
|
}
|
|
if (cur_is_superset) {
|
|
err_set_type = cur_err_set_type;
|
|
prev_inst = cur_inst;
|
|
assert(errors != nullptr);
|
|
continue;
|
|
}
|
|
|
|
// not a subset. invent new error set type, union of both of them
|
|
err_set_type = get_error_set_union(ira->codegen, errors, cur_err_set_type, err_set_type, nullptr);
|
|
prev_inst = cur_inst;
|
|
assert(errors != nullptr);
|
|
continue;
|
|
} else {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdErrorSet) {
|
|
bool allow_infer = cur_type->data.error_set.infer_fn != nullptr &&
|
|
cur_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
|
|
if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
if (!allow_infer && type_is_global_error_set(cur_type)) {
|
|
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
|
|
continue;
|
|
}
|
|
if (err_set_type != nullptr && type_is_global_error_set(err_set_type)) {
|
|
continue;
|
|
}
|
|
|
|
update_errors_helper(ira->codegen, &errors, &errors_count);
|
|
|
|
if (err_set_type == nullptr) {
|
|
bool allow_infer = false;
|
|
if (prev_type->id == ZigTypeIdErrorUnion) {
|
|
err_set_type = prev_type->data.error_union.err_set_type;
|
|
allow_infer = err_set_type->data.error_set.infer_fn != nullptr &&
|
|
err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
|
|
} else {
|
|
err_set_type = cur_type;
|
|
}
|
|
|
|
if (!allow_infer && !resolve_inferred_error_set(ira->codegen, err_set_type, cur_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
|
|
if (!allow_infer && type_is_global_error_set(err_set_type)) {
|
|
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
|
|
continue;
|
|
}
|
|
|
|
update_errors_helper(ira->codegen, &errors, &errors_count);
|
|
|
|
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
|
|
assert(errors[error_entry->value] == nullptr);
|
|
errors[error_entry->value] = error_entry;
|
|
}
|
|
if (err_set_type == cur_type) {
|
|
continue;
|
|
}
|
|
}
|
|
// check if the cur type error set is a subset
|
|
bool prev_is_superset = true;
|
|
for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *contained_error_entry = cur_type->data.error_set.errors[i];
|
|
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
|
|
if (error_entry == nullptr) {
|
|
prev_is_superset = false;
|
|
break;
|
|
}
|
|
}
|
|
if (prev_is_superset) {
|
|
continue;
|
|
}
|
|
// not a subset. invent new error set type, union of both of them
|
|
err_set_type = get_error_set_union(ira->codegen, errors, err_set_type, cur_type, nullptr);
|
|
assert(errors != nullptr);
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdErrorUnion && cur_type->id == ZigTypeIdErrorUnion) {
|
|
ZigType *prev_payload_type = prev_type->data.error_union.payload_type;
|
|
ZigType *cur_payload_type = cur_type->data.error_union.payload_type;
|
|
|
|
bool const_cast_prev = types_match_const_cast_only(ira, prev_payload_type, cur_payload_type,
|
|
source_node, false).id == ConstCastResultIdOk;
|
|
bool const_cast_cur = types_match_const_cast_only(ira, cur_payload_type, prev_payload_type,
|
|
source_node, false).id == ConstCastResultIdOk;
|
|
|
|
if (const_cast_prev || const_cast_cur) {
|
|
if (const_cast_cur) {
|
|
prev_inst = cur_inst;
|
|
}
|
|
|
|
ZigType *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type;
|
|
ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
|
|
if (prev_err_set_type == cur_err_set_type)
|
|
continue;
|
|
|
|
bool allow_infer_prev = prev_err_set_type->data.error_set.infer_fn != nullptr &&
|
|
prev_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
|
|
bool allow_infer_cur = cur_err_set_type->data.error_set.infer_fn != nullptr &&
|
|
cur_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
|
|
|
|
if (!allow_infer_prev && !resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
|
|
if (!allow_infer_cur && !resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
|
|
if ((!allow_infer_prev && type_is_global_error_set(prev_err_set_type)) ||
|
|
(!allow_infer_cur && type_is_global_error_set(cur_err_set_type)))
|
|
{
|
|
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
|
|
continue;
|
|
}
|
|
|
|
update_errors_helper(ira->codegen, &errors, &errors_count);
|
|
|
|
if (err_set_type == nullptr) {
|
|
err_set_type = prev_err_set_type;
|
|
for (uint32_t i = 0; i < prev_err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = prev_err_set_type->data.error_set.errors[i];
|
|
assert(errors[error_entry->value] == nullptr);
|
|
errors[error_entry->value] = error_entry;
|
|
}
|
|
}
|
|
bool prev_is_superset = true;
|
|
for (uint32_t i = 0; i < cur_err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *contained_error_entry = cur_err_set_type->data.error_set.errors[i];
|
|
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
|
|
if (error_entry == nullptr) {
|
|
prev_is_superset = false;
|
|
break;
|
|
}
|
|
}
|
|
if (prev_is_superset) {
|
|
continue;
|
|
}
|
|
// unset all the errors
|
|
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
|
|
errors[error_entry->value] = nullptr;
|
|
}
|
|
for (uint32_t i = 0, count = ira->codegen->errors_by_index.length; i < count; i += 1) {
|
|
assert(errors[i] == nullptr);
|
|
}
|
|
for (uint32_t i = 0; i < cur_err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *error_entry = cur_err_set_type->data.error_set.errors[i];
|
|
assert(errors[error_entry->value] == nullptr);
|
|
errors[error_entry->value] = error_entry;
|
|
}
|
|
bool cur_is_superset = true;
|
|
for (uint32_t i = 0; i < prev_err_set_type->data.error_set.err_count; i += 1) {
|
|
ErrorTableEntry *contained_error_entry = prev_err_set_type->data.error_set.errors[i];
|
|
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
|
|
if (error_entry == nullptr) {
|
|
cur_is_superset = false;
|
|
break;
|
|
}
|
|
}
|
|
if (cur_is_superset) {
|
|
err_set_type = cur_err_set_type;
|
|
continue;
|
|
}
|
|
|
|
err_set_type = get_error_set_union(ira->codegen, errors, cur_err_set_type, prev_err_set_type, nullptr);
|
|
continue;
|
|
}
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdNull) {
|
|
prev_inst = cur_inst;
|
|
any_are_null = true;
|
|
continue;
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdNull) {
|
|
any_are_null = true;
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdEnum && cur_type->id == ZigTypeIdEnumLiteral) {
|
|
TypeEnumField *field = find_enum_type_field(prev_type, cur_inst->value->data.x_enum_literal);
|
|
if (field != nullptr) {
|
|
continue;
|
|
}
|
|
}
|
|
if (is_tagged_union(prev_type) && cur_type->id == ZigTypeIdEnumLiteral) {
|
|
TypeUnionField *field = find_union_type_field(prev_type, cur_inst->value->data.x_enum_literal);
|
|
if (field != nullptr) {
|
|
continue;
|
|
}
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdEnum && prev_type->id == ZigTypeIdEnumLiteral) {
|
|
TypeEnumField *field = find_enum_type_field(cur_type, prev_inst->value->data.x_enum_literal);
|
|
if (field != nullptr) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
if (is_tagged_union(cur_type) && prev_type->id == ZigTypeIdEnumLiteral) {
|
|
TypeUnionField *field = find_union_type_field(cur_type, prev_inst->value->data.x_enum_literal);
|
|
if (field != nullptr) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenC &&
|
|
(cur_type->id == ZigTypeIdComptimeInt || cur_type->id == ZigTypeIdInt))
|
|
{
|
|
continue;
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenC &&
|
|
(prev_type->id == ZigTypeIdComptimeInt || prev_type->id == ZigTypeIdInt))
|
|
{
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdPointer && cur_type->id == ZigTypeIdPointer) {
|
|
if (prev_type->data.pointer.ptr_len == PtrLenC &&
|
|
types_match_const_cast_only(ira, prev_type->data.pointer.child_type,
|
|
cur_type->data.pointer.child_type, source_node,
|
|
!prev_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
continue;
|
|
}
|
|
if (cur_type->data.pointer.ptr_len == PtrLenC &&
|
|
types_match_const_cast_only(ira, cur_type->data.pointer.child_type,
|
|
prev_type->data.pointer.child_type, source_node,
|
|
!cur_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
if (types_match_const_cast_only(ira, prev_type, cur_type, source_node, false).id == ConstCastResultIdOk) {
|
|
continue;
|
|
}
|
|
|
|
if (types_match_const_cast_only(ira, cur_type, prev_type, source_node, false).id == ConstCastResultIdOk) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdInt &&
|
|
cur_type->id == ZigTypeIdInt &&
|
|
prev_type->data.integral.is_signed == cur_type->data.integral.is_signed)
|
|
{
|
|
if (cur_type->data.integral.bit_count > prev_type->data.integral.bit_count) {
|
|
prev_inst = cur_inst;
|
|
}
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdFloat && cur_type->id == ZigTypeIdFloat) {
|
|
if (cur_type->data.floating.bit_count > prev_type->data.floating.bit_count) {
|
|
prev_inst = cur_inst;
|
|
}
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdErrorUnion &&
|
|
types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type,
|
|
source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
continue;
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdErrorUnion &&
|
|
types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type,
|
|
source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
if (err_set_type != nullptr) {
|
|
ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
|
|
bool allow_infer = cur_err_set_type->data.error_set.infer_fn != nullptr &&
|
|
cur_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
|
|
if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->base.source_node)) {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
if ((!allow_infer && type_is_global_error_set(cur_err_set_type)) ||
|
|
type_is_global_error_set(err_set_type))
|
|
{
|
|
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
update_errors_helper(ira->codegen, &errors, &errors_count);
|
|
|
|
err_set_type = get_error_set_union(ira->codegen, errors, err_set_type, cur_err_set_type, nullptr);
|
|
}
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdOptional &&
|
|
types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type,
|
|
source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
continue;
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdOptional &&
|
|
types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type,
|
|
source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdOptional &&
|
|
types_match_const_cast_only(ira, cur_type, prev_type->data.maybe.child_type,
|
|
source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
prev_inst = cur_inst;
|
|
any_are_null = true;
|
|
continue;
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdOptional &&
|
|
types_match_const_cast_only(ira, prev_type, cur_type->data.maybe.child_type,
|
|
source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
any_are_null = true;
|
|
continue;
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdUndefined) {
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdUndefined) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdComptimeInt ||
|
|
prev_type->id == ZigTypeIdComptimeFloat)
|
|
{
|
|
if (ir_num_lit_fits_in_other_type(ira, prev_inst, cur_type, false)) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
} else {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdComptimeInt ||
|
|
cur_type->id == ZigTypeIdComptimeFloat)
|
|
{
|
|
if (ir_num_lit_fits_in_other_type(ira, cur_inst, prev_type, false)) {
|
|
continue;
|
|
} else {
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
}
|
|
|
|
// *[N]T to [*]T
|
|
if (prev_type->id == ZigTypeIdPointer &&
|
|
prev_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
((cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenUnknown)))
|
|
{
|
|
convert_to_const_slice = false;
|
|
prev_inst = cur_inst;
|
|
|
|
if (prev_type->data.pointer.is_const && !cur_type->data.pointer.is_const) {
|
|
// const array pointer and non-const unknown pointer
|
|
make_the_pointer_const = true;
|
|
}
|
|
continue;
|
|
}
|
|
|
|
// *[N]T to [*]T
|
|
if (cur_type->id == ZigTypeIdPointer &&
|
|
cur_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
((prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenUnknown)))
|
|
{
|
|
if (cur_type->data.pointer.is_const && !prev_type->data.pointer.is_const) {
|
|
// const array pointer and non-const unknown pointer
|
|
make_the_pointer_const = true;
|
|
}
|
|
continue;
|
|
}
|
|
|
|
// *[N]T to []T
|
|
// *[N]T to E![]T
|
|
if (cur_type->id == ZigTypeIdPointer &&
|
|
cur_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
((prev_type->id == ZigTypeIdErrorUnion && is_slice(prev_type->data.error_union.payload_type)) ||
|
|
is_slice(prev_type)))
|
|
{
|
|
ZigType *array_type = cur_type->data.pointer.child_type;
|
|
ZigType *slice_type = (prev_type->id == ZigTypeIdErrorUnion) ?
|
|
prev_type->data.error_union.payload_type : prev_type;
|
|
ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
|
|
if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
|
|
array_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0 ||
|
|
!cur_type->data.pointer.is_const);
|
|
if (!const_ok) make_the_slice_const = true;
|
|
convert_to_const_slice = false;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
// *[N]T to []T
|
|
// *[N]T to E![]T
|
|
if (prev_type->id == ZigTypeIdPointer &&
|
|
prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
prev_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
((cur_type->id == ZigTypeIdErrorUnion && is_slice(cur_type->data.error_union.payload_type)) ||
|
|
(cur_type->id == ZigTypeIdOptional && is_slice(cur_type->data.maybe.child_type)) ||
|
|
is_slice(cur_type)))
|
|
{
|
|
ZigType *array_type = prev_type->data.pointer.child_type;
|
|
ZigType *slice_type;
|
|
switch (cur_type->id) {
|
|
case ZigTypeIdErrorUnion:
|
|
slice_type = cur_type->data.error_union.payload_type;
|
|
break;
|
|
case ZigTypeIdOptional:
|
|
slice_type = cur_type->data.maybe.child_type;
|
|
break;
|
|
default:
|
|
slice_type = cur_type;
|
|
break;
|
|
}
|
|
ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
|
|
if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
|
|
array_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0 ||
|
|
!prev_type->data.pointer.is_const);
|
|
if (!const_ok) make_the_slice_const = true;
|
|
prev_inst = cur_inst;
|
|
convert_to_const_slice = false;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
// *[N]T and *[M]T
|
|
if (cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
(
|
|
prev_type->data.pointer.child_type->data.array.sentinel == nullptr ||
|
|
(cur_type->data.pointer.child_type->data.array.sentinel != nullptr &&
|
|
const_values_equal(ira->codegen, prev_type->data.pointer.child_type->data.array.sentinel,
|
|
cur_type->data.pointer.child_type->data.array.sentinel))
|
|
) &&
|
|
types_match_const_cast_only(ira,
|
|
cur_type->data.pointer.child_type->data.array.child_type,
|
|
prev_type->data.pointer.child_type->data.array.child_type,
|
|
source_node, !cur_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
bool const_ok = (cur_type->data.pointer.is_const || !prev_type->data.pointer.is_const ||
|
|
prev_type->data.pointer.child_type->data.array.len == 0);
|
|
if (!const_ok) make_the_slice_const = true;
|
|
prev_inst = cur_inst;
|
|
convert_to_const_slice = true;
|
|
continue;
|
|
}
|
|
if (prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
(
|
|
cur_type->data.pointer.child_type->data.array.sentinel == nullptr ||
|
|
(prev_type->data.pointer.child_type->data.array.sentinel != nullptr &&
|
|
const_values_equal(ira->codegen, cur_type->data.pointer.child_type->data.array.sentinel,
|
|
prev_type->data.pointer.child_type->data.array.sentinel))
|
|
) &&
|
|
types_match_const_cast_only(ira,
|
|
prev_type->data.pointer.child_type->data.array.child_type,
|
|
cur_type->data.pointer.child_type->data.array.child_type,
|
|
source_node, !prev_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
bool const_ok = (prev_type->data.pointer.is_const || !cur_type->data.pointer.is_const ||
|
|
cur_type->data.pointer.child_type->data.array.len == 0);
|
|
if (!const_ok) make_the_slice_const = true;
|
|
convert_to_const_slice = true;
|
|
continue;
|
|
}
|
|
|
|
if (prev_type->id == ZigTypeIdEnum && cur_type->id == ZigTypeIdUnion &&
|
|
(cur_type->data.unionation.decl_node->data.container_decl.auto_enum || cur_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
|
|
{
|
|
if ((err = type_resolve(ira->codegen, cur_type, ResolveStatusZeroBitsKnown)))
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
if (cur_type->data.unionation.tag_type == prev_type) {
|
|
continue;
|
|
}
|
|
}
|
|
|
|
if (cur_type->id == ZigTypeIdEnum && prev_type->id == ZigTypeIdUnion &&
|
|
(prev_type->data.unionation.decl_node->data.container_decl.auto_enum || prev_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
|
|
{
|
|
if ((err = type_resolve(ira->codegen, prev_type, ResolveStatusZeroBitsKnown)))
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
if (prev_type->data.unionation.tag_type == cur_type) {
|
|
prev_inst = cur_inst;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
ErrorMsg *msg = ir_add_error_node(ira, source_node,
|
|
buf_sprintf("incompatible types: '%s' and '%s'",
|
|
buf_ptr(&prev_type->name), buf_ptr(&cur_type->name)));
|
|
add_error_note(ira->codegen, msg, prev_inst->base.source_node,
|
|
buf_sprintf("type '%s' here", buf_ptr(&prev_type->name)));
|
|
add_error_note(ira->codegen, msg, cur_inst->base.source_node,
|
|
buf_sprintf("type '%s' here", buf_ptr(&cur_type->name)));
|
|
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
}
|
|
|
|
heap::c_allocator.deallocate(errors, errors_count);
|
|
|
|
if (convert_to_const_slice) {
|
|
if (prev_inst->value->type->id == ZigTypeIdPointer) {
|
|
ZigType *array_type = prev_inst->value->type->data.pointer.child_type;
|
|
src_assert(array_type->id == ZigTypeIdArray, source_node);
|
|
ZigType *ptr_type = get_pointer_to_type_extra2(
|
|
ira->codegen, array_type->data.array.child_type,
|
|
prev_inst->value->type->data.pointer.is_const || make_the_slice_const, false,
|
|
PtrLenUnknown,
|
|
0, 0, 0, false,
|
|
VECTOR_INDEX_NONE, nullptr, array_type->data.array.sentinel);
|
|
ZigType *slice_type = get_slice_type(ira->codegen, ptr_type);
|
|
if (err_set_type != nullptr) {
|
|
return get_error_union_type(ira->codegen, err_set_type, slice_type);
|
|
} else {
|
|
return slice_type;
|
|
}
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
} else if (err_set_type != nullptr) {
|
|
if (prev_inst->value->type->id == ZigTypeIdErrorSet) {
|
|
return err_set_type;
|
|
} else if (prev_inst->value->type->id == ZigTypeIdErrorUnion) {
|
|
ZigType *payload_type = prev_inst->value->type->data.error_union.payload_type;
|
|
if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown)))
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
return get_error_union_type(ira->codegen, err_set_type, payload_type);
|
|
} else if (expected_type != nullptr && expected_type->id == ZigTypeIdErrorUnion) {
|
|
ZigType *payload_type = expected_type->data.error_union.payload_type;
|
|
if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown)))
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
return get_error_union_type(ira->codegen, err_set_type, payload_type);
|
|
} else {
|
|
if (prev_inst->value->type->id == ZigTypeIdComptimeInt ||
|
|
prev_inst->value->type->id == ZigTypeIdComptimeFloat)
|
|
{
|
|
ir_add_error_node(ira, source_node,
|
|
buf_sprintf("unable to make error union out of number literal"));
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
} else if (prev_inst->value->type->id == ZigTypeIdNull) {
|
|
ir_add_error_node(ira, source_node,
|
|
buf_sprintf("unable to make error union out of null literal"));
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
} else {
|
|
if ((err = type_resolve(ira->codegen, prev_inst->value->type, ResolveStatusSizeKnown)))
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
return get_error_union_type(ira->codegen, err_set_type, prev_inst->value->type);
|
|
}
|
|
}
|
|
} else if (any_are_null && prev_inst->value->type->id != ZigTypeIdNull) {
|
|
if (prev_inst->value->type->id == ZigTypeIdOptional) {
|
|
return prev_inst->value->type;
|
|
} else {
|
|
if ((err = type_resolve(ira->codegen, prev_inst->value->type, ResolveStatusSizeKnown)))
|
|
return ira->codegen->builtin_types.entry_invalid;
|
|
return get_optional_type(ira->codegen, prev_inst->value->type);
|
|
}
|
|
} else if (make_the_slice_const) {
|
|
ZigType *slice_type;
|
|
if (prev_inst->value->type->id == ZigTypeIdErrorUnion) {
|
|
slice_type = prev_inst->value->type->data.error_union.payload_type;
|
|
} else if (is_slice(prev_inst->value->type)) {
|
|
slice_type = prev_inst->value->type;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
|
|
ZigType *adjusted_ptr_type = adjust_ptr_const(ira->codegen, slice_ptr_type, make_the_slice_const);
|
|
ZigType *adjusted_slice_type = get_slice_type(ira->codegen, adjusted_ptr_type);
|
|
if (prev_inst->value->type->id == ZigTypeIdErrorUnion) {
|
|
return get_error_union_type(ira->codegen, prev_inst->value->type->data.error_union.err_set_type,
|
|
adjusted_slice_type);
|
|
} else if (is_slice(prev_inst->value->type)) {
|
|
return adjusted_slice_type;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
} else if (make_the_pointer_const) {
|
|
return adjust_ptr_const(ira->codegen, prev_inst->value->type, make_the_pointer_const);
|
|
} else {
|
|
return prev_inst->value->type;
|
|
}
|
|
}
|
|
|
|
// Performs an implicit cast at compile time: converts `other_val` into
// `const_val` according to `cast_op`, tagging `const_val` with `new_type`
// where the op defines it. Returns false (after emitting a compile error)
// only for CastOpFloatToInt when the value does not fit in the destination
// integer type; returns true on success.
// NOTE(review): `other_type` appears unused in this body — callers seem to
// pass `other_val->type` separately; confirm before removing.
static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInst *source_instr,
        CastOp cast_op,
        ZigValue *other_val, ZigType *other_type,
        ZigValue *const_val, ZigType *new_type)
{
    // Propagate the value class first; several branches below overwrite this
    // with ConstValSpecialStatic once they have produced a concrete value.
    const_val->special = other_val->special;

    // Source and destination must be distinct storage; some branches write
    // into const_val while still reading from other_val.
    assert(other_val != const_val);
    switch (cast_op) {
        case CastOpNoCast:
            zig_unreachable();
        case CastOpErrSet:
        case CastOpBitCast:
            zig_panic("TODO");
        case CastOpNoop: {
            // Same representation: copy the value and retag the type.
            copy_const_val(ira->codegen, const_val, other_val);
            const_val->type = new_type;
            break;
        }
        case CastOpNumLitToConcrete:
            if (other_val->type->id == ZigTypeIdComptimeFloat) {
                // comptime_float -> concrete float of the requested bit width.
                assert(new_type->id == ZigTypeIdFloat);
                switch (new_type->data.floating.bit_count) {
                    case 16:
                        const_val->data.x_f16 = bigfloat_to_f16(&other_val->data.x_bigfloat);
                        break;
                    case 32:
                        const_val->data.x_f32 = bigfloat_to_f32(&other_val->data.x_bigfloat);
                        break;
                    case 64:
                        const_val->data.x_f64 = bigfloat_to_f64(&other_val->data.x_bigfloat);
                        break;
                    case 80:
                        zig_panic("TODO");
                    case 128:
                        const_val->data.x_f128 = bigfloat_to_f128(&other_val->data.x_bigfloat);
                        break;
                    default:
                        zig_unreachable();
                }
            } else if (other_val->type->id == ZigTypeIdComptimeInt) {
                // comptime_int -> concrete int: the big integer is copied as-is;
                // no range check happens here.
                bigint_init_bigint(&const_val->data.x_bigint, &other_val->data.x_bigint);
            } else {
                zig_unreachable();
            }
            const_val->type = new_type;
            break;
        case CastOpIntToFloat:
            if (new_type->id == ZigTypeIdFloat) {
                // Convert through an arbitrary-precision BigFloat, then narrow
                // to the destination bit width.
                BigFloat bigfloat;
                bigfloat_init_bigint(&bigfloat, &other_val->data.x_bigint);
                switch (new_type->data.floating.bit_count) {
                    case 16:
                        const_val->data.x_f16 = bigfloat_to_f16(&bigfloat);
                        break;
                    case 32:
                        const_val->data.x_f32 = bigfloat_to_f32(&bigfloat);
                        break;
                    case 64:
                        const_val->data.x_f64 = bigfloat_to_f64(&bigfloat);
                        break;
                    case 80:
                        zig_panic("TODO");
                    case 128:
                        const_val->data.x_f128 = bigfloat_to_f128(&bigfloat);
                        break;
                    default:
                        zig_unreachable();
                }
            } else if (new_type->id == ZigTypeIdComptimeFloat) {
                bigfloat_init_bigint(&const_val->data.x_bigfloat, &other_val->data.x_bigint);
            } else {
                zig_unreachable();
            }
            const_val->special = ConstValSpecialStatic;
            break;
        case CastOpFloatToInt:
            float_init_bigint(&const_val->data.x_bigint, other_val);
            if (new_type->id == ZigTypeIdInt) {
                // The only failing path: reject values that do not fit in the
                // destination's bit width / signedness.
                if (!bigint_fits_in_bits(&const_val->data.x_bigint, new_type->data.integral.bit_count,
                    new_type->data.integral.is_signed))
                {
                    Buf *int_buf = buf_alloc();
                    bigint_append_buf(int_buf, &const_val->data.x_bigint, 10);

                    ir_add_error(ira, source_instr,
                        buf_sprintf("integer value '%s' cannot be stored in type '%s'",
                            buf_ptr(int_buf), buf_ptr(&new_type->name)));
                    return false;
                }
            }

            const_val->special = ConstValSpecialStatic;
            break;
        case CastOpBoolToInt:
            bigint_init_unsigned(&const_val->data.x_bigint, other_val->data.x_bool ? 1 : 0);
            const_val->special = ConstValSpecialStatic;
            break;
    }
    return true;
}
|
|
|
|
// Creates a new comptime-known constant instruction of type `ty`, with the
// scope and source location of `inst`. The caller fills in the value payload.
// The instruction is recorded in the builder's constants list so it can be
// destroyed immediately after analysis.
static IrInstGen *ir_const(IrAnalyze *ira, IrInst *inst, ZigType *ty) {
    IrInstGenConst *konst = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
            inst->scope, inst->source_node);
    ira->new_irb.constants.append(&heap::c_allocator, konst);

    IrInstGen *result = &konst->base;
    result->value->special = ConstValSpecialStatic;
    result->value->type = ty;
    return result;
}
|
|
|
|
// Like ir_const, but does not allocate a ZigValue for the instruction;
// the caller is expected to supply one (see ir_const_move).
static IrInstGen *ir_const_noval(IrAnalyze *ira, IrInst *old_instruction) {
    IrInstGenConst *konst = ir_create_inst_noval<IrInstGenConst>(&ira->new_irb,
            old_instruction->scope, old_instruction->source_node);
    ira->new_irb.constants.append(&heap::c_allocator, konst);
    return &konst->base;
}
|
|
|
|
// This function initializes the new IrInstGen with the provided ZigValue,
|
|
// rather than creating a new one.
|
|
static IrInstGen *ir_const_move(IrAnalyze *ira, IrInst *old_instruction, ZigValue *val) {
|
|
IrInstGen *result = ir_const_noval(ira, old_instruction);
|
|
result->value = val;
|
|
return result;
|
|
}
|
|
|
|
// Resolves a cast of `value` to `wanted_type` using `cast_op`: folded at
// compile time when the operand is comptime-known or the destination type has
// no bits; otherwise lowered to a runtime cast instruction.
static IrInstGen *ir_resolve_cast(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value,
        ZigType *wanted_type, CastOp cast_op)
{
    // Runtime-known operand of a sized type: emit a runtime cast.
    if (!instr_is_comptime(value) && type_has_bits(ira->codegen, wanted_type))
        return ir_build_cast(ira, source_instr, wanted_type, value, cast_op);

    // Otherwise perform the cast right now, at compile time.
    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    ZigValue *operand_val = ir_resolve_const(ira, value, UndefBad);
    if (operand_val == nullptr)
        return ira->codegen->invalid_inst_gen;

    bool ok = eval_const_expr_implicit_cast(ira, source_instr, cast_op, operand_val,
            operand_val->type, result->value, wanted_type);
    if (!ok)
        return ira->codegen->invalid_inst_gen;
    return result;
}
|
|
|
|
// Casts a pointer-to-array value into an unknown-length pointer ([*]T).
// The result pointer type inherits the alignment of the source pointer.
// Comptime-known pointers are folded into a ConstPtrSpecialBaseArray value;
// runtime values become a bitcast instruction.
static IrInstGen *ir_resolve_ptr_of_array_to_unknown_len_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
    ir_assert(value->value->type->id == ZigTypeIdPointer, source_instr);

    Error err;

    // The array element type's alignment must be known to compute the
    // source pointer's alignment below.
    if ((err = type_resolve(ira->codegen, value->value->type->data.pointer.child_type,
                    ResolveStatusAlignmentKnown)))
    {
        return ira->codegen->invalid_inst_gen;
    }

    // Carry the source pointer's alignment over to the destination type.
    wanted_type = adjust_ptr_align(ira->codegen, wanted_type, get_ptr_align(ira->codegen, value->value->type));

    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;
        // An undefined pointer casts to an undefined result.
        if (val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, source_instr, wanted_type);

        ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, val, source_instr->source_node);
        if (pointee == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (pointee->special != ConstValSpecialRuntime) {
            // Fold: produce a comptime pointer to element 0 of the array,
            // preserving the source pointer's mutability.
            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            result->value->data.x_ptr.special = ConstPtrSpecialBaseArray;
            result->value->data.x_ptr.mut = val->data.x_ptr.mut;
            result->value->data.x_ptr.data.base_array.array_val = pointee;
            result->value->data.x_ptr.data.base_array.elem_index = 0;
            return result;
        }
    }

    // Runtime path: the representations are compatible, so a bitcast suffices.
    return ir_build_cast(ira, source_instr, wanted_type, value, CastOpBitCast);
}
|
|
|
|
// Casts a pointer-to-array (*[N]T) into a slice ([]T) of `wanted_type`.
// Comptime-known array pointers are folded into a constant slice value;
// otherwise a runtime ptr-of-array-to-slice instruction is emitted into
// `result_loc`.
static IrInstGen *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *array_ptr, ZigType *wanted_type, ResultLoc *result_loc)
{
    Error err;

    assert(array_ptr->value->type->id == ZigTypeIdPointer);
    assert(array_ptr->value->type->data.pointer.child_type->id == ZigTypeIdArray);

    ZigType *array_type = array_ptr->value->type->data.pointer.child_type;
    size_t array_len = array_type->data.array.len;

    // A zero-sized array can be casted regardless of the destination alignment, or
    // whether the pointer is undefined, and the result is always comptime known.
    // TODO However, this is exposing a result location bug that I failed to solve on the first try.
    // If you want to try to fix the bug, uncomment this block and get the tests passing.
    //if (array_len == 0 && array_type->data.array.sentinel == nullptr) {
    //    ZigValue *undef_array = ira->codegen->pass1_arena->create<ZigValue>();
    //    undef_array->special = ConstValSpecialUndef;
    //    undef_array->type = array_type;

    //    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    //    init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false);
    //    result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutComptimeConst;
    //    result->value->type = wanted_type;
    //    return result;
    //}

    if ((err = type_resolve(ira->codegen, array_ptr->value->type, ResolveStatusAlignmentKnown))) {
        return ira->codegen->invalid_inst_gen;
    }

    // A non-empty slice inherits the alignment of the source array pointer.
    if (array_len != 0) {
        wanted_type = adjust_slice_align(ira->codegen, wanted_type,
            get_ptr_align(ira->codegen, array_ptr->value->type));
    }

    if (instr_is_comptime(array_ptr)) {
        // An undefined pointer is tolerated only when the array is empty.
        UndefAllowed undef_allowed = (array_len == 0) ? UndefOk : UndefBad;
        ZigValue *array_ptr_val = ir_resolve_const(ira, array_ptr, undef_allowed);
        if (array_ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        ir_assert(is_slice(wanted_type), source_instr);
        if (array_ptr_val->special == ConstValSpecialUndef) {
            // Empty slice over a freshly-created undefined array value.
            ZigValue *undef_array = ira->codegen->pass1_arena->create<ZigValue>();
            undef_array->special = ConstValSpecialUndef;
            undef_array->type = array_type;

            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false);
            result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutComptimeConst;
            result->value->type = wanted_type;
            return result;
        }
        bool wanted_const = wanted_type->data.structure.fields[slice_ptr_index]->type_entry->data.pointer.is_const;
        // Optimization to avoid creating unnecessary ZigValue in const_ptr_pointee
        if (array_ptr_val->data.x_ptr.special == ConstPtrSpecialSubArray) {
            // The pointer already addresses a sub-range of a base array: slice
            // that base array directly, starting at the stored element index.
            ZigValue *array_val = array_ptr_val->data.x_ptr.data.base_array.array_val;
            if (array_val->special != ConstValSpecialRuntime) {
                IrInstGen *result = ir_const(ira, source_instr, wanted_type);
                init_const_slice(ira->codegen, result->value, array_val,
                        array_ptr_val->data.x_ptr.data.base_array.elem_index,
                        array_type->data.array.len, wanted_const);
                result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
                result->value->type = wanted_type;
                return result;
            }
        } else if (array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
            // General comptime pointer: dereference it and slice the whole
            // pointed-at array from index 0.
            ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, array_ptr_val, source_instr->source_node);
            if (pointee == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (pointee->special != ConstValSpecialRuntime) {
                assert(array_ptr_val->type->id == ZigTypeIdPointer);

                IrInstGen *result = ir_const(ira, source_instr, wanted_type);
                init_const_slice(ira->codegen, result->value, pointee, 0, array_type->data.array.len, wanted_const);
                result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
                result->value->type = wanted_type;
                return result;
            }
        }
    }

    // Runtime path: materialize a result location and emit the instruction.
    if (result_loc == nullptr) result_loc = no_result_loc();
    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) ||
        result_loc_inst->value->type->id == ZigTypeIdUnreachable)
    {
        return result_loc_inst;
    }
    return ir_build_ptr_of_array_to_slice(ira, source_instr, wanted_type, array_ptr, result_loc_inst);
}
|
|
|
|
// Returns the analyzed (gen) basic block corresponding to `old_bb`.
// An existing child block is reused unless it was created on behalf of the
// same referencing instruction, in which case a fresh block is built.
static IrBasicBlockGen *ir_get_new_bb(IrAnalyze *ira, IrBasicBlockSrc *old_bb, IrInst *ref_old_instruction) {
    assert(old_bb);

    IrBasicBlockGen *existing = old_bb->child;
    if (existing != nullptr) {
        bool same_ref = (ref_old_instruction != nullptr &&
                existing->ref_instruction == ref_old_instruction);
        if (!same_ref)
            return existing;
    }

    IrBasicBlockGen *fresh_bb = ir_build_bb_from(ira, old_bb);
    fresh_bb->ref_instruction = ref_old_instruction;

    return fresh_bb;
}
|
|
|
|
// Like ir_get_new_bb, but reports a compile error (returning nullptr) when the
// target block was marked as only reachable at compile time.
static IrBasicBlockGen *ir_get_new_bb_runtime(IrAnalyze *ira, IrBasicBlockSrc *old_bb, IrInst *ref_old_instruction) {
    assert(ref_old_instruction != nullptr);
    IrBasicBlockGen *new_bb = ir_get_new_bb(ira, old_bb, ref_old_instruction);
    if (new_bb->must_be_comptime_source_instr == nullptr)
        return new_bb;

    // The block depends on a comptime variable; reaching it at runtime is an error.
    ErrorMsg *msg = ir_add_error(ira, ref_old_instruction,
            buf_sprintf("control flow attempts to use compile-time variable at runtime"));
    add_error_note(ira->codegen, msg, new_bb->must_be_comptime_source_instr->source_node,
            buf_sprintf("compile-time variable assigned here"));
    return nullptr;
}
|
|
|
|
// Makes `old_bb` the current source block of the analysis, resetting the
// instruction cursor to its first instruction.
static void ir_start_bb(IrAnalyze *ira, IrBasicBlockSrc *old_bb, IrBasicBlockSrc *const_predecessor_bb) {
    // A suspended block must not be (re)entered.
    ir_assert(!old_bb->suspended, (old_bb->instruction_list.length != 0) ? &old_bb->instruction_list.at(0)->base : nullptr);
    ira->old_irb.current_basic_block = old_bb;
    ira->old_bb_index = old_bb->index;
    ira->instruction_index = 0;
    ira->const_predecessor_bb = const_predecessor_bb;
}
|
|
|
|
// Suspends analysis of the current basic block, recording the current
// (block, instruction) position into `suspend_pos` so it can be resumed later
// via the resume stack. If `next_bb` is provided, analysis immediately
// switches to that block. Always returns the sentinel unreach instruction.
static IrInstGen *ira_suspend(IrAnalyze *ira, IrInst *old_instruction, IrBasicBlockSrc *next_bb,
        IrSuspendPosition *suspend_pos)
{
    if (ira->codegen->verbose_ir) {
        // Debug trace: current block, next block, instruction id, and position.
        fprintf(stderr, "suspend %s_%" PRIu32 " %s_%" PRIu32 " #%" PRIu32 " (%zu,%zu)\n",
                ira->old_irb.current_basic_block->name_hint,
                ira->old_irb.current_basic_block->debug_id,
                ira->old_irb.exec->basic_block_list.at(ira->old_bb_index)->name_hint,
                ira->old_irb.exec->basic_block_list.at(ira->old_bb_index)->debug_id,
                ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index)->base.debug_id,
                ira->old_bb_index, ira->instruction_index);
    }
    // Record where to pick analysis back up.
    suspend_pos->basic_block_index = ira->old_bb_index;
    suspend_pos->instruction_index = ira->instruction_index;

    ira->old_irb.current_basic_block->suspended = true;

    // null next_bb means that the caller plans to call ira_resume before returning
    if (next_bb != nullptr) {
        // Switch the analysis cursor to the start of next_bb and create (or
        // fetch) its analyzed counterpart.
        ira->old_bb_index = next_bb->index;
        ira->old_irb.current_basic_block = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index);
        assert(ira->old_irb.current_basic_block == next_bb);
        ira->instruction_index = 0;
        ira->const_predecessor_bb = nullptr;
        next_bb->child = ir_get_new_bb_runtime(ira, next_bb, old_instruction);
        ira->new_irb.current_basic_block = next_bb->child;
    }
    return ira->codegen->unreach_instruction;
}
|
|
|
|
// Pop the most recent suspend position off the resume stack and continue
// analysis there. Counterpart to ira_suspend.
static IrInstGen *ira_resume(IrAnalyze *ira) {
    IrSuspendPosition pos = ira->resume_stack.pop();
    if (ira->codegen->verbose_ir) {
        fprintf(stderr, "resume (%zu,%zu) ", pos.basic_block_index, pos.instruction_index);
    }
    ira->old_bb_index = pos.basic_block_index;
    ira->old_irb.current_basic_block = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index);
    // Clear both suspension flags now that the block is being worked on again.
    assert(ira->old_irb.current_basic_block->in_resume_stack);
    ira->old_irb.current_basic_block->in_resume_stack = false;
    ira->old_irb.current_basic_block->suspended = false;
    ira->instruction_index = pos.instruction_index;
    // The suspended instruction must still exist in the block.
    assert(pos.instruction_index < ira->old_irb.current_basic_block->instruction_list.length);
    if (ira->codegen->verbose_ir) {
        fprintf(stderr, "%s_%" PRIu32 " #%" PRIu32 "\n", ira->old_irb.current_basic_block->name_hint,
                ira->old_irb.current_basic_block->debug_id,
                ira->old_irb.current_basic_block->instruction_list.at(pos.instruction_index)->base.debug_id);
    }
    ira->const_predecessor_bb = nullptr;
    // Emission resumes into the gen block created when this src block was
    // first started; it must already exist.
    ira->new_irb.current_basic_block = ira->old_irb.current_basic_block->child;
    assert(ira->new_irb.current_basic_block != nullptr);
    return ira->codegen->unreach_instruction;
}
|
|
|
|
// Advance analysis to the next source basic block that still needs work.
// Scans forward through the block list (wrapping around at most once) and
// skips blocks that are already done, suspended, or never reached at runtime.
// Draining the resume stack takes priority over starting fresh blocks.
static void ir_start_next_bb(IrAnalyze *ira) {
    ira->old_bb_index += 1;

    bool need_repeat = true;
    for (;;) {
        while (ira->old_bb_index < ira->old_irb.exec->basic_block_list.length) {
            IrBasicBlockSrc *old_bb = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index);
            // No gen counterpart and no suspended instruction referencing it:
            // nothing branched here at runtime, so there is nothing to analyze.
            if (old_bb->child == nullptr && old_bb->suspend_instruction_ref == nullptr) {
                ira->old_bb_index += 1;
                continue;
            }
            // if it's already started, or
            // if it's a suspended block,
            // then skip it
            if (old_bb->suspended ||
                (old_bb->child != nullptr && old_bb->child->instruction_list.length != 0) ||
                (old_bb->child != nullptr && old_bb->child->already_appended))
            {
                ira->old_bb_index += 1;
                continue;
            }

            // if there is a resume_stack, pop one from there rather than moving on.
            // the last item of the resume stack will be a basic block that will
            // move on to the next one below
            if (ira->resume_stack.length != 0) {
                ira_resume(ira);
                return;
            }

            if (old_bb->child == nullptr) {
                old_bb->child = ir_get_new_bb_runtime(ira, old_bb, old_bb->suspend_instruction_ref);
            }
            ira->new_irb.current_basic_block = old_bb->child;
            ir_start_bb(ira, old_bb, nullptr);
            return;
        }
        // Reached the end of the block list. Do one wrap-around pass in case
        // earlier blocks became workable; after that, fall back to the resume
        // stack (if anything is left) and finish.
        if (!need_repeat) {
            if (ira->resume_stack.length != 0) {
                ira_resume(ira);
            }
            return;
        }
        need_repeat = false;
        ira->old_bb_index = 0;
        continue;
    }
}
|
|
|
|
// Finish analyzing the current basic block: append its gen counterpart to the
// output executable (if not already appended), report any user-written code
// after the terminator as unreachable, then advance to the next block.
static void ir_finish_bb(IrAnalyze *ira) {
    if (!ira->new_irb.current_basic_block->already_appended) {
        ir_append_basic_block_gen(&ira->new_irb, ira->new_irb.current_basic_block);
        if (ira->codegen->verbose_ir) {
            fprintf(stderr, "append new bb %s_%" PRIu32 "\n", ira->new_irb.current_basic_block->name_hint,
                    ira->new_irb.current_basic_block->debug_id);
        }
    }
    // Scan past the terminator: any remaining instruction that was not
    // generated by the compiler itself (is_gen) is user-written dead code.
    ira->instruction_index += 1;
    while (ira->instruction_index < ira->old_irb.current_basic_block->instruction_list.length) {
        IrInstSrc *next_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index);
        if (!next_instruction->is_gen) {
            ir_add_error(ira, &next_instruction->base, buf_sprintf("unreachable code"));
            break;
        }
        ira->instruction_index += 1;
    }

    ir_start_next_bb(ira);
}
|
|
|
|
// Abort analysis with an error: park the block index past the end of the
// list and remember the current error trace message if none was recorded yet.
static IrInstGen *ir_unreach_error(IrAnalyze *ira) {
    ira->old_bb_index = SIZE_MAX;
    IrExecutableGen *exec = ira->new_irb.exec;
    if (exec->first_err_trace_msg == nullptr) {
        exec->first_err_trace_msg = ira->codegen->trace_err;
    }
    return ira->codegen->unreach_instruction;
}
|
|
|
|
// Charge one backward branch against the comptime evaluation quota.
// Returns false (after emitting an error on the first overflow) when the
// quota is exhausted.
static bool ir_emit_backward_branch(IrAnalyze *ira, IrInst* source_instruction) {
    size_t *count = ira->new_irb.exec->backward_branch_count;
    size_t *quota = ira->new_irb.exec->backward_branch_quota;

    if (*count > *quota) {
        // Already over quota; the error was reported when it first happened.
        assert(ira->codegen->errors.length > 0);
        return false;
    }

    *count += 1;
    if (*count <= *quota)
        return true;

    ir_add_error(ira, source_instruction,
        buf_sprintf("evaluation exceeded %" ZIG_PRI_usize " backwards branches", *quota));
    return false;
}
|
|
|
|
// Continue comptime analysis by inlining `old_bb` at the current position.
static IrInstGen *ir_inline_bb(IrAnalyze *ira, IrInst* source_instruction, IrBasicBlockSrc *old_bb) {
    // A jump to a block with a lower-or-equal debug id is a backward branch;
    // charge it against the comptime branch quota.
    bool is_backward = old_bb->debug_id <= ira->old_irb.current_basic_block->debug_id;
    if (is_backward && !ir_emit_backward_branch(ira, source_instruction))
        return ir_unreach_error(ira);

    // The inlined block emits into the same gen block we are already in.
    old_bb->child = ira->old_irb.current_basic_block->child;
    ir_start_bb(ira, old_bb, ira->old_irb.current_basic_block);
    return ira->codegen->unreach_instruction;
}
|
|
|
|
// Pass an analyzed instruction through; if it is a block terminator
// (noreturn), finish the current basic block first.
static IrInstGen *ir_finish_anal(IrAnalyze *ira, IrInstGen *instruction) {
    bool terminates_block = instruction->value->type->id == ZigTypeIdUnreachable;
    if (terminates_block)
        ir_finish_bb(ira);
    return instruction;
}
|
|
|
|
// Build a comptime-const function-pointer value for `fn_entry`.
static IrInstGen *ir_const_fn(IrAnalyze *ira, IrInst *source_instr, ZigFn *fn_entry) {
    IrInstGen *result = ir_const(ira, source_instr, fn_entry->type_entry);
    ZigValue *val = result->value;
    val->special = ConstValSpecialStatic;
    val->data.x_ptr.special = ConstPtrSpecialFunction;
    val->data.x_ptr.mut = ConstPtrMutComptimeConst;
    val->data.x_ptr.data.fn.fn_entry = fn_entry;
    return result;
}
|
|
|
|
// Build a comptime value representing `fn_entry` bound to its first argument.
static IrInstGen *ir_const_bound_fn(IrAnalyze *ira, IrInst *src_inst, ZigFn *fn_entry, IrInstGen *first_arg,
        IrInst *first_arg_src)
{
    // This is unfortunately required to avoid improperly freeing first_arg_src
    ira_ref(ira);

    IrInstGen *result = ir_const(ira, src_inst, get_bound_fn_type(ira->codegen, fn_entry));
    ZigValue *bound = result->value;
    bound->data.x_bound_fn.fn = fn_entry;
    bound->data.x_bound_fn.first_arg = first_arg;
    bound->data.x_bound_fn.first_arg_src = first_arg_src;
    return result;
}
|
|
|
|
// Build a comptime constant of type `type` whose value is `ty`.
static IrInstGen *ir_const_type(IrAnalyze *ira, IrInst *source_instruction, ZigType *ty) {
    IrInstGen *inst = ir_const(ira, source_instruction, ira->codegen->builtin_types.entry_type);
    inst->value->data.x_type = ty;
    return inst;
}
|
|
|
|
// Build a comptime bool constant.
static IrInstGen *ir_const_bool(IrAnalyze *ira, IrInst *source_instruction, bool value) {
    IrInstGen *inst = ir_const(ira, source_instruction, ira->codegen->builtin_types.entry_bool);
    inst->value->data.x_bool = value;
    return inst;
}
|
|
|
|
// Build a comptime `undefined` constant of type `ty`.
static IrInstGen *ir_const_undef(IrAnalyze *ira, IrInst *source_instruction, ZigType *ty) {
    IrInstGen *inst = ir_const(ira, source_instruction, ty);
    inst->value->special = ConstValSpecialUndef;
    return inst;
}
|
|
|
|
// Build a comptime `unreachable` constant, reusing the interned value.
static IrInstGen *ir_const_unreachable(IrAnalyze *ira, IrInst *source_instruction) {
    IrInstGen *inst = ir_const_noval(ira, source_instruction);
    inst->value = ira->codegen->intern.for_unreachable();
    return inst;
}
|
|
|
|
// Build a comptime `void` constant, reusing the interned value.
static IrInstGen *ir_const_void(IrAnalyze *ira, IrInst *source_instruction) {
    IrInstGen *inst = ir_const_noval(ira, source_instruction);
    inst->value = ira->codegen->intern.for_void();
    return inst;
}
|
|
|
|
// Build a comptime_int constant from an unsigned integer.
static IrInstGen *ir_const_unsigned(IrAnalyze *ira, IrInst *source_instruction, uint64_t value) {
    ZigType *comptime_int = ira->codegen->builtin_types.entry_num_lit_int;
    IrInstGen *inst = ir_const(ira, source_instruction, comptime_int);
    bigint_init_unsigned(&inst->value->data.x_bigint, value);
    return inst;
}
|
|
|
|
// Build a comptime single-item pointer constant that references `pointee`.
static IrInstGen *ir_get_const_ptr(IrAnalyze *ira, IrInst *instruction,
        ZigValue *pointee, ZigType *pointee_type,
        ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align)
{
    ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
            ptr_is_const, ptr_is_volatile, PtrLenSingle, ptr_align, 0, 0, false);
    IrInstGen *result = ir_const(ira, instruction, ptr_type);
    ZigValue *ptr_val = result->value;
    ptr_val->data.x_ptr.special = ConstPtrSpecialRef;
    ptr_val->data.x_ptr.mut = ptr_mut;
    ptr_val->data.x_ptr.data.ref.pointee = pointee;
    return result;
}
|
|
|
|
// Require `val` to be compile-time known, forcing lazy values as needed.
// `undef_allowed` controls whether undefined and/or lazy values are
// acceptable as-is. Emits an error and returns ErrorSemanticAnalyzeFail when
// the value cannot be used at comptime.
static Error ir_resolve_const_val(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node,
        ZigValue *val, UndefAllowed undef_allowed)
{
    Error err;
    for (;;) {
        switch (val->special) {
            case ConstValSpecialStatic:
                return ErrorNone;
            case ConstValSpecialRuntime:
                // Zero-bit values are known even when marked runtime.
                if (!type_has_bits(codegen, val->type))
                    return ErrorNone;

                exec_add_error_node_gen(codegen, exec, source_node,
                        buf_sprintf("unable to evaluate constant expression"));
                return ErrorSemanticAnalyzeFail;
            case ConstValSpecialUndef:
                if (undef_allowed == UndefOk || undef_allowed == LazyOk)
                    return ErrorNone;

                exec_add_error_node_gen(codegen, exec, source_node,
                        buf_sprintf("use of undefined value here causes undefined behavior"));
                return ErrorSemanticAnalyzeFail;
            case ConstValSpecialLazy:
                if (undef_allowed == LazyOk || undef_allowed == LazyOkNoUndef)
                    return ErrorNone;

                // Force the lazy value, then loop to re-inspect its new state.
                if ((err = ir_resolve_lazy(codegen, source_node, val)))
                    return err;

                continue;
        }
    }
}
|
|
|
|
// Resolve `value` to a comptime-known ZigValue, or null on failure
// (the error has already been emitted by ir_resolve_const_val).
static ZigValue *ir_resolve_const(IrAnalyze *ira, IrInstGen *value, UndefAllowed undef_allowed) {
    Error err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
            value->base.source_node, value->value, undef_allowed);
    return (err != ErrorNone) ? nullptr : value->value;
}
|
|
|
|
// Evaluate the AST expression `node` at compile time, writing the result
// through `return_ptr` (a pointer-typed ZigValue whose child type is the
// expected result type). Generates source IR for the node, analyzes it into a
// gen executable, checks it for side effects, and finally resolves the value
// behind return_ptr. Returns ErrorNone on success.
Error ir_eval_const_value(CodeGen *codegen, Scope *scope, AstNode *node,
        ZigValue *return_ptr, size_t *backward_branch_count, size_t *backward_branch_quota,
        ZigFn *fn_entry, Buf *c_import_buf, AstNode *source_node, Buf *exec_name,
        IrExecutableGen *parent_exec, AstNode *expected_type_source_node, UndefAllowed undef_allowed)
{
    Error err;

    src_assert(return_ptr->type->id == ZigTypeIdPointer, source_node);

    if (type_is_invalid(return_ptr->type))
        return ErrorSemanticAnalyzeFail;

    // Pass 1: generate source IR for the expression.
    IrExecutableSrc *ir_executable = heap::c_allocator.create<IrExecutableSrc>();
    ir_executable->source_node = source_node;
    ir_executable->parent_exec = parent_exec;
    ir_executable->name = exec_name;
    ir_executable->is_inline = true;
    ir_executable->fn_entry = fn_entry;
    ir_executable->c_import_buf = c_import_buf;
    ir_executable->begin_scope = scope;

    if (!ir_gen(codegen, node, scope, ir_executable))
        return ErrorSemanticAnalyzeFail;

    if (ir_executable->first_err_trace_msg != nullptr) {
        codegen->trace_err = ir_executable->first_err_trace_msg;
        return ErrorSemanticAnalyzeFail;
    }

    if (codegen->verbose_ir) {
        fprintf(stderr, "\nSource: ");
        ast_render(stderr, node, 4);
        fprintf(stderr, "\n{ // (IR)\n");
        ir_print_src(codegen, stderr, ir_executable, 2);
        fprintf(stderr, "}\n");
    }
    // Pass 2: analyze the source IR into gen IR. Both executables are marked
    // is_inline; the caller-supplied quota pointers bound comptime execution.
    IrExecutableGen *analyzed_executable = heap::c_allocator.create<IrExecutableGen>();
    analyzed_executable->source_node = source_node;
    analyzed_executable->parent_exec = parent_exec;
    analyzed_executable->source_exec = ir_executable;
    analyzed_executable->name = exec_name;
    analyzed_executable->is_inline = true;
    analyzed_executable->fn_entry = fn_entry;
    analyzed_executable->c_import_buf = c_import_buf;
    analyzed_executable->backward_branch_count = backward_branch_count;
    analyzed_executable->backward_branch_quota = backward_branch_quota;
    analyzed_executable->begin_scope = scope;
    ZigType *result_type = ir_analyze(codegen, ir_executable, analyzed_executable,
            return_ptr->type->data.pointer.child_type, expected_type_source_node, return_ptr);
    if (type_is_invalid(result_type)) {
        return ErrorSemanticAnalyzeFail;
    }

    if (codegen->verbose_ir) {
        fprintf(stderr, "{ // (analyzed)\n");
        ir_print_gen(codegen, stderr, analyzed_executable, 2);
        fprintf(stderr, "}\n");
    }

    // Comptime-evaluated code must not contain runtime side effects.
    if ((err = ir_exec_scan_for_side_effects(codegen, analyzed_executable)))
        return err;

    // Read the final value back out through the result pointer and require it
    // to be comptime-known (per undef_allowed).
    ZigValue *result = const_ptr_pointee(nullptr, codegen, return_ptr, source_node);
    if (result == nullptr)
        return ErrorSemanticAnalyzeFail;
    if ((err = ir_resolve_const_val(codegen, analyzed_executable, node, result, undef_allowed)))
        return err;

    return ErrorNone;
}
|
|
|
|
// Resolve `err_value` to a comptime-known error table entry, or null on
// failure (a diagnostic has been emitted where appropriate).
static ErrorTableEntry *ir_resolve_error(IrAnalyze *ira, IrInstGen *err_value) {
    ZigType *ty = err_value->value->type;
    if (type_is_invalid(ty))
        return nullptr;

    if (ty->id != ZigTypeIdErrorSet) {
        ir_add_error_node(ira, err_value->base.source_node,
                buf_sprintf("expected error, found '%s'", buf_ptr(&ty->name)));
        return nullptr;
    }

    ZigValue *const_val = ir_resolve_const(ira, err_value, UndefBad);
    if (const_val == nullptr)
        return nullptr;

    assert(const_val->data.x_err_set != nullptr);
    return const_val->data.x_err_set;
}
|
|
|
|
// Resolve a comptime value of type `type` to the ZigType it holds, or the
// invalid type sentinel on failure.
static ZigType *ir_resolve_const_type(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node,
        ZigValue *val)
{
    if (ir_resolve_const_val(codegen, exec, source_node, val, UndefBad) != ErrorNone)
        return codegen->builtin_types.entry_invalid;

    ZigType *result = val->data.x_type;
    assert(result != nullptr);
    return result;
}
|
|
|
|
// Require `type_value` to be a value of type `type`, without forcing lazy
// values. Returns the underlying ZigValue, or null on failure.
static ZigValue *ir_resolve_type_lazy(IrAnalyze *ira, IrInstGen *type_value) {
    ZigType *actual_type = type_value->value->type;
    if (type_is_invalid(actual_type))
        return nullptr;

    if (actual_type->id != ZigTypeIdMetaType) {
        ir_add_error_node(ira, type_value->base.source_node,
                buf_sprintf("expected type 'type', found '%s'", buf_ptr(&actual_type->name)));
        return nullptr;
    }

    // Lazy values are acceptable here; the caller decides when to force them.
    if (ir_resolve_const_val(ira->codegen, ira->new_irb.exec, type_value->base.source_node,
                type_value->value, LazyOk) != ErrorNone)
    {
        return nullptr;
    }

    return type_value->value;
}
|
|
|
|
// Fully resolve `type_value` to a ZigType, forcing lazy values; returns the
// invalid type sentinel on failure.
static ZigType *ir_resolve_type(IrAnalyze *ira, IrInstGen *type_value) {
    ZigValue *val = ir_resolve_type_lazy(ira, type_value);
    if (val == nullptr)
        return ira->codegen->builtin_types.entry_invalid;
    return ir_resolve_const_type(ira->codegen, ira->new_irb.exec, type_value->base.source_node, val);
}
|
|
|
|
// Check that `elem_type` is usable as a vector element type, emitting an
// error at `source_node` when it is not.
static Error ir_validate_vector_elem_type(IrAnalyze *ira, AstNode *source_node, ZigType *elem_type) {
    bool is_valid;
    Error err = is_valid_vector_elem_type(ira->codegen, elem_type, &is_valid);
    if (err != ErrorNone)
        return err;
    if (is_valid)
        return ErrorNone;

    ir_add_error_node(ira, source_node,
            buf_sprintf("vector element type must be integer, float, bool, or pointer; '%s' is invalid",
                buf_ptr(&elem_type->name)));
    return ErrorSemanticAnalyzeFail;
}
|
|
|
|
// Resolve a type value and validate it as a vector element type.
static ZigType *ir_resolve_vector_elem_type(IrAnalyze *ira, IrInstGen *elem_type_value) {
    ZigType *invalid = ira->codegen->builtin_types.entry_invalid;

    ZigType *elem_type = ir_resolve_type(ira, elem_type_value);
    if (type_is_invalid(elem_type))
        return invalid;
    if (ir_validate_vector_elem_type(ira, elem_type_value->base.source_node, elem_type) != ErrorNone)
        return invalid;
    return elem_type;
}
|
|
|
|
// Resolve a type value and require it to be an integer type. Adds a helpful
// note when the user passed a vector of integers instead.
static ZigType *ir_resolve_int_type(IrAnalyze *ira, IrInstGen *type_value) {
    ZigType *ty = ir_resolve_type(ira, type_value);
    if (type_is_invalid(ty))
        return ira->codegen->builtin_types.entry_invalid;

    if (ty->id == ZigTypeIdInt)
        return ty;

    ErrorMsg *msg = ir_add_error_node(ira, type_value->base.source_node,
            buf_sprintf("expected integer type, found '%s'", buf_ptr(&ty->name)));
    bool is_int_vector = ty->id == ZigTypeIdVector &&
            ty->data.vector.elem_type->id == ZigTypeIdInt;
    if (is_int_vector) {
        add_error_note(ira->codegen, msg, type_value->base.source_node,
                buf_sprintf("represent vectors with their element types, i.e. '%s'",
                    buf_ptr(&ty->data.vector.elem_type->name)));
    }
    return ira->codegen->builtin_types.entry_invalid;
}
|
|
|
|
// Resolve a type value and require it to be an error set type. Both failure
// paths add a note explaining `||` vs `or`, since this is reached from the
// error-set-merge operator.
static ZigType *ir_resolve_error_set_type(IrAnalyze *ira, IrInst *op_source, IrInstGen *type_value) {
    ZigType *invalid = ira->codegen->builtin_types.entry_invalid;

    if (type_is_invalid(type_value->value->type))
        return invalid;

    if (type_value->value->type->id != ZigTypeIdMetaType) {
        ErrorMsg *msg = ir_add_error_node(ira, type_value->base.source_node,
                buf_sprintf("expected error set type, found '%s'", buf_ptr(&type_value->value->type->name)));
        add_error_note(ira->codegen, msg, op_source->source_node,
                buf_sprintf("`||` merges error sets; `or` performs boolean OR"));
        return invalid;
    }

    ZigValue *const_val = ir_resolve_const(ira, type_value, UndefBad);
    if (const_val == nullptr)
        return invalid;

    assert(const_val->data.x_type != nullptr);
    ZigType *result_type = const_val->data.x_type;
    if (result_type->id == ZigTypeIdErrorSet)
        return result_type;

    ErrorMsg *msg = ir_add_error_node(ira, type_value->base.source_node,
            buf_sprintf("expected error set type, found type '%s'", buf_ptr(&result_type->name)));
    add_error_note(ira->codegen, msg, op_source->source_node,
            buf_sprintf("`||` merges error sets; `or` performs boolean OR"));
    return invalid;
}
|
|
|
|
// Resolve `fn_value` to a comptime-known ZigFn, or null. Note: a hard-coded
// address function pointer also yields null, without an error.
static ZigFn *ir_resolve_fn(IrAnalyze *ira, IrInstGen *fn_value) {
    ZigType *fn_type = fn_value->value->type;
    if (type_is_invalid(fn_type))
        return nullptr;

    if (fn_type->id != ZigTypeIdFn) {
        ir_add_error_node(ira, fn_value->base.source_node,
                buf_sprintf("expected function type, found '%s'", buf_ptr(&fn_type->name)));
        return nullptr;
    }

    ZigValue *const_val = ir_resolve_const(ira, fn_value, UndefBad);
    if (const_val == nullptr)
        return nullptr;

    // May be a ConstPtrSpecialHardCodedAddr
    if (const_val->data.x_ptr.special != ConstPtrSpecialFunction)
        return nullptr;

    return const_val->data.x_ptr.data.fn.fn_entry;
}
|
|
|
|
// Wrap `value` into the optional type `wanted_type` (payload -> ?payload).
// Comptime values fold to constants; runtime values produce an optional_wrap
// instruction carrying a non-null runtime hint.
static IrInstGen *ir_analyze_optional_wrap(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type, ResultLoc *result_loc)
{
    assert(wanted_type->id == ZigTypeIdOptional);

    if (instr_is_comptime(value)) {
        ZigType *payload_type = wanted_type->data.maybe.child_type;
        IrInstGen *casted_payload = ir_implicit_cast(ira, value, payload_type);
        if (type_is_invalid(casted_payload->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigValue *val = ir_resolve_const(ira, casted_payload, UndefOk);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->special = ConstValSpecialStatic;
        // When the optional shares the payload's comptime representation,
        // copy the payload value directly; otherwise store it behind the
        // optional's x_optional slot.
        if (types_have_same_zig_comptime_repr(ira->codegen, wanted_type, payload_type)) {
            copy_const_val(ira->codegen, const_instruction->base.value, val);
        } else {
            const_instruction->base.value->data.x_optional = val;
        }
        const_instruction->base.value->type = wanted_type;
        return &const_instruction->base;
    }

    // Memory-backed optionals need a result location to be written into.
    if (result_loc == nullptr && handle_is_ptr(ira->codegen, wanted_type)) {
        result_loc = no_result_loc();
    }
    IrInstGen *result_loc_inst = nullptr;
    if (result_loc != nullptr) {
        result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) ||
            result_loc_inst->value->type->id == ZigTypeIdUnreachable)
        {
            return result_loc_inst;
        }
    }
    IrInstGen *result = ir_build_optional_wrap(ira, source_instr, wanted_type, value, result_loc_inst);
    result->value->data.rh_maybe = RuntimeHintOptionalNonNull;
    return result;
}
|
|
|
|
// Wrap a payload value into the error union type `wanted_type`
// (payload -> E!payload) with a null error set — the non-error case.
static IrInstGen *ir_analyze_err_wrap_payload(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type, ResultLoc *result_loc)
{
    assert(wanted_type->id == ZigTypeIdErrorUnion);

    ZigType *payload_type = wanted_type->data.error_union.payload_type;
    ZigType *err_set_type = wanted_type->data.error_union.err_set_type;
    if (instr_is_comptime(value)) {
        IrInstGen *casted_payload = ir_implicit_cast(ira, value, payload_type);
        if (type_is_invalid(casted_payload->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigValue *val = ir_resolve_const(ira, casted_payload, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // x_err_set == nullptr encodes "no error" in the error set value.
        ZigValue *err_set_val = ira->codegen->pass1_arena->create<ZigValue>();
        err_set_val->type = err_set_type;
        err_set_val->special = ConstValSpecialStatic;
        err_set_val->data.x_err_set = nullptr;

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->type = wanted_type;
        const_instruction->base.value->special = ConstValSpecialStatic;
        const_instruction->base.value->data.x_err_union.error_set = err_set_val;
        const_instruction->base.value->data.x_err_union.payload = val;
        return &const_instruction->base;
    }

    // Memory-backed error unions need a result location to be written into.
    IrInstGen *result_loc_inst;
    if (handle_is_ptr(ira->codegen, wanted_type)) {
        if (result_loc == nullptr) result_loc = no_result_loc();
        result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) ||
            result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
            return result_loc_inst;
        }
    } else {
        result_loc_inst = nullptr;
    }

    IrInstGen *result = ir_build_err_wrap_payload(ira, source_instr, wanted_type, value, result_loc_inst);
    result->value->data.rh_error_union = RuntimeHintErrorUnionNonError;
    return result;
}
|
|
|
|
// Cast an error-set value to another error-set type. For comptime values this
// verifies the error is a member of the destination set (unless the
// destination is the global error set); runtime values get a cast instruction.
static IrInstGen *ir_analyze_err_set_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *wanted_type)
{
    assert(value->value->type->id == ZigTypeIdErrorSet);
    assert(wanted_type->id == ZigTypeIdErrorSet);

    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        // Inferred error sets must be fully resolved before membership checks.
        if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
            return ira->codegen->invalid_inst_gen;
        }
        if (!type_is_global_error_set(wanted_type)) {
            // Linear scan of the destination set for this error value.
            bool subset = false;
            for (uint32_t i = 0, count = wanted_type->data.error_set.err_count; i < count; i += 1) {
                if (wanted_type->data.error_set.errors[i]->value == val->data.x_err_set->value) {
                    subset = true;
                    break;
                }
            }
            if (!subset) {
                ir_add_error(ira, source_instr,
                    buf_sprintf("error.%s not a member of error set '%s'",
                        buf_ptr(&val->data.x_err_set->name), buf_ptr(&wanted_type->name)));
                return ira->codegen->invalid_inst_gen;
            }
        }

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->type = wanted_type;
        const_instruction->base.value->special = ConstValSpecialStatic;
        const_instruction->base.value->data.x_err_set = val->data.x_err_set;
        return &const_instruction->base;
    }

    return ir_build_cast(ira, source_instr, wanted_type, value, CastOpErrSet);
}
|
|
|
|
// Cast a pointer-to-frame value to an anyframe type via bitcast. The comptime
// case is not implemented (only runtime-var frame pointers are accepted).
static IrInstGen *ir_analyze_frame_ptr_to_anyframe(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *frame_ptr, ZigType *wanted_type)
{
    if (instr_is_comptime(frame_ptr)) {
        ZigValue *ptr_val = ir_resolve_const(ira, frame_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ir_assert(ptr_val->type->id == ZigTypeIdPointer, source_instr);
        if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar)
            zig_panic("TODO comptime frame pointer");
    }

    return ir_build_cast(ira, source_instr, wanted_type, frame_ptr, CastOpBitCast);
}
|
|
|
|
// Cast anyframe->T to anyframe via bitcast; the comptime case is not
// implemented yet.
static IrInstGen *ir_analyze_anyframe_to_anyframe(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
    if (instr_is_comptime(value))
        zig_panic("TODO comptime anyframe->T to anyframe");

    return ir_build_cast(ira, source_instr, wanted_type, value, CastOpBitCast);
}
|
|
|
|
|
|
// Wrap an error value into the error union type `wanted_type`
// (error -> E!payload) with a null payload — the error case.
static IrInstGen *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *wanted_type, ResultLoc *result_loc)
{
    assert(wanted_type->id == ZigTypeIdErrorUnion);

    IrInstGen *casted_value = ir_implicit_cast(ira, value, wanted_type->data.error_union.err_set_type);

    if (instr_is_comptime(casted_value)) {
        ZigValue *val = ir_resolve_const(ira, casted_value, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        ZigValue *err_set_val = ira->codegen->pass1_arena->create<ZigValue>();
        err_set_val->special = ConstValSpecialStatic;
        err_set_val->type = wanted_type->data.error_union.err_set_type;
        err_set_val->data.x_err_set = val->data.x_err_set;

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->type = wanted_type;
        const_instruction->base.value->special = ConstValSpecialStatic;
        const_instruction->base.value->data.x_err_union.error_set = err_set_val;
        // A null payload marks the error case of the error union.
        const_instruction->base.value->data.x_err_union.payload = nullptr;
        return &const_instruction->base;
    }

    // Memory-backed error unions need a result location to be written into.
    IrInstGen *result_loc_inst;
    if (handle_is_ptr(ira->codegen, wanted_type)) {
        if (result_loc == nullptr) result_loc = no_result_loc();
        result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) ||
            result_loc_inst->value->type->id == ZigTypeIdUnreachable)
        {
            return result_loc_inst;
        }
    } else {
        result_loc_inst = nullptr;
    }

    IrInstGen *result = ir_build_err_wrap_code(ira, source_instr, wanted_type, value, result_loc_inst);
    result->value->data.rh_error_union = RuntimeHintErrorUnionError;
    return result;
}
|
|
|
|
// Cast a comptime `null` literal to the optional type `wanted_type`.
static IrInstGen *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value, ZigType *wanted_type) {
    assert(wanted_type->id == ZigTypeIdOptional);
    assert(instr_is_comptime(value));

    ZigValue *val = ir_resolve_const(ira, value, UndefBad);
    assert(val != nullptr);

    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    ZigValue *result_val = result->value;
    result_val->special = ConstValSpecialStatic;

    // The encoding of "null" depends on the optional's representation.
    if (get_src_ptr_type(wanted_type) != nullptr) {
        result_val->data.x_ptr.special = ConstPtrSpecialNull;   // pointer-like optional
    } else if (is_opt_err_set(wanted_type)) {
        result_val->data.x_err_set = nullptr;                   // optional error set
    } else {
        result_val->data.x_optional = nullptr;                  // general optional
    }
    return result;
}
|
|
|
|
// Cast a comptime `null` literal to a C pointer type (C pointers are
// nullable without being optionals).
static IrInstGen *ir_analyze_null_to_c_pointer(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
    assert(wanted_type->id == ZigTypeIdPointer);
    assert(wanted_type->data.pointer.ptr_len == PtrLenC);
    assert(instr_is_comptime(value));

    ZigValue *val = ir_resolve_const(ira, value, UndefBad);
    assert(val != nullptr);

    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    ZigValue *ptr_val = result->value;
    ptr_val->data.x_ptr.special = ConstPtrSpecialNull;
    ptr_val->data.x_ptr.mut = ConstPtrMutComptimeConst;
    return result;
}
|
|
|
|
// Produce a pointer to `value` with element type `elem_type`. Comptime-known
// values become comptime-const pointers; runtime values get a result location
// (when one is needed) and a ref instruction with a stack-pointer hint.
static IrInstGen *ir_get_ref2(IrAnalyze *ira, IrInst* source_instruction, IrInstGen *value,
        ZigType *elem_type, bool is_const, bool is_volatile)
{
    Error err;

    if (type_is_invalid(elem_type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, LazyOk);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        return ir_get_const_ptr(ira, source_instruction, val, elem_type,
                ConstPtrMutComptimeConst, is_const, is_volatile, 0);
    }

    ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, elem_type,
            is_const, is_volatile, PtrLenSingle, 0, 0, 0, false);

    if ((err = type_resolve(ira->codegen, ptr_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    // Values that are not already memory-backed need a result location
    // (a place in memory) for the pointer to refer to.
    IrInstGen *result_loc;
    if (type_has_bits(ira->codegen, ptr_type) && !handle_is_ptr(ira->codegen, elem_type)) {
        result_loc = ir_resolve_result(ira, source_instruction, no_result_loc(), elem_type, nullptr, true, true);
    } else {
        result_loc = nullptr;
    }

    IrInstGen *new_instruction = ir_build_ref_gen(ira, source_instruction, ptr_type, value, result_loc);
    new_instruction->value->data.rh_ptr = RuntimeHintPtrStack;
    return new_instruction;
}
|
|
|
|
// Take a reference to `value`, using the value's own type as element type.
static IrInstGen *ir_get_ref(IrAnalyze *ira, IrInst* source_instruction, IrInstGen *value,
        bool is_const, bool is_volatile)
{
    ZigType *elem_type = value->value->type;
    return ir_get_ref2(ira, source_instruction, value, elem_type, is_const, is_volatile);
}
|
|
|
|
// Resolve the tag enum type of a tagged union, emitting an error (with a
// 'union(enum)' hint) when the union has no tag.
static ZigType *ir_resolve_union_tag_type(IrAnalyze *ira, AstNode *source_node, ZigType *union_type) {
    assert(union_type->id == ZigTypeIdUnion);

    if (type_resolve(ira->codegen, union_type, ResolveStatusSizeKnown) != ErrorNone)
        return ira->codegen->builtin_types.entry_invalid;

    AstNode *decl_node = union_type->data.unionation.decl_node;
    bool has_tag = decl_node->data.container_decl.auto_enum ||
            decl_node->data.container_decl.init_arg_expr != nullptr;
    if (!has_tag) {
        ErrorMsg *msg = ir_add_error_node(ira, source_node, buf_sprintf("union '%s' has no tag",
                buf_ptr(&union_type->name)));
        add_error_note(ira->codegen, msg, decl_node, buf_sprintf("consider 'union(enum)' here"));
        return ira->codegen->builtin_types.entry_invalid;
    }

    assert(union_type->data.unionation.tag_type != nullptr);
    return union_type->data.unionation.tag_type;
}
|
|
|
|
// We can fold the enum type (and avoid any check, be it at runtime or at
// compile time) iff it is auto-laid-out, exhaustive, has exactly one field,
// and its integer tag type is zero-bit — then the single tag value is
// statically known.
static bool can_fold_enum_type(ZigType *ty) {
    assert(ty->id == ZigTypeIdEnum);
    if (ty->data.enumeration.layout != ContainerLayoutAuto)
        return false;
    if (ty->data.enumeration.src_field_count != 1)
        return false;
    if (ty->data.enumeration.non_exhaustive)
        return false;
    ZigType *tag_int_type = ty->data.enumeration.tag_int_type;
    return tag_int_type->id == ZigTypeIdInt && tag_int_type->data.integral.bit_count == 0;
}
|
|
|
|
// Convert an enum value (or a tagged union, via its tag) to its integer tag
// type. Folds to a comptime constant when the tag is known, or when the enum
// has only one possible value.
static IrInstGen *ir_analyze_enum_to_int(IrAnalyze *ira, IrInst *source_instr, IrInstGen *target) {
    Error err;

    IrInstGen *enum_target;
    ZigType *enum_type;
    if (target->value->type->id == ZigTypeIdUnion) {
        // Tagged union: operate on its tag enum instead.
        enum_type = ir_resolve_union_tag_type(ira, target->base.source_node, target->value->type);
        if (type_is_invalid(enum_type))
            return ira->codegen->invalid_inst_gen;
        enum_target = ir_implicit_cast(ira, target, enum_type);
        if (type_is_invalid(enum_target->value->type))
            return ira->codegen->invalid_inst_gen;
    } else if (target->value->type->id == ZigTypeIdEnum) {
        enum_target = target;
        enum_type = target->value->type;
    } else {
        ir_add_error_node(ira, target->base.source_node,
                buf_sprintf("expected enum, found type '%s'", buf_ptr(&target->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if ((err = type_resolve(ira->codegen, enum_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    ZigType *tag_type = enum_type->data.enumeration.tag_int_type;
    assert(tag_type->id == ZigTypeIdInt || tag_type->id == ZigTypeIdComptimeInt);

    // If there is only one possible tag, then we know at comptime what it is.
    if (can_fold_enum_type(enum_type)) {
        IrInstGen *result = ir_const(ira, source_instr, tag_type);
        init_const_bigint(result->value, tag_type,
                &enum_type->data.enumeration.fields[0].value);
        return result;
    }

    if (instr_is_comptime(enum_target)) {
        ZigValue *val = ir_resolve_const(ira, enum_target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        IrInstGen *result = ir_const(ira, source_instr, tag_type);
        init_const_bigint(result->value, tag_type, &val->data.x_enum_tag);
        return result;
    }

    // Runtime value: reinterpret the tag as its integer type.
    return ir_build_widen_or_shorten(ira, source_instr->scope, source_instr->source_node, enum_target, tag_type);
}
|
|
|
|
// Converts a tagged union value to its tag enum type. wanted_type must be
// exactly the union's declared tag type (asserted below).
static IrInstGen *ir_analyze_union_to_tag(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    assert(target->value->type->id == ZigTypeIdUnion);
    assert(wanted_type->id == ZigTypeIdEnum);
    assert(wanted_type == target->value->type->data.unionation.tag_type);

    // Comptime-known union: copy its stored tag into an enum constant.
    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->special = ConstValSpecialStatic;
        result->value->type = wanted_type;
        bigint_init_bigint(&result->value->data.x_enum_tag, &val->data.x_union.tag);
        return result;
    }

    // If there is only 1 possible tag, then we know at comptime what it is.
    if (can_fold_enum_type(wanted_type)) {
        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->special = ConstValSpecialStatic;
        result->value->type = wanted_type;
        TypeEnumField *enum_field = target->value->type->data.unionation.fields[0].enum_field;
        bigint_init_bigint(&result->value->data.x_enum_tag, &enum_field->value);
        return result;
    }

    // Runtime union: emit an instruction that extracts the tag.
    return ir_build_union_tag(ira, source_instr, target, wanted_type);
}
|
|
|
|
// Casting `undefined` to any type yields a comptime-known undefined value of
// the wanted type; no runtime code is emitted. `target` is unused — the
// operand carries no information beyond being undefined.
static IrInstGen *ir_analyze_undefined_to_anything(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    IrInstGen *undef_result = ir_const(ira, source_instr, wanted_type);
    undef_result->value->special = ConstValSpecialUndef;
    return undef_result;
}
|
|
|
|
// Coerces an enum value to a tagged union type: the result is a union whose
// active field is the one selected by the enum tag. Since no payload
// expression is provided, the selected field's type must have exactly one
// possible value (e.g. void) for a comptime operand, and the union must
// have no fields with bits at all for a runtime operand.
//
// Fix: corrected the user-facing error message typo "non-exhustive" ->
// "non-exhaustive".
static IrInstGen *ir_analyze_enum_to_union(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *uncasted_target, ZigType *wanted_type)
{
    Error err;
    assert(wanted_type->id == ZigTypeIdUnion);

    if ((err = type_resolve(ira->codegen, wanted_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    // Coerce the operand to the union's tag enum type first.
    IrInstGen *target = ir_implicit_cast(ira, uncasted_target, wanted_type->data.unionation.tag_type);
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag);
        if (union_field == nullptr) {
            Buf *int_buf = buf_alloc();
            bigint_append_buf(int_buf, &target->value->data.x_enum_tag, 10);

            ir_add_error(ira, &target->base,
                buf_sprintf("no tag by value %s", buf_ptr(int_buf)));
            return ira->codegen->invalid_inst_gen;
        }
        ZigType *field_type = resolve_union_field_type(ira->codegen, union_field);
        if (field_type == nullptr)
            return ira->codegen->invalid_inst_gen;
        if ((err = type_resolve(ira->codegen, field_type, ResolveStatusZeroBitsKnown)))
            return ira->codegen->invalid_inst_gen;

        // No payload is provided by this cast, so it is only legal when the
        // selected field's type has exactly one possible value.
        switch (type_has_one_possible_value(ira->codegen, field_type)) {
            case OnePossibleValueInvalid:
                return ira->codegen->invalid_inst_gen;
            case OnePossibleValueNo: {
                AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(
                        union_field->enum_field->decl_index);
                ErrorMsg *msg = ir_add_error(ira, source_instr,
                        buf_sprintf("cast to union '%s' must initialize '%s' field '%s'",
                            buf_ptr(&wanted_type->name),
                            buf_ptr(&field_type->name),
                            buf_ptr(union_field->name)));
                add_error_note(ira->codegen, msg, field_node,
                        buf_sprintf("field '%s' declared here", buf_ptr(union_field->name)));
                return ira->codegen->invalid_inst_gen;
            }
            case OnePossibleValueYes:
                break;
        }

        // Build the comptime union constant: tag from the enum value, payload
        // is the field type's single possible value (left type-only).
        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->special = ConstValSpecialStatic;
        result->value->type = wanted_type;
        bigint_init_bigint(&result->value->data.x_union.tag, &val->data.x_enum_tag);
        result->value->data.x_union.payload = ira->codegen->pass1_arena->create<ZigValue>();
        result->value->data.x_union.payload->special = ConstValSpecialStatic;
        result->value->data.x_union.payload->type = field_type;
        return result;
    }

    // A runtime tag value from a non-exhaustive enum might not correspond to
    // any union field, so reject the cast outright.
    if (target->value->type->data.enumeration.non_exhaustive) {
        ir_add_error(ira, source_instr,
            buf_sprintf("runtime cast to union '%s' from non-exhaustive enum",
                buf_ptr(&wanted_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // if the union has all fields 0 bits, we can do it
    // and in fact it's a noop cast because the union value is just the enum value
    if (wanted_type->data.unionation.gen_field_count == 0) {
        return ir_build_cast(ira, &target->base, wanted_type, target, CastOpNoop);
    }

    // Otherwise the cast is an error; list every field that carries bits so
    // the user can see why the union value cannot be synthesized.
    ErrorMsg *msg = ir_add_error(ira, source_instr,
        buf_sprintf("runtime cast to union '%s' which has non-void fields",
            buf_ptr(&wanted_type->name)));
    for (uint32_t i = 0; i < wanted_type->data.unionation.src_field_count; i += 1) {
        TypeUnionField *union_field = &wanted_type->data.unionation.fields[i];
        ZigType *field_type = resolve_union_field_type(ira->codegen, union_field);
        if (field_type == nullptr)
            return ira->codegen->invalid_inst_gen;
        bool has_bits;
        if ((err = type_has_bits2(ira->codegen, field_type, &has_bits)))
            return ira->codegen->invalid_inst_gen;
        if (has_bits) {
            AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(i);
            add_error_note(ira->codegen, msg, field_node,
                buf_sprintf("field '%s' has type '%s'",
                    buf_ptr(union_field->name),
                    buf_ptr(&field_type->name)));
        }
    }
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
static bool value_numeric_fits_in_type(ZigValue *value, ZigType *type_entry);
|
|
|
|
// Converts between integer or float types (scalar or vector) where only the
// bit width / precision changes. Comptime operands are range-checked and
// folded; runtime operands become a widen-or-shorten instruction, with a
// special case for zero-bit destination integers.
static IrInstGen *ir_analyze_widen_or_shorten(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    // NOTE(review): vector-ness is tested on the *operand's* type while the
    // element type is read from wanted_type — this assumes callers pass
    // vector->vector or scalar->scalar consistently; confirm.
    ZigType *wanted_scalar_type = (target->value->type->id == ZigTypeIdVector) ?
        wanted_type->data.vector.elem_type : wanted_type;

    assert(wanted_scalar_type->id == ZigTypeIdInt || wanted_scalar_type->id == ZigTypeIdFloat);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        if (wanted_scalar_type->id == ZigTypeIdInt) {
            // Comptime range checks: negative value into an unsigned type,
            // and values that would not fit in the destination width.
            if (!wanted_scalar_type->data.integral.is_signed && value_cmp_numeric_val_any(val, CmpLT, nullptr)) {
                ir_add_error(ira, source_instr,
                    buf_sprintf("attempt to cast negative value to unsigned integer"));
                return ira->codegen->invalid_inst_gen;
            }
            if (!value_numeric_fits_in_type(val, wanted_scalar_type)) {
                ir_add_error(ira, source_instr,
                    buf_sprintf("cast from '%s' to '%s' truncates bits",
                        buf_ptr(&target->value->type->name), buf_ptr(&wanted_scalar_type->name)));
                return ira->codegen->invalid_inst_gen;
            }
        }

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->type = wanted_type;

        if (wanted_type->id == ZigTypeIdVector) {
            // Element-wise copy into a freshly allocated vector constant.
            result->value->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(wanted_type->data.vector.len);

            for (size_t i = 0; i < wanted_type->data.vector.len; i++) {
                ZigValue *scalar_dest_value = &result->value->data.x_array.data.s_none.elements[i];
                ZigValue *scalar_src_value = &val->data.x_array.data.s_none.elements[i];

                scalar_dest_value->type = wanted_scalar_type;
                scalar_dest_value->special = ConstValSpecialStatic;

                if (wanted_scalar_type->id == ZigTypeIdInt) {
                    bigint_init_bigint(&scalar_dest_value->data.x_bigint, &scalar_src_value->data.x_bigint);
                } else {
                    float_init_float(scalar_dest_value, scalar_src_value);
                }
            }
        } else {
            if (wanted_type->id == ZigTypeIdInt) {
                bigint_init_bigint(&result->value->data.x_bigint, &val->data.x_bigint);
            } else {
                float_init_float(result->value, val);
            }
        }

        return result;
    }

    // If the destination integer type has no bits, then we can emit a comptime
    // zero. However, we still want to emit a runtime safety check to make sure
    // the target is zero.
    if (!type_has_bits(ira->codegen, wanted_type)) {
        assert(wanted_type->id == ZigTypeIdInt);
        assert(type_has_bits(ira->codegen, target->value->type));
        ir_build_assert_zero(ira, source_instr, target);
        IrInstGen *result = ir_const_unsigned(ira, source_instr, 0);
        result->value->type = wanted_type;
        return result;
    }

    return ir_build_widen_or_shorten(ira, source_instr->scope, source_instr->source_node, target, wanted_type);
}
|
|
|
|
// Implements @intToEnum: converts an integer to an enum value. The integer
// must already be exactly the enum's tag type. Comptime values are checked
// against the declared tags (unless the enum is non-exhaustive); runtime
// values are handled by a dedicated instruction.
static IrInstGen *ir_analyze_int_to_enum(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    Error err;
    assert(wanted_type->id == ZigTypeIdEnum);

    ZigType *actual_type = target->value->type;

    if ((err = type_resolve(ira->codegen, wanted_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    // Only the enum's exact tag type is accepted; no implicit widening here.
    if (actual_type != wanted_type->data.enumeration.tag_int_type) {
        ir_add_error(ira, source_instr,
            buf_sprintf("integer to enum cast from '%s' instead of its tag type, '%s'",
                buf_ptr(&actual_type->name),
                buf_ptr(&wanted_type->data.enumeration.tag_int_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    assert(actual_type->id == ZigTypeIdInt || actual_type->id == ZigTypeIdComptimeInt);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        // Exhaustive enums require the value to match a declared tag;
        // non-exhaustive enums accept any value of the tag type.
        TypeEnumField *field = find_enum_field_by_tag(wanted_type, &val->data.x_bigint);
        if (field == nullptr && !wanted_type->data.enumeration.non_exhaustive) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &val->data.x_bigint, 10);
            ErrorMsg *msg = ir_add_error(ira, source_instr,
                buf_sprintf("enum '%s' has no tag matching integer value %s",
                    buf_ptr(&wanted_type->name), buf_ptr(val_buf)));
            add_error_note(ira->codegen, msg, wanted_type->data.enumeration.decl_node,
                buf_sprintf("'%s' declared here", buf_ptr(&wanted_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        bigint_init_bigint(&result->value->data.x_enum_tag, &val->data.x_bigint);
        return result;
    }

    return ir_build_int_to_enum_gen(ira, source_instr->scope, source_instr->source_node, wanted_type, target);
}
|
|
|
|
// Converts a comptime-known numeric value to comptime_float or comptime_int.
// The operand must be comptime-resolvable; any other wanted type id is a
// caller bug (unreachable).
static IrInstGen *ir_analyze_number_to_literal(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    ZigValue *operand_val = ir_resolve_const(ira, target, UndefBad);
    if (operand_val == nullptr)
        return ira->codegen->invalid_inst_gen;

    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    switch (wanted_type->id) {
        case ZigTypeIdComptimeFloat:
            float_init_float(result->value, operand_val);
            break;
        case ZigTypeIdComptimeInt:
            bigint_init_bigint(&result->value->data.x_bigint, &operand_val->data.x_bigint);
            break;
        default:
            zig_unreachable();
    }
    return result;
}
|
|
|
|
// Implements @intToError: converts an unsigned integer to an error value of
// the given error set type. Comptime values are validated against the known
// errors; runtime values become an int-to-err instruction.
static IrInstGen *ir_analyze_int_to_err(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *wanted_type)
{
    assert(target->value->type->id == ZigTypeIdInt);
    assert(!target->value->type->data.integral.is_signed);
    assert(wanted_type->id == ZigTypeIdErrorSet);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);

        if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
            return ira->codegen->invalid_inst_gen;
        }

        if (type_is_global_error_set(wanted_type)) {
            // Global error set (anyerror): any value in [1, errors_by_index.length)
            // is valid and indexes directly into the global error table.
            BigInt err_count;
            bigint_init_unsigned(&err_count, ira->codegen->errors_by_index.length);

            if (bigint_cmp_zero(&val->data.x_bigint) == CmpEQ || bigint_cmp(&val->data.x_bigint, &err_count) != CmpLT) {
                Buf *val_buf = buf_alloc();
                bigint_append_buf(val_buf, &val->data.x_bigint, 10);
                ir_add_error(ira, source_instr,
                    buf_sprintf("integer value %s represents no error", buf_ptr(val_buf)));
                return ira->codegen->invalid_inst_gen;
            }

            size_t index = bigint_as_usize(&val->data.x_bigint);
            result->value->data.x_err_set = ira->codegen->errors_by_index.at(index);
            return result;
        } else {
            // Specific error set: the value must equal one member's numeric id.
            ErrorTableEntry *err = nullptr;
            BigInt err_int;

            for (uint32_t i = 0, count = wanted_type->data.error_set.err_count; i < count; i += 1) {
                ErrorTableEntry *this_err = wanted_type->data.error_set.errors[i];
                bigint_init_unsigned(&err_int, this_err->value);
                if (bigint_cmp(&val->data.x_bigint, &err_int) == CmpEQ) {
                    err = this_err;
                    break;
                }
            }

            if (err == nullptr) {
                Buf *val_buf = buf_alloc();
                bigint_append_buf(val_buf, &val->data.x_bigint, 10);
                ir_add_error(ira, source_instr,
                    buf_sprintf("integer value %s represents no error in '%s'", buf_ptr(val_buf), buf_ptr(&wanted_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            result->value->data.x_err_set = err;
            return result;
        }
    }

    // Runtime operand: validity is checked by the generated instruction.
    return ir_build_int_to_err_gen(ira, source_instr->scope, source_instr->source_node, target, wanted_type);
}
|
|
|
|
// Implements @errorToInt: converts an error set (or error union) value to an
// integer. Comptime operands fold to the error's numeric id; resolved error
// sets with 0 or 1 members also fold. Otherwise the destination width is
// checked against the total error count and a runtime instruction is emitted.
static IrInstGen *ir_analyze_err_to_int(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *wanted_type)
{
    assert(wanted_type->id == ZigTypeIdInt);

    ZigType *err_type = target->value->type;

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);

        // Extract the error entry from either an error union or a bare error set.
        ErrorTableEntry *err;
        if (err_type->id == ZigTypeIdErrorUnion) {
            err = val->data.x_err_union.error_set->data.x_err_set;
        } else if (err_type->id == ZigTypeIdErrorSet) {
            err = val->data.x_err_set;
        } else {
            zig_unreachable();
        }
        result->value->type = wanted_type;
        // A null err maps to integer 0 (presumably an error union holding a
        // payload rather than an error — TODO confirm).
        uint64_t err_value = err ? err->value : 0;
        bigint_init_unsigned(&result->value->data.x_bigint, err_value);

        if (!bigint_fits_in_bits(&result->value->data.x_bigint,
            wanted_type->data.integral.bit_count, wanted_type->data.integral.is_signed))
        {
            // NOTE(review): err is dereferenced here; this assumes any value
            // that fails the fit check came from a non-null err (0 fits in
            // every integer type) — confirm.
            ir_add_error_node(ira, source_instr->source_node,
                buf_sprintf("error code '%s' does not fit in '%s'",
                    buf_ptr(&err->name), buf_ptr(&wanted_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        return result;
    }

    // Runtime operand: determine the relevant error set type.
    ZigType *err_set_type;
    if (err_type->id == ZigTypeIdErrorUnion) {
        err_set_type = err_type->data.error_union.err_set_type;
    } else if (err_type->id == ZigTypeIdErrorSet) {
        err_set_type = err_type;
    } else {
        zig_unreachable();
    }
    if (!type_is_global_error_set(err_set_type)) {
        if (!resolve_inferred_error_set(ira->codegen, err_set_type, source_instr->source_node)) {
            return ira->codegen->invalid_inst_gen;
        }
        // With 0 or 1 possible errors the result is a comptime constant.
        if (err_set_type->data.error_set.err_count == 0) {
            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            bigint_init_unsigned(&result->value->data.x_bigint, 0);
            return result;
        } else if (err_set_type->data.error_set.err_count == 1) {
            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            ErrorTableEntry *err = err_set_type->data.error_set.errors[0];
            bigint_init_unsigned(&result->value->data.x_bigint, err->value);
            return result;
        }
    }

    // Reject destination types too narrow to hold every registered error id.
    BigInt bn;
    bigint_init_unsigned(&bn, ira->codegen->errors_by_index.length);
    if (!bigint_fits_in_bits(&bn, wanted_type->data.integral.bit_count, wanted_type->data.integral.is_signed)) {
        ir_add_error_node(ira, source_instr->source_node,
            buf_sprintf("too many error values to fit in '%s'", buf_ptr(&wanted_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_build_err_to_int_gen(ira, source_instr->scope, source_instr->source_node, target, wanted_type);
}
|
|
|
|
// Casts a pointer-to-single-item to a pointer-to-[1]T array. At comptime a
// one-element array value is wrapped around the pointee; at runtime the two
// pointer representations are identical, so a bitcast suffices.
static IrInstGen *ir_analyze_ptr_to_array(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *wanted_type)
{
    assert(wanted_type->id == ZigTypeIdPointer);
    Error err;
    if ((err = type_resolve(ira->codegen, target->value->type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
        return ira->codegen->invalid_inst_gen;
    // The cast must not discard const; carry the source pointer's alignment over.
    assert((wanted_type->data.pointer.is_const && target->value->type->data.pointer.is_const) || !target->value->type->data.pointer.is_const);
    wanted_type = adjust_ptr_align(ira->codegen, wanted_type, get_ptr_align(ira->codegen, target->value->type));
    ZigType *array_type = wanted_type->data.pointer.child_type;
    assert(array_type->id == ZigTypeIdArray);
    assert(array_type->data.array.len == 1);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        assert(val->type->id == ZigTypeIdPointer);
        ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, val, source_instr->source_node);
        if (pointee == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (pointee->special != ConstValSpecialRuntime) {
            // Build a [1]T array value whose single element *is* the pointee:
            // with len == 1 the element pointer doubles as the array storage.
            ZigValue *array_val = ira->codegen->pass1_arena->create<ZigValue>();
            array_val->special = ConstValSpecialStatic;
            array_val->type = array_type;
            array_val->data.x_array.special = ConstArraySpecialNone;
            array_val->data.x_array.data.s_none.elements = pointee;
            array_val->parent.id = ConstParentIdScalar;
            array_val->parent.data.p_scalar.scalar_val = pointee;

            // Hand-build the const pointer instruction referring to the new
            // array value, preserving the source pointer's mutability.
            IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                    source_instr->scope, source_instr->source_node);
            const_instruction->base.value->type = wanted_type;
            const_instruction->base.value->special = ConstValSpecialStatic;
            const_instruction->base.value->data.x_ptr.special = ConstPtrSpecialRef;
            const_instruction->base.value->data.x_ptr.data.ref.pointee = array_val;
            const_instruction->base.value->data.x_ptr.mut = val->data.x_ptr.mut;
            return &const_instruction->base;
        }
    }

    // pointer to array and pointer to single item are represented the same way at runtime
    return ir_build_cast(ira, &target->base, wanted_type, target, CastOpBitCast);
}
|
|
|
|
// Attaches explanatory notes to parent_msg describing why a const cast
// failed, recursing into nested mismatch records (pointer child, optional
// child, error union payload, function argument, ...). Ok/Invalid are
// unreachable here: callers only invoke this for actual failures.
static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCastOnly *cast_result,
        ErrorMsg *parent_msg)
{
    switch (cast_result->id) {
        case ConstCastResultIdOk:
            zig_unreachable();
        case ConstCastResultIdInvalid:
            zig_unreachable();
        case ConstCastResultIdOptionalChild: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("optional type child '%s' cannot cast into optional type child '%s'",
                    buf_ptr(&cast_result->data.optional->actual_child->name),
                    buf_ptr(&cast_result->data.optional->wanted_child->name)));
            report_recursive_error(ira, source_node, &cast_result->data.optional->child, msg);
            break;
        }
        case ConstCastResultIdOptionalShape: {
            add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("optional type child '%s' cannot cast into optional type '%s'",
                    buf_ptr(&cast_result->data.type_mismatch->actual_type->name),
                    buf_ptr(&cast_result->data.type_mismatch->wanted_type->name)));
            break;
        }
        case ConstCastResultIdErrorUnionErrorSet: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("error set '%s' cannot cast into error set '%s'",
                    buf_ptr(&cast_result->data.error_union_error_set->actual_err_set->name),
                    buf_ptr(&cast_result->data.error_union_error_set->wanted_err_set->name)));
            report_recursive_error(ira, source_node, &cast_result->data.error_union_error_set->child, msg);
            break;
        }
        case ConstCastResultIdErrSet: {
            // One note per error missing from the destination set, anchored
            // at each error's declaration site.
            ZigList<ErrorTableEntry *> *missing_errors = &cast_result->data.error_set_mismatch->missing_errors;
            for (size_t i = 0; i < missing_errors->length; i += 1) {
                ErrorTableEntry *error_entry = missing_errors->at(i);
                add_error_note(ira->codegen, parent_msg, ast_field_to_symbol_node(error_entry->decl_node),
                    buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name)));
            }
            break;
        }
        case ConstCastResultIdErrSetGlobal: {
            add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("cannot cast global error set into smaller set"));
            break;
        }
        case ConstCastResultIdPointerChild: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("pointer type child '%s' cannot cast into pointer type child '%s'",
                    buf_ptr(&cast_result->data.pointer_mismatch->actual_child->name),
                    buf_ptr(&cast_result->data.pointer_mismatch->wanted_child->name)));
            report_recursive_error(ira, source_node, &cast_result->data.pointer_mismatch->child, msg);
            break;
        }
        case ConstCastResultIdSliceChild: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("slice type child '%s' cannot cast into slice type child '%s'",
                    buf_ptr(&cast_result->data.slice_mismatch->actual_child->name),
                    buf_ptr(&cast_result->data.slice_mismatch->wanted_child->name)));
            report_recursive_error(ira, source_node, &cast_result->data.slice_mismatch->child, msg);
            break;
        }
        case ConstCastResultIdErrorUnionPayload: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("error union payload '%s' cannot cast into error union payload '%s'",
                    buf_ptr(&cast_result->data.error_union_payload->actual_payload->name),
                    buf_ptr(&cast_result->data.error_union_payload->wanted_payload->name)));
            report_recursive_error(ira, source_node, &cast_result->data.error_union_payload->child, msg);
            break;
        }
        case ConstCastResultIdType: {
            // Point at both type declaration sites when they are available.
            AstNode *wanted_decl_node = type_decl_node(cast_result->data.type_mismatch->wanted_type);
            AstNode *actual_decl_node = type_decl_node(cast_result->data.type_mismatch->actual_type);
            if (wanted_decl_node != nullptr) {
                add_error_note(ira->codegen, parent_msg, wanted_decl_node,
                    buf_sprintf("%s declared here",
                        buf_ptr(&cast_result->data.type_mismatch->wanted_type->name)));
            }
            if (actual_decl_node != nullptr) {
                add_error_note(ira->codegen, parent_msg, actual_decl_node,
                    buf_sprintf("%s declared here",
                        buf_ptr(&cast_result->data.type_mismatch->actual_type->name)));
            }
            break;
        }
        case ConstCastResultIdFnArg: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("parameter %" ZIG_PRI_usize ": '%s' cannot cast into '%s'",
                    cast_result->data.fn_arg.arg_index,
                    buf_ptr(&cast_result->data.fn_arg.actual_param_type->name),
                    buf_ptr(&cast_result->data.fn_arg.expected_param_type->name)));
            report_recursive_error(ira, source_node, cast_result->data.fn_arg.child, msg);
            break;
        }
        case ConstCastResultIdBadAllowsZero: {
            // Word the note according to which side permits address zero.
            ZigType *wanted_type = cast_result->data.bad_allows_zero->wanted_type;
            ZigType *actual_type = cast_result->data.bad_allows_zero->actual_type;
            bool wanted_allows_zero = ptr_allows_addr_zero(wanted_type);
            bool actual_allows_zero = ptr_allows_addr_zero(actual_type);
            if (actual_allows_zero && !wanted_allows_zero) {
                add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("'%s' could have null values which are illegal in type '%s'",
                        buf_ptr(&actual_type->name),
                        buf_ptr(&wanted_type->name)));
            } else {
                add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("mutable '%s' allows illegal null values stored to type '%s'",
                        buf_ptr(&wanted_type->name),
                        buf_ptr(&actual_type->name)));
            }
            break;
        }
        case ConstCastResultIdPtrLens: {
            add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("pointer length mismatch"));
            break;
        }
        case ConstCastResultIdPtrSentinel: {
            // Render the wanted sentinel, and the actual one if present.
            ZigType *actual_type = cast_result->data.bad_ptr_sentinel->actual_type;
            ZigType *wanted_type = cast_result->data.bad_ptr_sentinel->wanted_type;
            {
                Buf *txt_msg = buf_sprintf("destination pointer requires a terminating '");
                render_const_value(ira->codegen, txt_msg, wanted_type->data.pointer.sentinel);
                buf_appendf(txt_msg, "' sentinel");
                if (actual_type->data.pointer.sentinel != nullptr) {
                    buf_appendf(txt_msg, ", but source pointer has a terminating '");
                    render_const_value(ira->codegen, txt_msg, actual_type->data.pointer.sentinel);
                    buf_appendf(txt_msg, "' sentinel");
                }
                add_error_note(ira->codegen, parent_msg, source_node, txt_msg);
            }
            break;
        }
        case ConstCastResultIdSentinelArrays: {
            ZigType *actual_type = cast_result->data.sentinel_arrays->actual_type;
            ZigType *wanted_type = cast_result->data.sentinel_arrays->wanted_type;
            Buf *txt_msg = buf_sprintf("destination array requires a terminating '");
            render_const_value(ira->codegen, txt_msg, wanted_type->data.array.sentinel);
            buf_appendf(txt_msg, "' sentinel");
            if (actual_type->data.array.sentinel != nullptr) {
                buf_appendf(txt_msg, ", but source array has a terminating '");
                render_const_value(ira->codegen, txt_msg, actual_type->data.array.sentinel);
                buf_appendf(txt_msg, "' sentinel");
            }
            add_error_note(ira->codegen, parent_msg, source_node, txt_msg);
            break;
        }
        case ConstCastResultIdCV: {
            // Exactly one of const/volatile must have been discarded.
            ZigType *wanted_type = cast_result->data.bad_cv->wanted_type;
            ZigType *actual_type = cast_result->data.bad_cv->actual_type;
            bool ok_const = !actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const;
            bool ok_volatile = !actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile;
            if (!ok_const) {
                add_error_note(ira->codegen, parent_msg, source_node, buf_sprintf("cast discards const qualifier"));
            } else if (!ok_volatile) {
                add_error_note(ira->codegen, parent_msg, source_node, buf_sprintf("cast discards volatile qualifier"));
            } else {
                zig_unreachable();
            }
            break;
        }
        case ConstCastResultIdFnIsGeneric:
            add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("only one of the functions is generic"));
            break;
        case ConstCastResultIdFnCC:
            add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("calling convention mismatch"));
            break;
        case ConstCastResultIdIntShorten: {
            ZigType *wanted_type = cast_result->data.int_shorten->wanted_type;
            ZigType *actual_type = cast_result->data.int_shorten->actual_type;
            const char *wanted_signed = wanted_type->data.integral.is_signed ? "signed" : "unsigned";
            const char *actual_signed = actual_type->data.integral.is_signed ? "signed" : "unsigned";
            add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("%s %" PRIu32 "-bit int cannot represent all possible %s %" PRIu32 "-bit values",
                    wanted_signed, wanted_type->data.integral.bit_count,
                    actual_signed, actual_type->data.integral.bit_count));
            break;
        }
        // The remaining result ids produce no detailed note yet.
        case ConstCastResultIdVectorLength: // TODO
        case ConstCastResultIdVectorChild: // TODO
        case ConstCastResultIdFnAlign: // TODO
        case ConstCastResultIdFnVarArgs: // TODO
        case ConstCastResultIdFnReturnType: // TODO
        case ConstCastResultIdFnArgCount: // TODO
        case ConstCastResultIdFnGenericArgCount: // TODO
        case ConstCastResultIdFnArgNoAlias: // TODO
        case ConstCastResultIdUnresolvedInferredErrSet: // TODO
        case ConstCastResultIdAsyncAllocatorType: // TODO
        case ConstCastResultIdArrayChild: // TODO
            break;
    }
}
|
|
|
|
// Casts an array value to a vector of the same length and element type.
static IrInstGen *ir_analyze_array_to_vector(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *array, ZigType *vector_type)
{
    if (!instr_is_comptime(array))
        return ir_build_array_to_vector(ira, source_instr, array, vector_type);

    // arrays and vectors have the same ZigValue representation, so a
    // comptime cast is just a value copy with the type swapped.
    IrInstGen *folded = ir_const(ira, source_instr, vector_type);
    copy_const_val(ira->codegen, folded->value, array->value);
    folded->value->type = vector_type;
    return folded;
}
|
|
|
|
// Casts a vector value to an array of the same length and element type.
// Comptime vectors fold to a constant; runtime vectors are stored through a
// resolved result location.
static IrInstGen *ir_analyze_vector_to_array(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *vector, ZigType *array_type, ResultLoc *result_loc)
{
    if (instr_is_comptime(vector)) {
        // arrays and vectors have the same ZigValue representation.
        IrInstGen *folded = ir_const(ira, source_instr, array_type);
        copy_const_val(ira->codegen, folded->value, vector->value);
        folded->value->type = array_type;
        return folded;
    }

    ResultLoc *effective_loc = (result_loc != nullptr) ? result_loc : no_result_loc();
    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, effective_loc,
            array_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) ||
        result_loc_inst->value->type->id == ZigTypeIdUnreachable)
    {
        return result_loc_inst;
    }
    return ir_build_vector_to_array(ira, source_instr, array_type, vector, result_loc_inst);
}
|
|
|
|
// Coerces an integer to a C pointer type. Runtime operands must fit in a
// usize, and signed runtime operands are first bitcast to the unsigned type
// of the same width; the final conversion is delegated to
// ir_analyze_int_to_ptr.
static IrInstGen *ir_analyze_int_to_c_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *integer, ZigType *dest_type)
{
    IrInstGen *unsigned_integer = integer;

    if (!instr_is_comptime(integer)) {
        assert(integer->value->type->id == ZigTypeIdInt);

        uint32_t usize_bits = ira->codegen->builtin_types.entry_usize->data.integral.bit_count;
        if (integer->value->type->data.integral.bit_count > usize_bits) {
            ir_add_error(ira, source_instr,
                buf_sprintf("integer type '%s' too big for implicit @intToPtr to type '%s'",
                    buf_ptr(&integer->value->type->name),
                    buf_ptr(&dest_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        if (integer->value->type->data.integral.is_signed) {
            ZigType *unsigned_int_type = get_int_type(ira->codegen, false,
                    integer->value->type->data.integral.bit_count);
            unsigned_integer = ir_analyze_bit_cast(ira, source_instr, integer, unsigned_int_type);
            if (type_is_invalid(unsigned_integer->value->type))
                return ira->codegen->invalid_inst_gen;
        }
    }

    return ir_analyze_int_to_ptr(ira, source_instr, unsigned_integer, dest_type);
}
|
|
|
|
// Returns true when `ty` is pointer-like — a pointer, a function, or an
// optional wrapping either — and, for pointers, the pointee is itself not a
// pointer.
static bool is_pointery_and_elem_is_not_pointery(ZigType *ty) {
    // For optionals, classify the wrapped child type instead.
    ZigType *candidate = (ty->id == ZigTypeIdOptional) ? ty->data.maybe.child_type : ty;
    switch (candidate->id) {
        case ZigTypeIdPointer:
            return candidate->data.pointer.child_type->id != ZigTypeIdPointer;
        case ZigTypeIdFn:
            return true;
        default:
            return false;
    }
}
|
|
|
|
static IrInstGen *ir_analyze_enum_literal(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *enum_type)
{
    // Coerce an enum literal (e.g. `.foo`) to a concrete enum type by looking
    // up the field with the matching name. The result is always a comptime
    // constant carrying that field's tag value.
    assert(enum_type->id == ZigTypeIdEnum);

    Error err;
    if ((err = type_resolve(ira->codegen, enum_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    Buf *field_name = value->value->data.x_enum_literal;
    TypeEnumField *field = find_enum_type_field(enum_type, field_name);
    if (field != nullptr) {
        IrInstGen *result = ir_const(ira, source_instr, enum_type);
        bigint_init_bigint(&result->value->data.x_enum_tag, &field->value);
        return result;
    }

    // No such field: report the error with a note pointing at the enum decl.
    ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("enum '%s' has no field named '%s'",
            buf_ptr(&enum_type->name), buf_ptr(field_name)));
    add_error_note(ira->codegen, msg, enum_type->data.enumeration.decl_node,
            buf_sprintf("'%s' declared here", buf_ptr(&enum_type->name)));
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
static IrInstGen *ir_analyze_struct_literal_to_array(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
    // Coercion of an anonymous list literal to an array type is not
    // implemented yet; unconditionally report a compile error.
    ir_add_error(ira, source_instr,
            buf_sprintf("TODO: type coercion of anon list literal to array"));
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
static IrInstGen *ir_analyze_struct_literal_to_struct(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *struct_operand, ZigType *wanted_type)
{
    // Coerce an anonymous struct literal to the concrete struct `wanted_type`.
    // Each literal field is matched to a destination field by name and
    // implicitly cast; destination fields not present in the literal fall back
    // to their default init values. The result is a comptime constant when all
    // field values are comptime-known (or the destination type requires
    // comptime); otherwise the fields are stored through a result location at
    // runtime. Returns invalid_inst_gen on any error.
    Error err;

    IrInstGen *struct_ptr = ir_get_ref(ira, source_instr, struct_operand, true, false);
    if (type_is_invalid(struct_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    if (wanted_type->data.structure.resolve_status == ResolveStatusBeingInferred) {
        ir_add_error(ira, source_instr, buf_sprintf("type coercion of anon struct literal to inferred struct"));
        return ira->codegen->invalid_inst_gen;
    }

    if ((err = type_resolve(ira->codegen, wanted_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    size_t actual_field_count = wanted_type->data.structure.src_field_count;
    size_t instr_field_count = struct_operand->value->type->data.structure.src_field_count;

    bool need_comptime = ir_should_inline(ira->old_irb.exec, source_instr->scope)
        || type_requires_comptime(ira->codegen, wanted_type) == ReqCompTimeYes;
    bool is_comptime = true;

    // Per-destination-field bookkeeping (indexed by dst src_index):
    // - field_assign_nodes[i]: the literal field decl that initialized field i
    //   (zero-initialized by the allocator; used for duplicate/missing checks)
    // - field_values[i]: the comptime value, when known
    // - casted_fields[i]: the casted instruction for the runtime path
    AstNode **field_assign_nodes = heap::c_allocator.allocate<AstNode *>(actual_field_count);
    ZigValue **field_values = heap::c_allocator.allocate<ZigValue *>(actual_field_count);
    IrInstGen **casted_fields = heap::c_allocator.allocate<IrInstGen *>(actual_field_count);
    IrInstGen *const_result = ir_const(ira, source_instr, wanted_type);

    // Determine if the struct_operand will be comptime.
    // Also emit compile errors for missing fields and duplicate fields.
    for (size_t i = 0; i < instr_field_count; i += 1) {
        TypeStructField *src_field = struct_operand->value->type->data.structure.fields[i];
        TypeStructField *dst_field = find_struct_type_field(wanted_type, src_field->name);
        if (dst_field == nullptr) {
            ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("no field named '%s' in struct '%s'",
                buf_ptr(src_field->name), buf_ptr(&wanted_type->name)));
            if (wanted_type->data.structure.decl_node) {
                add_error_note(ira->codegen, msg, wanted_type->data.structure.decl_node,
                    buf_sprintf("struct '%s' declared here", buf_ptr(&wanted_type->name)));
            }
            add_error_note(ira->codegen, msg, src_field->decl_node,
                buf_sprintf("field '%s' declared here", buf_ptr(src_field->name)));
            return ira->codegen->invalid_inst_gen;
        }

        ir_assert(src_field->decl_node != nullptr, source_instr);
        AstNode *existing_assign_node = field_assign_nodes[dst_field->src_index];
        if (existing_assign_node != nullptr) {
            ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("duplicate field"));
            add_error_note(ira->codegen, msg, existing_assign_node, buf_sprintf("other field here"));
            return ira->codegen->invalid_inst_gen;
        }
        field_assign_nodes[dst_field->src_index] = src_field->decl_node;

        IrInstGen *field_ptr = ir_analyze_struct_field_ptr(ira, source_instr, src_field, struct_ptr,
                struct_operand->value->type, false);
        if (type_is_invalid(field_ptr->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *field_value = ir_get_deref(ira, source_instr, field_ptr, nullptr);
        if (type_is_invalid(field_value->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *casted_value = ir_implicit_cast(ira, field_value, dst_field->type_entry);
        if (type_is_invalid(casted_value->value->type))
            return ira->codegen->invalid_inst_gen;

        casted_fields[dst_field->src_index] = casted_value;
        if (need_comptime || instr_is_comptime(casted_value)) {
            ZigValue *field_val = ir_resolve_const(ira, casted_value, UndefOk);
            if (field_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            // Parent pointers reference const_result->value; the comptime path
            // below must return this same const_result for them to stay valid.
            field_val->parent.id = ConstParentIdStruct;
            field_val->parent.data.p_struct.struct_val = const_result->value;
            field_val->parent.data.p_struct.field_index = dst_field->src_index;
            field_values[dst_field->src_index] = field_val;
            if (field_val->type->id == ZigTypeIdUndefined && dst_field->type_entry->id != ZigTypeIdUndefined) {
                field_values[dst_field->src_index]->special = ConstValSpecialUndef;
            }
        } else {
            is_comptime = false;
        }
    }

    // Fill in destination fields the literal did not mention from their
    // default init values; emit one "missing field" error per field without a
    // default, then bail after diagnosing all of them.
    bool any_missing = false;
    for (size_t i = 0; i < actual_field_count; i += 1) {
        if (field_assign_nodes[i] != nullptr) continue;

        // look for a default field value
        TypeStructField *field = wanted_type->data.structure.fields[i];
        memoize_field_init_val(ira->codegen, wanted_type, field);
        if (field->init_val == nullptr) {
            ir_add_error(ira, source_instr,
                buf_sprintf("missing field: '%s'", buf_ptr(field->name)));
            any_missing = true;
            continue;
        }
        if (type_is_invalid(field->init_val->type))
            return ira->codegen->invalid_inst_gen;
        ZigValue *init_val_copy = ira->codegen->pass1_arena->create<ZigValue>();
        copy_const_val(ira->codegen, init_val_copy, field->init_val);
        init_val_copy->parent.id = ConstParentIdStruct;
        init_val_copy->parent.data.p_struct.struct_val = const_result->value;
        init_val_copy->parent.data.p_struct.field_index = i;
        field_values[i] = init_val_copy;
        casted_fields[i] = ir_const_move(ira, source_instr, init_val_copy);
    }
    if (any_missing)
        return ira->codegen->invalid_inst_gen;

    if (is_comptime) {
        // Return the outer const_result rather than constructing a fresh
        // constant: the field values' parent.data.p_struct.struct_val pointers
        // were set to const_result->value above, so a shadowing new constant
        // would leave them pointing at a different ZigValue. field_values is
        // retained (handed to the result); the scratch arrays are freed.
        heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
        heap::c_allocator.deallocate(casted_fields, actual_field_count);
        const_result->value->data.x_struct.fields = field_values;
        return const_result;
    }

    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, no_result_loc(),
            wanted_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) || result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
        return ira->codegen->invalid_inst_gen;
    }

    // Runtime path: store every casted field through the result location.
    for (size_t i = 0; i < actual_field_count; i += 1) {
        TypeStructField *field = wanted_type->data.structure.fields[i];
        IrInstGen *field_ptr = ir_analyze_struct_field_ptr(ira, source_instr, field, result_loc_inst, wanted_type, true);
        if (type_is_invalid(field_ptr->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *store_ptr_inst = ir_analyze_store_ptr(ira, source_instr, field_ptr, casted_fields[i], true);
        if (type_is_invalid(store_ptr_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
    heap::c_allocator.deallocate(field_values, actual_field_count);
    heap::c_allocator.deallocate(casted_fields, actual_field_count);

    return ir_get_deref(ira, source_instr, result_loc_inst, nullptr);
}
|
|
|
|
// Coerce a single-field anonymous struct literal to a union type: the lone
// literal field selects the union tag, and its value becomes the payload.
// Returns a comptime constant when the payload is comptime-known, otherwise
// stores the payload through a result location at runtime. Returns
// invalid_inst_gen on any error.
static IrInstGen *ir_analyze_struct_literal_to_union(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *union_type)
{
    Error err;
    ZigType *struct_type = value->value->type;

    assert(struct_type->id == ZigTypeIdStruct);
    assert(union_type->id == ZigTypeIdUnion);
    // Caller guarantees the literal has exactly one field.
    assert(struct_type->data.structure.src_field_count == 1);

    TypeStructField *only_field = struct_type->data.structure.fields[0];

    if ((err = type_resolve(ira->codegen, union_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    // The literal field's name must match some union field.
    TypeUnionField *union_field = find_union_type_field(union_type, only_field->name);
    if (union_field == nullptr) {
        ir_add_error_node(ira, only_field->decl_node,
            buf_sprintf("no field named '%s' in union '%s'",
                buf_ptr(only_field->name), buf_ptr(&union_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *payload_type = resolve_union_field_type(ira->codegen, union_field);
    if (payload_type == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Load the literal field's value and cast it to the payload type.
    IrInstGen *field_value = ir_analyze_struct_value_field_value(ira, source_instr, value, only_field);
    if (type_is_invalid(field_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_value = ir_implicit_cast(ira, field_value, payload_type);
    if (type_is_invalid(casted_value->value->type))
        return ira->codegen->invalid_inst_gen;

    // Comptime path: build a constant union value with tag + payload, and
    // link the payload's parent pointer back to the union constant.
    if (instr_is_comptime(casted_value)) {
        ZigValue *val = ir_resolve_const(ira, casted_value, UndefBad);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, source_instr, union_type);
        bigint_init_bigint(&result->value->data.x_union.tag, &union_field->enum_field->value);
        result->value->data.x_union.payload = val;

        val->parent.id = ConstParentIdUnion;
        val->parent.data.p_union.union_val = result->value;

        return result;
    }

    // Runtime path: materialize a result location for the union value.
    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, no_result_loc(),
            union_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) || result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
        return ira->codegen->invalid_inst_gen;
    }

    // Field pointer into the union sets the active tag (initializing = true),
    // then the payload is stored through it.
    IrInstGen *payload_ptr = ir_analyze_container_field_ptr(ira, only_field->name, source_instr,
            result_loc_inst, source_instr, union_type, true);
    if (type_is_invalid(payload_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *store_ptr_inst = ir_analyze_store_ptr(ira, source_instr, payload_ptr, casted_value, false);
    if (type_is_invalid(store_ptr_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // Reload the completed union value from the result location.
    return ir_get_deref(ira, source_instr, result_loc_inst, nullptr);
}
|
|
|
|
// Add a compile error and return ErrorSemanticAnalyzeFail if the pointer alignment does not work,
|
|
// otherwise return ErrorNone. Does not emit any instructions.
|
|
// Assumes that the pointer types have element types with the same ABI alignment. Avoids resolving the
|
|
// pointer types' alignments if both of the pointer types are ABI aligned.
|
|
static Error ir_cast_ptr_align(IrAnalyze *ira, IrInst* source_instr, ZigType *dest_ptr_type,
        ZigType *src_ptr_type, AstNode *src_source_node)
{
    // Check that the cast from src_ptr_type to dest_ptr_type does not claim a
    // stricter alignment than the source provides. Emits a compile error and
    // returns ErrorSemanticAnalyzeFail on violation, otherwise ErrorNone.
    // Emits no instructions. When neither pointer carries an explicit
    // alignment, both are ABI aligned (the caller guarantees matching element
    // ABI alignments), so no type resolution is needed.
    Error err;

    ir_assert(dest_ptr_type->id == ZigTypeIdPointer, source_instr);
    ir_assert(src_ptr_type->id == ZigTypeIdPointer, source_instr);

    bool both_abi_aligned = dest_ptr_type->data.pointer.explicit_alignment == 0 &&
            src_ptr_type->data.pointer.explicit_alignment == 0;
    if (both_abi_aligned)
        return ErrorNone;

    // At least one explicit alignment: resolve both element alignments.
    if ((err = type_resolve(ira->codegen, dest_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
        return ErrorSemanticAnalyzeFail;
    if ((err = type_resolve(ira->codegen, src_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
        return ErrorSemanticAnalyzeFail;

    uint32_t wanted_align = get_ptr_align(ira->codegen, dest_ptr_type);
    uint32_t actual_align = get_ptr_align(ira->codegen, src_ptr_type);
    if (wanted_align <= actual_align)
        return ErrorNone;

    ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
    add_error_note(ira->codegen, msg, src_source_node,
            buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&src_ptr_type->name), actual_align));
    add_error_note(ira->codegen, msg, source_instr->source_node,
            buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&dest_ptr_type->name), wanted_align));
    return ErrorSemanticAnalyzeFail;
}
|
|
|
|
static IrInstGen *ir_analyze_struct_value_field_value(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *struct_operand, TypeStructField *field)
{
    // Load the value of `field` out of a struct rvalue: take a const
    // reference to the operand, form the field pointer, and dereference it.
    IrInstGen *operand_ref = ir_get_ref(ira, source_instr, struct_operand, true, false);
    if (type_is_invalid(operand_ref->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *elem_ptr = ir_analyze_struct_field_ptr(ira, source_instr, field, operand_ref,
            struct_operand->value->type, false);
    if (type_is_invalid(elem_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_get_deref(ira, source_instr, elem_ptr, nullptr);
}
|
|
|
|
static IrInstGen *ir_analyze_optional_value_payload_value(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *optional_operand, bool safety_check_on)
{
    // Extract the payload value of an optional rvalue: take a const reference
    // to the operand, unwrap the optional through that pointer (with a
    // null-safety check when safety_check_on is set), and dereference the
    // payload pointer. Returns invalid_inst_gen if any step fails.
    //
    // Fix: propagate invalid types after each step instead of feeding a
    // possibly-invalid instruction into the next analysis call, matching the
    // sibling helper ir_analyze_struct_value_field_value.
    IrInstGen *opt_ptr = ir_get_ref(ira, source_instr, optional_operand, true, false);
    if (type_is_invalid(opt_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *payload_ptr = ir_analyze_unwrap_optional_payload(ira, source_instr, opt_ptr,
            safety_check_on, false);
    if (type_is_invalid(payload_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_get_deref(ira, source_instr, payload_ptr, nullptr);
}
|
|
|
|
static IrInstGen *ir_analyze_cast(IrAnalyze *ira, IrInst *source_instr,
|
|
ZigType *wanted_type, IrInstGen *value)
|
|
{
|
|
Error err;
|
|
ZigType *actual_type = value->value->type;
|
|
AstNode *source_node = source_instr->source_node;
|
|
|
|
if (type_is_invalid(wanted_type) || type_is_invalid(actual_type)) {
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
// This means the wanted type is anything.
|
|
if (wanted_type == ira->codegen->builtin_types.entry_anytype) {
|
|
return value;
|
|
}
|
|
|
|
// perfect match or non-const to const
|
|
ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
|
|
source_node, false);
|
|
if (const_cast_result.id == ConstCastResultIdInvalid)
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (const_cast_result.id == ConstCastResultIdOk) {
|
|
return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop);
|
|
}
|
|
|
|
if (const_cast_result.id == ConstCastResultIdFnCC) {
|
|
ir_assert(value->value->type->id == ZigTypeIdFn, source_instr);
|
|
// ConstCastResultIdFnCC is guaranteed to be the last one reported, meaning everything else is ok.
|
|
if (wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync &&
|
|
actual_type->data.fn.fn_type_id.cc == CallingConventionUnspecified)
|
|
{
|
|
ir_assert(value->value->data.x_ptr.special == ConstPtrSpecialFunction, source_instr);
|
|
ZigFn *fn = value->value->data.x_ptr.data.fn.fn_entry;
|
|
if (fn->inferred_async_node == nullptr) {
|
|
fn->inferred_async_node = source_instr->source_node;
|
|
}
|
|
return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop);
|
|
}
|
|
}
|
|
|
|
// cast from T to ?T
|
|
// note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
|
|
if (wanted_type->id == ZigTypeIdOptional) {
|
|
ZigType *wanted_child_type = wanted_type->data.maybe.child_type;
|
|
if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node,
|
|
false).id == ConstCastResultIdOk)
|
|
{
|
|
return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, nullptr);
|
|
} else if (actual_type->id == ZigTypeIdComptimeInt ||
|
|
actual_type->id == ZigTypeIdComptimeFloat)
|
|
{
|
|
if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) {
|
|
return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, nullptr);
|
|
} else {
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
} else if (
|
|
wanted_child_type->id == ZigTypeIdPointer &&
|
|
wanted_child_type->data.pointer.ptr_len == PtrLenUnknown &&
|
|
actual_type->id == ZigTypeIdPointer &&
|
|
actual_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
actual_type->data.pointer.child_type->id == ZigTypeIdArray)
|
|
{
|
|
if ((err = type_resolve(ira->codegen, actual_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
if ((err = type_resolve(ira->codegen, wanted_child_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, wanted_child_type) &&
|
|
types_match_const_cast_only(ira, wanted_child_type->data.pointer.child_type,
|
|
actual_type->data.pointer.child_type->data.array.child_type, source_node,
|
|
!wanted_child_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
IrInstGen *cast1 = ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value,
|
|
wanted_child_type);
|
|
if (type_is_invalid(cast1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
return ir_analyze_optional_wrap(ira, source_instr, cast1, wanted_type, nullptr);
|
|
}
|
|
}
|
|
}
|
|
|
|
// T to E!T
|
|
if (wanted_type->id == ZigTypeIdErrorUnion) {
|
|
if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
|
|
source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, nullptr);
|
|
} else if (actual_type->id == ZigTypeIdComptimeInt ||
|
|
actual_type->id == ZigTypeIdComptimeFloat)
|
|
{
|
|
if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.error_union.payload_type, true)) {
|
|
return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, nullptr);
|
|
} else {
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
}
|
|
|
|
// cast from T to E!?T
|
|
if (wanted_type->id == ZigTypeIdErrorUnion &&
|
|
wanted_type->data.error_union.payload_type->id == ZigTypeIdOptional &&
|
|
actual_type->id != ZigTypeIdOptional)
|
|
{
|
|
ZigType *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
|
|
if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk ||
|
|
actual_type->id == ZigTypeIdNull ||
|
|
actual_type->id == ZigTypeIdComptimeInt ||
|
|
actual_type->id == ZigTypeIdComptimeFloat)
|
|
{
|
|
IrInstGen *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
|
|
if (type_is_invalid(cast1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
|
|
if (type_is_invalid(cast2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
return cast2;
|
|
}
|
|
}
|
|
|
|
|
|
// cast from comptime-known number to another number type
|
|
if (instr_is_comptime(value) &&
|
|
(actual_type->id == ZigTypeIdInt || actual_type->id == ZigTypeIdComptimeInt ||
|
|
actual_type->id == ZigTypeIdFloat || actual_type->id == ZigTypeIdComptimeFloat) &&
|
|
(wanted_type->id == ZigTypeIdInt || wanted_type->id == ZigTypeIdComptimeInt ||
|
|
wanted_type->id == ZigTypeIdFloat || wanted_type->id == ZigTypeIdComptimeFloat))
|
|
{
|
|
if (value->value->special == ConstValSpecialUndef) {
|
|
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
|
|
result->value->special = ConstValSpecialUndef;
|
|
return result;
|
|
}
|
|
if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, true)) {
|
|
if (wanted_type->id == ZigTypeIdComptimeInt || wanted_type->id == ZigTypeIdInt) {
|
|
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
|
|
if (actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdInt) {
|
|
copy_const_val(ira->codegen, result->value, value->value);
|
|
result->value->type = wanted_type;
|
|
} else {
|
|
float_init_bigint(&result->value->data.x_bigint, value->value);
|
|
}
|
|
return result;
|
|
} else if (wanted_type->id == ZigTypeIdComptimeFloat || wanted_type->id == ZigTypeIdFloat) {
|
|
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
|
|
if (actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdInt) {
|
|
BigFloat bf;
|
|
bigfloat_init_bigint(&bf, &value->value->data.x_bigint);
|
|
float_init_bigfloat(result->value, &bf);
|
|
} else {
|
|
float_init_float(result->value, value->value);
|
|
}
|
|
return result;
|
|
}
|
|
zig_unreachable();
|
|
} else {
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
|
|
// widening conversion
|
|
if (wanted_type->id == ZigTypeIdInt &&
|
|
actual_type->id == ZigTypeIdInt &&
|
|
wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
|
|
wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
|
|
{
|
|
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// small enough unsigned ints can get casted to large enough signed ints
|
|
if (wanted_type->id == ZigTypeIdInt && wanted_type->data.integral.is_signed &&
|
|
actual_type->id == ZigTypeIdInt && !actual_type->data.integral.is_signed &&
|
|
wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
|
|
{
|
|
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// float widening conversion
|
|
if (wanted_type->id == ZigTypeIdFloat &&
|
|
actual_type->id == ZigTypeIdFloat &&
|
|
wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
|
|
{
|
|
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// *[N]T to ?[]T
|
|
if (wanted_type->id == ZigTypeIdOptional &&
|
|
is_slice(wanted_type->data.maybe.child_type) &&
|
|
actual_type->id == ZigTypeIdPointer &&
|
|
actual_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
actual_type->data.pointer.child_type->id == ZigTypeIdArray)
|
|
{
|
|
IrInstGen *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value);
|
|
if (type_is_invalid(cast1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
|
|
if (type_is_invalid(cast2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
return cast2;
|
|
}
|
|
|
|
// *[N]T to [*]T and [*c]T
|
|
if (wanted_type->id == ZigTypeIdPointer &&
|
|
(wanted_type->data.pointer.ptr_len == PtrLenUnknown || wanted_type->data.pointer.ptr_len == PtrLenC) &&
|
|
actual_type->id == ZigTypeIdPointer &&
|
|
actual_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
actual_type->data.pointer.child_type->id == ZigTypeIdArray &&
|
|
(!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) &&
|
|
(!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile))
|
|
{
|
|
ZigType *actual_array_type = actual_type->data.pointer.child_type;
|
|
if (wanted_type->data.pointer.sentinel == nullptr ||
|
|
(actual_array_type->data.array.sentinel != nullptr &&
|
|
const_values_equal(ira->codegen, wanted_type->data.pointer.sentinel,
|
|
actual_array_type->data.array.sentinel)))
|
|
{
|
|
if ((err = type_resolve(ira->codegen, actual_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
if ((err = type_resolve(ira->codegen, wanted_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, wanted_type) &&
|
|
types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
|
|
actual_type->data.pointer.child_type->data.array.child_type, source_node,
|
|
!wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
|
|
}
|
|
}
|
|
}
|
|
|
|
// *[N]T to []T
|
|
// *[N]T to E![]T
|
|
if ((is_slice(wanted_type) ||
|
|
(wanted_type->id == ZigTypeIdErrorUnion &&
|
|
is_slice(wanted_type->data.error_union.payload_type))) &&
|
|
actual_type->id == ZigTypeIdPointer &&
|
|
actual_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
actual_type->data.pointer.child_type->id == ZigTypeIdArray)
|
|
{
|
|
ZigType *slice_type = (wanted_type->id == ZigTypeIdErrorUnion) ?
|
|
wanted_type->data.error_union.payload_type : wanted_type;
|
|
ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
|
|
assert(slice_ptr_type->id == ZigTypeIdPointer);
|
|
ZigType *array_type = actual_type->data.pointer.child_type;
|
|
bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0
|
|
|| !actual_type->data.pointer.is_const);
|
|
|
|
if (const_ok && types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
|
|
array_type->data.array.child_type, source_node,
|
|
!slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk &&
|
|
(slice_ptr_type->data.pointer.sentinel == nullptr ||
|
|
(array_type->data.array.sentinel != nullptr &&
|
|
const_values_equal(ira->codegen, array_type->data.array.sentinel,
|
|
slice_ptr_type->data.pointer.sentinel))))
|
|
{
|
|
// If the pointers both have ABI align, it works.
|
|
// Or if the array length is 0, alignment doesn't matter.
|
|
bool ok_align = array_type->data.array.len == 0 ||
|
|
(slice_ptr_type->data.pointer.explicit_alignment == 0 &&
|
|
actual_type->data.pointer.explicit_alignment == 0);
|
|
if (!ok_align) {
|
|
// If either one has non ABI align, we have to resolve them both
|
|
if ((err = type_resolve(ira->codegen, actual_type->data.pointer.child_type,
|
|
ResolveStatusAlignmentKnown)))
|
|
{
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
if ((err = type_resolve(ira->codegen, slice_ptr_type->data.pointer.child_type,
|
|
ResolveStatusAlignmentKnown)))
|
|
{
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
ok_align = get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, slice_ptr_type);
|
|
}
|
|
if (ok_align) {
|
|
if (wanted_type->id == ZigTypeIdErrorUnion) {
|
|
IrInstGen *cast1 = ir_analyze_cast(ira, source_instr, slice_type, value);
|
|
if (type_is_invalid(cast1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
|
|
if (type_is_invalid(cast2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
return cast2;
|
|
} else {
|
|
return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, slice_type, nullptr);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// @Vector(N,T1) to @Vector(N,T2)
|
|
if (actual_type->id == ZigTypeIdVector && wanted_type->id == ZigTypeIdVector &&
|
|
actual_type->data.vector.len == wanted_type->data.vector.len)
|
|
{
|
|
ZigType *scalar_actual_type = actual_type->data.vector.elem_type;
|
|
ZigType *scalar_wanted_type = wanted_type->data.vector.elem_type;
|
|
|
|
// widening conversion
|
|
if (scalar_wanted_type->id == ZigTypeIdInt &&
|
|
scalar_actual_type->id == ZigTypeIdInt &&
|
|
scalar_wanted_type->data.integral.is_signed == scalar_actual_type->data.integral.is_signed &&
|
|
scalar_wanted_type->data.integral.bit_count >= scalar_actual_type->data.integral.bit_count)
|
|
{
|
|
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// small enough unsigned ints can get casted to large enough signed ints
|
|
if (scalar_wanted_type->id == ZigTypeIdInt && scalar_wanted_type->data.integral.is_signed &&
|
|
scalar_actual_type->id == ZigTypeIdInt && !scalar_actual_type->data.integral.is_signed &&
|
|
scalar_wanted_type->data.integral.bit_count > scalar_actual_type->data.integral.bit_count)
|
|
{
|
|
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// float widening conversion
|
|
if (scalar_wanted_type->id == ZigTypeIdFloat &&
|
|
scalar_actual_type->id == ZigTypeIdFloat &&
|
|
scalar_wanted_type->data.floating.bit_count >= scalar_actual_type->data.floating.bit_count)
|
|
{
|
|
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
|
|
}
|
|
}
|
|
|
|
// *@Frame(func) to anyframe->T or anyframe
|
|
// *@Frame(func) to ?anyframe->T or ?anyframe
|
|
// *@Frame(func) to E!anyframe->T or E!anyframe
|
|
if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
!actual_type->data.pointer.is_const &&
|
|
actual_type->data.pointer.child_type->id == ZigTypeIdFnFrame)
|
|
{
|
|
ZigType *anyframe_type;
|
|
if (wanted_type->id == ZigTypeIdAnyFrame) {
|
|
anyframe_type = wanted_type;
|
|
} else if (wanted_type->id == ZigTypeIdOptional &&
|
|
wanted_type->data.maybe.child_type->id == ZigTypeIdAnyFrame)
|
|
{
|
|
anyframe_type = wanted_type->data.maybe.child_type;
|
|
} else if (wanted_type->id == ZigTypeIdErrorUnion &&
|
|
wanted_type->data.error_union.payload_type->id == ZigTypeIdAnyFrame)
|
|
{
|
|
anyframe_type = wanted_type->data.error_union.payload_type;
|
|
} else {
|
|
anyframe_type = nullptr;
|
|
}
|
|
if (anyframe_type != nullptr) {
|
|
bool ok = true;
|
|
if (anyframe_type->data.any_frame.result_type != nullptr) {
|
|
ZigFn *fn = actual_type->data.pointer.child_type->data.frame.fn;
|
|
ZigType *fn_return_type = fn->type_entry->data.fn.fn_type_id.return_type;
|
|
if (anyframe_type->data.any_frame.result_type != fn_return_type) {
|
|
ok = false;
|
|
}
|
|
}
|
|
if (ok) {
|
|
IrInstGen *cast1 = ir_analyze_frame_ptr_to_anyframe(ira, source_instr, value, anyframe_type);
|
|
if (anyframe_type == wanted_type)
|
|
return cast1;
|
|
return ir_analyze_cast(ira, source_instr, wanted_type, cast1);
|
|
}
|
|
}
|
|
}
|
|
|
|
// anyframe->T to anyframe
|
|
if (actual_type->id == ZigTypeIdAnyFrame && actual_type->data.any_frame.result_type != nullptr &&
|
|
wanted_type->id == ZigTypeIdAnyFrame && wanted_type->data.any_frame.result_type == nullptr)
|
|
{
|
|
return ir_analyze_anyframe_to_anyframe(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// cast from null literal to maybe type
|
|
if (wanted_type->id == ZigTypeIdOptional &&
|
|
actual_type->id == ZigTypeIdNull)
|
|
{
|
|
return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// cast from null literal to C pointer
|
|
if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenC &&
|
|
actual_type->id == ZigTypeIdNull)
|
|
{
|
|
return ir_analyze_null_to_c_pointer(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// cast from E to E!T
|
|
if (wanted_type->id == ZigTypeIdErrorUnion &&
|
|
actual_type->id == ZigTypeIdErrorSet)
|
|
{
|
|
return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type, nullptr);
|
|
}
|
|
|
|
// cast from typed number to integer or float literal.
|
|
// works when the number is known at compile time
|
|
if (instr_is_comptime(value) &&
|
|
((actual_type->id == ZigTypeIdInt && wanted_type->id == ZigTypeIdComptimeInt) ||
|
|
(actual_type->id == ZigTypeIdFloat && wanted_type->id == ZigTypeIdComptimeFloat)))
|
|
{
|
|
return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// cast from enum literal to enum with matching field name
|
|
if (actual_type->id == ZigTypeIdEnumLiteral && wanted_type->id == ZigTypeIdEnum)
|
|
{
|
|
return ir_analyze_enum_literal(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// cast from enum literal to optional enum
|
|
if (actual_type->id == ZigTypeIdEnumLiteral &&
|
|
(wanted_type->id == ZigTypeIdOptional && wanted_type->data.maybe.child_type->id == ZigTypeIdEnum))
|
|
{
|
|
IrInstGen *result = ir_analyze_enum_literal(ira, source_instr, value, wanted_type->data.maybe.child_type);
|
|
if (type_is_invalid(result->value->type))
|
|
return result;
|
|
|
|
return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, nullptr);
|
|
}
|
|
|
|
// cast from enum literal to error union when payload is an enum
|
|
if (actual_type->id == ZigTypeIdEnumLiteral &&
|
|
(wanted_type->id == ZigTypeIdErrorUnion && wanted_type->data.error_union.payload_type->id == ZigTypeIdEnum))
|
|
{
|
|
IrInstGen *result = ir_analyze_enum_literal(ira, source_instr, value, wanted_type->data.error_union.payload_type);
|
|
if (type_is_invalid(result->value->type))
|
|
return result;
|
|
|
|
return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, nullptr);
|
|
}
|
|
|
|
// cast from union to the enum type of the union
|
|
if (actual_type->id == ZigTypeIdUnion && wanted_type->id == ZigTypeIdEnum) {
|
|
if ((err = type_resolve(ira->codegen, actual_type, ResolveStatusZeroBitsKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (actual_type->data.unionation.tag_type == wanted_type) {
|
|
return ir_analyze_union_to_tag(ira, source_instr, value, wanted_type);
|
|
}
|
|
}
|
|
|
|
// enum to union which has the enum as the tag type, or
|
|
// enum literal to union which has a matching enum as the tag type
|
|
if (is_tagged_union(wanted_type) && (actual_type->id == ZigTypeIdEnum ||
|
|
actual_type->id == ZigTypeIdEnumLiteral))
|
|
{
|
|
return ir_analyze_enum_to_union(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// cast from *T to *[1]T
|
|
if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle)
|
|
{
|
|
ZigType *array_type = wanted_type->data.pointer.child_type;
|
|
if (array_type->id == ZigTypeIdArray && array_type->data.array.len == 1 &&
|
|
types_match_const_cast_only(ira, array_type->data.array.child_type,
|
|
actual_type->data.pointer.child_type, source_node,
|
|
!wanted_type->data.pointer.is_const).id == ConstCastResultIdOk &&
|
|
// `types_match_const_cast_only` only gets info for child_types
|
|
(!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) &&
|
|
(!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile))
|
|
{
|
|
if ((err = ir_cast_ptr_align(ira, source_instr, wanted_type, actual_type, value->base.source_node)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type);
|
|
}
|
|
}
|
|
|
|
// [:x]T to [*:x]T
|
|
// [:x]T to [*c]T
|
|
if (wanted_type->id == ZigTypeIdPointer && is_slice(actual_type) &&
|
|
((wanted_type->data.pointer.ptr_len == PtrLenUnknown && wanted_type->data.pointer.sentinel != nullptr) ||
|
|
wanted_type->data.pointer.ptr_len == PtrLenC))
|
|
{
|
|
ZigType *slice_ptr_type = resolve_struct_field_type(ira->codegen,
|
|
actual_type->data.structure.fields[slice_ptr_index]);
|
|
if (types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
|
|
slice_ptr_type->data.pointer.child_type, source_node,
|
|
!wanted_type->data.pointer.is_const).id == ConstCastResultIdOk &&
|
|
(slice_ptr_type->data.pointer.sentinel != nullptr &&
|
|
(wanted_type->data.pointer.ptr_len == PtrLenC ||
|
|
const_values_equal(ira->codegen, wanted_type->data.pointer.sentinel,
|
|
slice_ptr_type->data.pointer.sentinel))))
|
|
{
|
|
TypeStructField *ptr_field = actual_type->data.structure.fields[slice_ptr_index];
|
|
IrInstGen *slice_ptr = ir_analyze_struct_value_field_value(ira, source_instr, value, ptr_field);
|
|
return ir_implicit_cast2(ira, source_instr, slice_ptr, wanted_type);
|
|
}
|
|
}
|
|
|
|
// cast from *T and [*]T to *c_void and ?*c_void
|
|
// but don't do it if the actual type is a double pointer
|
|
if (is_pointery_and_elem_is_not_pointery(actual_type)) {
|
|
ZigType *dest_ptr_type = nullptr;
|
|
if (wanted_type->id == ZigTypeIdPointer &&
|
|
actual_type->id != ZigTypeIdOptional &&
|
|
wanted_type->data.pointer.child_type == ira->codegen->builtin_types.entry_c_void)
|
|
{
|
|
dest_ptr_type = wanted_type;
|
|
} else if (wanted_type->id == ZigTypeIdOptional &&
|
|
wanted_type->data.maybe.child_type->id == ZigTypeIdPointer &&
|
|
wanted_type->data.maybe.child_type->data.pointer.child_type == ira->codegen->builtin_types.entry_c_void)
|
|
{
|
|
dest_ptr_type = wanted_type->data.maybe.child_type;
|
|
}
|
|
if (dest_ptr_type != nullptr) {
|
|
return ir_analyze_ptr_cast(ira, source_instr, value, source_instr, wanted_type, source_instr, true,
|
|
false);
|
|
}
|
|
}
|
|
|
|
// cast from T to *T where T is zero bits
|
|
if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
|
|
types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
|
|
actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
bool has_bits;
|
|
if ((err = type_has_bits2(ira->codegen, actual_type, &has_bits)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (!has_bits) {
|
|
return ir_get_ref(ira, source_instr, value, false, false);
|
|
}
|
|
}
|
|
|
|
// cast from @Vector(N, T) to [N]T
|
|
if (wanted_type->id == ZigTypeIdArray && actual_type->id == ZigTypeIdVector &&
|
|
wanted_type->data.array.len == actual_type->data.vector.len &&
|
|
types_match_const_cast_only(ira, wanted_type->data.array.child_type,
|
|
actual_type->data.vector.elem_type, source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
return ir_analyze_vector_to_array(ira, source_instr, value, wanted_type, nullptr);
|
|
}
|
|
|
|
// cast from [N]T to @Vector(N, T)
|
|
if (actual_type->id == ZigTypeIdArray && wanted_type->id == ZigTypeIdVector &&
|
|
actual_type->data.array.len == wanted_type->data.vector.len &&
|
|
types_match_const_cast_only(ira, actual_type->data.array.child_type,
|
|
wanted_type->data.vector.elem_type, source_node, false).id == ConstCastResultIdOk)
|
|
{
|
|
return ir_analyze_array_to_vector(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// casting between C pointers and normal pointers
|
|
if (wanted_type->id == ZigTypeIdPointer && actual_type->id == ZigTypeIdPointer &&
|
|
(wanted_type->data.pointer.ptr_len == PtrLenC || actual_type->data.pointer.ptr_len == PtrLenC) &&
|
|
types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
|
|
actual_type->data.pointer.child_type, source_node,
|
|
!wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
|
|
{
|
|
return ir_analyze_ptr_cast(ira, source_instr, value, source_instr, wanted_type, source_instr, true, false);
|
|
}
|
|
|
|
// cast from integer to C pointer
|
|
if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenC &&
|
|
(actual_type->id == ZigTypeIdInt || actual_type->id == ZigTypeIdComptimeInt))
|
|
{
|
|
return ir_analyze_int_to_c_ptr(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// cast from inferred struct type to array, union, or struct
|
|
if (is_anon_container(actual_type)) {
|
|
const bool is_array_init =
|
|
actual_type->data.structure.special == StructSpecialInferredTuple;
|
|
const uint32_t field_count = actual_type->data.structure.src_field_count;
|
|
|
|
if (wanted_type->id == ZigTypeIdArray && (is_array_init || field_count == 0) &&
|
|
wanted_type->data.array.len == field_count)
|
|
{
|
|
return ir_analyze_struct_literal_to_array(ira, source_instr, value, wanted_type);
|
|
} else if (wanted_type->id == ZigTypeIdStruct && !is_slice(wanted_type) &&
|
|
(!is_array_init || field_count == 0))
|
|
{
|
|
return ir_analyze_struct_literal_to_struct(ira, source_instr, value, wanted_type);
|
|
} else if (wanted_type->id == ZigTypeIdUnion && !is_array_init && field_count == 1) {
|
|
return ir_analyze_struct_literal_to_union(ira, source_instr, value, wanted_type);
|
|
}
|
|
}
|
|
|
|
// cast from undefined to anything
|
|
if (actual_type->id == ZigTypeIdUndefined) {
|
|
return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type);
|
|
}
|
|
|
|
// T to ?U, where T implicitly casts to U
|
|
if (wanted_type->id == ZigTypeIdOptional && actual_type->id != ZigTypeIdOptional) {
|
|
IrInstGen *cast1 = ir_implicit_cast2(ira, source_instr, value, wanted_type->data.maybe.child_type);
|
|
if (type_is_invalid(cast1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
return ir_implicit_cast2(ira, source_instr, cast1, wanted_type);
|
|
}
|
|
|
|
// T to E!U, where T implicitly casts to U
|
|
if (wanted_type->id == ZigTypeIdErrorUnion && actual_type->id != ZigTypeIdErrorUnion &&
|
|
actual_type->id != ZigTypeIdErrorSet)
|
|
{
|
|
IrInstGen *cast1 = ir_implicit_cast2(ira, source_instr, value, wanted_type->data.error_union.payload_type);
|
|
if (type_is_invalid(cast1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
return ir_implicit_cast2(ira, source_instr, cast1, wanted_type);
|
|
}
|
|
|
|
ErrorMsg *parent_msg = ir_add_error_node(ira, source_instr->source_node,
|
|
buf_sprintf("expected type '%s', found '%s'",
|
|
buf_ptr(&wanted_type->name),
|
|
buf_ptr(&actual_type->name)));
|
|
report_recursive_error(ira, source_instr->source_node, &const_cast_result, parent_msg);
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
// Implicitly coerces `value` to `expected_type`, reporting errors against
// `value_source_instr`. A null `expected_type` means "no constraint", and a
// value that already has the right type (or is of the unreachable type) is
// passed through untouched; only genuine mismatches go through the full
// cast analysis.
static IrInstGen *ir_implicit_cast2(IrAnalyze *ira, IrInst *value_source_instr,
        IrInstGen *value, ZigType *expected_type)
{
    assert(value != nullptr);
    assert(value->value->type != nullptr);
    assert(!type_is_invalid(value->value->type));
    assert(expected_type == nullptr || !type_is_invalid(expected_type));

    // Fast paths where no cast is required.
    bool passthrough = (expected_type == nullptr) ||
        (expected_type == value->value->type) ||
        (value->value->type->id == ZigTypeIdUnreachable);
    if (passthrough)
        return value;

    return ir_analyze_cast(ira, value_source_instr, expected_type, value);
}
|
|
|
|
static IrInstGen *ir_implicit_cast(IrAnalyze *ira, IrInstGen *value, ZigType *expected_type) {
|
|
return ir_implicit_cast2(ira, &value->base, value, expected_type);
|
|
}
|
|
|
|
// Returns the element type of a pointer value. For ordinary pointers this is
// just the declared child type; for `anytype` pointers the element type is
// only known from the comptime pointee, so the value is lazily resolved and
// the pointee's type is returned instead. Returns the invalid type on a
// resolution failure.
static ZigType *get_ptr_elem_type(CodeGen *g, IrInstGen *ptr) {
    ir_assert_gen(ptr->value->type->id == ZigTypeIdPointer, ptr);
    ZigType *elem_type = ptr->value->type->data.pointer.child_type;
    // Common case: the pointer carries a concrete child type.
    if (elem_type != g->builtin_types.entry_anytype)
        return elem_type;

    // `anytype` child: force any lazy value so the pointee is inspectable.
    // NOTE(review): a nonzero return here is treated as failure — presumably
    // ir_resolve_lazy follows the file's Error-return convention; confirm.
    if (ir_resolve_lazy(g, ptr->base.source_node, ptr->value))
        return g->builtin_types.entry_invalid;

    // An `anytype` pointer must be comptime-known for its pointee type to exist.
    assert(value_is_comptime(ptr->value));
    ZigValue *pointee = const_ptr_pointee_unchecked(g, ptr->value);
    return pointee->type;
}
|
|
|
|
// Analyzes a pointer dereference (`ptr.*`), producing either a comptime
// constant or a runtime load instruction.
//
// Resolution order:
//   1. reject non-pointer operands with a compile error;
//   2. if the child type has exactly one possible value, fold to that value;
//   3. if the pointer is comptime-known and the pointee is not runtime,
//      read the constant through the pointer;
//   4. elide the load entirely when `ptr` is a Ref instruction (deref of a
//      ref is the original operand);
//   5. handle runtime-indexed vector element pointers specially;
//   6. otherwise emit a LoadPtr instruction, with a result location when the
//      loaded value is handled by-pointer and packed in a host integer.
static IrInstGen *ir_get_deref(IrAnalyze *ira, IrInst* source_instruction, IrInstGen *ptr,
        ResultLoc *result_loc)
{
    Error err;
    ZigType *ptr_type = ptr->value->type;
    if (type_is_invalid(ptr_type))
        return ira->codegen->invalid_inst_gen;

    if (ptr_type->id != ZigTypeIdPointer) {
        ir_add_error_node(ira, source_instruction->source_node,
            buf_sprintf("attempt to dereference non-pointer type '%s'",
                buf_ptr(&ptr_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *child_type = ptr_type->data.pointer.child_type;
    if (type_is_invalid(child_type))
        return ira->codegen->invalid_inst_gen;
    // if the child type has one possible value, the deref is comptime
    switch (type_has_one_possible_value(ira->codegen, child_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            return ir_const_move(ira, source_instruction,
                 get_the_one_possible_value(ira->codegen, child_type));
        case OnePossibleValueNo:
            break;
    }
    if (instr_is_comptime(ptr)) {
        if (ptr->value->special == ConstValSpecialUndef) {
            // If we are in a TypeOf call, we return an undefined value instead of erroring
            // since we know the type.
            if (get_scope_typeof(source_instruction->scope)) {
                return ir_const_undef(ira, source_instruction, child_type);
            }

            ir_add_error(ira, &ptr->base, buf_sprintf("attempt to dereference undefined value"));
            return ira->codegen->invalid_inst_gen;
        }
        // Runtime-var pointers may be mutated between now and runtime, so only
        // read through pointers whose pointee is stable at comptime.
        if (ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
            ZigValue *pointee = const_ptr_pointee_unchecked(ira->codegen, ptr->value);
            // `anytype` pointers take their element type from the pointee.
            if (child_type == ira->codegen->builtin_types.entry_anytype) {
                child_type = pointee->type;
            }
            if (pointee->special != ConstValSpecialRuntime) {
                IrInstGen *result = ir_const(ira, source_instruction, child_type);

                if ((err = ir_read_const_ptr(ira, ira->codegen, source_instruction->source_node, result->value,
                                ptr->value)))
                {
                    return ira->codegen->invalid_inst_gen;
                }
                result->value->type = child_type;
                return result;
            }
        }
    }

    // if the instruction is a const ref instruction we can skip it
    if (ptr->id == IrInstGenIdRef) {
        IrInstGenRef *ref_inst = reinterpret_cast<IrInstGenRef *>(ptr);
        return ref_inst->operand;
    }

    // If the instruction is a element pointer instruction to a vector, we emit
    // vector element extract instruction rather than load pointer. If the
    // pointer type has non-VECTOR_INDEX_RUNTIME value, it would have been
    // possible to implement this in the codegen for IrInstGenLoadPtr.
    // However if it has VECTOR_INDEX_RUNTIME then we must emit a compile error
    // if the vector index cannot be determined right here, right now, because
    // the type information does not contain enough information to actually
    // perform a dereference.
    if (ptr_type->data.pointer.vector_index == VECTOR_INDEX_RUNTIME) {
        if (ptr->id == IrInstGenIdElemPtr) {
            IrInstGenElemPtr *elem_ptr = (IrInstGenElemPtr *)ptr;
            // Load the whole vector, then extract the runtime-indexed element.
            IrInstGen *vector_loaded = ir_get_deref(ira, &elem_ptr->array_ptr->base,
                elem_ptr->array_ptr, nullptr);
            IrInstGen *elem_index = elem_ptr->elem_index;
            return ir_build_vector_extract_elem(ira, source_instruction, vector_loaded, elem_index);
        }
        ir_add_error(ira, &ptr->base,
            buf_sprintf("unable to determine vector element index of type '%s'", buf_ptr(&ptr_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Loads from a pointer packed inside a host integer need scratch memory
    // when the child type is handled by-pointer, so resolve a result location.
    IrInstGen *result_loc_inst;
    if (ptr_type->data.pointer.host_int_bytes != 0 && handle_is_ptr(ira->codegen, child_type)) {
        if (result_loc == nullptr) result_loc = no_result_loc();
        result_loc_inst = ir_resolve_result(ira, source_instruction, result_loc, child_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) || result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
            return result_loc_inst;
        }
    } else {
        result_loc_inst = nullptr;
    }

    return ir_build_load_ptr_gen(ira, source_instruction, ptr, child_type, result_loc_inst);
}
|
|
|
|
// Validates a comptime-known alignment value and writes it to *out.
// The alignment must be a nonzero power of two; violations are reported as
// compile errors against `source_node` and the function returns false.
static bool ir_resolve_const_align(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node,
        ZigValue *const_val, uint32_t *out)
{
    Error err;
    if ((err = ir_resolve_const_val(codegen, exec, source_node, const_val, UndefBad)))
        return false;

    const uint32_t bytes = bigint_as_u32(&const_val->data.x_bigint);

    if (bytes == 0) {
        exec_add_error_node_gen(codegen, exec, source_node, buf_sprintf("alignment must be >= 1"));
        return false;
    }
    if (!is_power_of_2(bytes)) {
        exec_add_error_node_gen(codegen, exec, source_node,
                buf_sprintf("alignment value %" PRIu32 " is not a power of 2", bytes));
        return false;
    }

    *out = bytes;
    return true;
}
|
|
|
|
// Comptime-evaluates `value` as an alignment amount and writes it to *out.
//
// Special case: the pattern `*align(@alignOf(T)) T` is recognized when the
// value is still a lazy @alignOf whose target is exactly `elem_type`; it is
// answered with 0 without ever computing the actual alignment, avoiding a
// dependency loop on the type's layout.
static bool ir_resolve_align(IrAnalyze *ira, IrInstGen *value, ZigType *elem_type, uint32_t *out) {
    if (type_is_invalid(value->value->type))
        return false;

    bool is_lazy_align_of = value->value->special == ConstValSpecialLazy &&
        value->value->data.x_lazy->id == LazyValueIdAlignOf;
    if (elem_type != nullptr && is_lazy_align_of) {
        LazyValueAlignOf *align_of = reinterpret_cast<LazyValueAlignOf *>(value->value->data.x_lazy);

        ZigType *align_of_target = ir_resolve_type(align_of->ira, align_of->target_type);
        if (type_is_invalid(align_of_target))
            return false;

        if (align_of_target == elem_type) {
            *out = 0; // `@alignOf(T)` applied to T itself; no explicit alignment needed
            return true;
        }
    }

    // General path: coerce to the alignment-amount integer type and read the
    // comptime constant.
    IrInstGen *casted = ir_implicit_cast(ira, value, get_align_amt_type(ira->codegen));
    if (type_is_invalid(casted->value->type))
        return false;

    return ir_resolve_const_align(ira->codegen, ira->new_irb.exec, value->base.source_node,
            casted->value, out);
}
|
|
|
|
// Comptime-evaluates `value` as an unsigned integer of type `int_type` and
// writes it through `out`. Returns false after reporting an error when the
// value cannot be coerced or is not comptime-known.
static bool ir_resolve_unsigned(IrAnalyze *ira, IrInstGen *value, ZigType *int_type, uint64_t *out) {
    if (type_is_invalid(value->value->type))
        return false;

    IrInstGen *casted = ir_implicit_cast(ira, value, int_type);
    if (type_is_invalid(casted->value->type))
        return false;

    ZigValue *resolved = ir_resolve_const(ira, casted, UndefBad);
    if (resolved == nullptr)
        return false;

    *out = bigint_as_u64(&resolved->data.x_bigint);
    return true;
}
|
|
|
|
// Comptime-evaluates `value` as a `usize` and writes it through `out`.
static bool ir_resolve_usize(IrAnalyze *ira, IrInstGen *value, uint64_t *out) {
    ZigType *usize_type = ira->codegen->builtin_types.entry_usize;
    return ir_resolve_unsigned(ira, value, usize_type, out);
}
|
|
|
|
// Comptime-evaluates `value` as a `bool` and writes it through `out`.
// Returns false after reporting an error when the value cannot be coerced
// or is not comptime-known.
static bool ir_resolve_bool(IrAnalyze *ira, IrInstGen *value, bool *out) {
    if (type_is_invalid(value->value->type))
        return false;

    IrInstGen *casted = ir_implicit_cast(ira, value, ira->codegen->builtin_types.entry_bool);
    if (type_is_invalid(casted->value->type))
        return false;

    ZigValue *resolved = ir_resolve_const(ira, casted, UndefBad);
    if (resolved == nullptr)
        return false;

    *out = resolved->data.x_bool;
    return true;
}
|
|
|
|
// Resolves an optional `comptime` condition operand. A missing operand
// (null) means "not comptime" and succeeds with *out = false; otherwise the
// operand is evaluated as a bool.
static bool ir_resolve_comptime(IrAnalyze *ira, IrInstGen *value, bool *out) {
    if (value == nullptr) {
        *out = false;
        return true;
    }
    return ir_resolve_bool(ira, value, out);
}
|
|
|
|
// Comptime-evaluates `value` as the builtin `ReduceOp` enum and writes the
// tag through `out`. Returns false after reporting an error on failure.
static bool ir_resolve_reduce_op(IrAnalyze *ira, IrInstGen *value, ReduceOp *out) {
    if (type_is_invalid(value->value->type))
        return false;

    // Coerce to the builtin enum so field names are checked by the cast.
    ZigType *wanted_type = get_builtin_type(ira->codegen, "ReduceOp");
    IrInstGen *casted = ir_implicit_cast(ira, value, wanted_type);
    if (type_is_invalid(casted->value->type))
        return false;

    ZigValue *resolved = ir_resolve_const(ira, casted, UndefBad);
    if (resolved == nullptr)
        return false;

    *out = (ReduceOp)bigint_as_u32(&resolved->data.x_enum_tag);
    return true;
}
|
|
|
|
// Comptime-evaluates `value` as the builtin `AtomicOrder` enum and writes
// the tag through `out`. Returns false after reporting an error on failure.
static bool ir_resolve_atomic_order(IrAnalyze *ira, IrInstGen *value, AtomicOrder *out) {
    if (type_is_invalid(value->value->type))
        return false;

    // Coerce to the builtin enum so field names are checked by the cast.
    ZigType *wanted_type = get_builtin_type(ira->codegen, "AtomicOrder");
    IrInstGen *casted = ir_implicit_cast(ira, value, wanted_type);
    if (type_is_invalid(casted->value->type))
        return false;

    ZigValue *resolved = ir_resolve_const(ira, casted, UndefBad);
    if (resolved == nullptr)
        return false;

    *out = (AtomicOrder)bigint_as_u32(&resolved->data.x_enum_tag);
    return true;
}
|
|
|
|
// Comptime-evaluates `value` as the builtin `AtomicRmwOp` enum and writes
// the tag through `out`. Returns false after reporting an error on failure.
static bool ir_resolve_atomic_rmw_op(IrAnalyze *ira, IrInstGen *value, AtomicRmwOp *out) {
    if (type_is_invalid(value->value->type))
        return false;

    // Coerce to the builtin enum so field names are checked by the cast.
    ZigType *wanted_type = get_builtin_type(ira->codegen, "AtomicRmwOp");
    IrInstGen *casted = ir_implicit_cast(ira, value, wanted_type);
    if (type_is_invalid(casted->value->type))
        return false;

    ZigValue *resolved = ir_resolve_const(ira, casted, UndefBad);
    if (resolved == nullptr)
        return false;

    *out = (AtomicRmwOp)bigint_as_u32(&resolved->data.x_enum_tag);
    return true;
}
|
|
|
|
// Comptime-evaluates `value` as the builtin `GlobalLinkage` enum and writes
// the tag through `out`. Returns false after reporting an error on failure.
static bool ir_resolve_global_linkage(IrAnalyze *ira, IrInstGen *value, GlobalLinkageId *out) {
    if (type_is_invalid(value->value->type))
        return false;

    // Coerce to the builtin enum so field names are checked by the cast.
    ZigType *wanted_type = get_builtin_type(ira->codegen, "GlobalLinkage");
    IrInstGen *casted = ir_implicit_cast(ira, value, wanted_type);
    if (type_is_invalid(casted->value->type))
        return false;

    ZigValue *resolved = ir_resolve_const(ira, casted, UndefBad);
    if (resolved == nullptr)
        return false;

    *out = (GlobalLinkageId)bigint_as_u32(&resolved->data.x_enum_tag);
    return true;
}
|
|
|
|
// Comptime-evaluates `value` as the builtin `FloatMode` enum and writes the
// tag through `out`. Returns false after reporting an error on failure.
static bool ir_resolve_float_mode(IrAnalyze *ira, IrInstGen *value, FloatMode *out) {
    if (type_is_invalid(value->value->type))
        return false;

    // Coerce to the builtin enum so field names are checked by the cast.
    ZigType *wanted_type = get_builtin_type(ira->codegen, "FloatMode");
    IrInstGen *casted = ir_implicit_cast(ira, value, wanted_type);
    if (type_is_invalid(casted->value->type))
        return false;

    ZigValue *resolved = ir_resolve_const(ira, casted, UndefBad);
    if (resolved == nullptr)
        return false;

    *out = (FloatMode)bigint_as_u32(&resolved->data.x_enum_tag);
    return true;
}
|
|
|
|
// Comptime-evaluates `value` as a `[]const u8` and returns its contents as
// a Buf, or nullptr after reporting an error. When the backing array is
// already stored as a Buf of exactly the right length, that Buf is returned
// directly; otherwise the bytes are copied out element by element, erroring
// on any undefined element.
static Buf *ir_resolve_str(IrAnalyze *ira, IrInstGen *value) {
    if (type_is_invalid(value->value->type))
        return nullptr;

    // Build the `[]const u8` slice type and coerce the operand to it.
    ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
            true, false, PtrLenUnknown, 0, 0, 0, false);
    ZigType *str_type = get_slice_type(ira->codegen, ptr_type);
    IrInstGen *casted_value = ir_implicit_cast(ira, value, str_type);
    if (type_is_invalid(casted_value->value->type))
        return nullptr;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return nullptr;

    // A slice is a struct of { ptr, len }.
    ZigValue *ptr_field = const_val->data.x_struct.fields[slice_ptr_index];
    ZigValue *len_field = const_val->data.x_struct.fields[slice_len_index];

    assert(ptr_field->data.x_ptr.special == ConstPtrSpecialBaseArray);
    ZigValue *array_val = ptr_field->data.x_ptr.data.base_array.array_val;
    expand_undef_array(ira->codegen, array_val);
    size_t len = bigint_as_usize(&len_field->data.x_bigint);
    // Fast path: the array already lives in a Buf covering the whole slice.
    if (array_val->data.x_array.special == ConstArraySpecialBuf && len == buf_len(array_val->data.x_array.data.s_buf)) {
        return array_val->data.x_array.data.s_buf;
    }
    // Slow path: copy each element, honoring the slice's start offset.
    Buf *result = buf_alloc();
    buf_resize(result, len);
    for (size_t i = 0; i < len; i += 1) {
        size_t new_index = ptr_field->data.x_ptr.data.base_array.elem_index + i;
        ZigValue *char_val = &array_val->data.x_array.data.s_none.elements[new_index];
        if (char_val->special == ConstValSpecialUndef) {
            ir_add_error(ira, &casted_value->base, buf_sprintf("use of undefined value"));
            return nullptr;
        }
        uint64_t big_c = bigint_as_u64(&char_val->data.x_bigint);
        assert(big_c <= UINT8_MAX);
        uint8_t c = (uint8_t)big_c;
        buf_ptr(result)[i] = c;
    }
    return result;
}
|
|
|
|
// Records a returned value so the function's inferred (implicit) return type
// can be computed from all return sites, skipping the value when the result
// location already accounted for it. Always yields a void constant.
static IrInstGen *ir_analyze_instruction_add_implicit_return_type(IrAnalyze *ira,
        IrInstSrcAddImplicitReturnType *instruction)
{
    IrInstGen *value = instruction->value->child;
    if (type_is_invalid(value->value->type))
        return ir_unreach_error(ira);

    bool already_recorded = instruction->result_loc_ret != nullptr &&
        instruction->result_loc_ret->implicit_return_type_done;
    if (!already_recorded) {
        ira->src_implicit_return_type_list.append(value);
    }

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Analyzes a `return` instruction: coerces the operand to the function's
// declared return type, rejects returning the address of a stack local, and
// emits the runtime return. When the operand is absent — or the return type
// is handled by-pointer and the operand is runtime-known — the result
// location mechanism has already stored the value, so a bare return is
// emitted.
static IrInstGen *ir_analyze_instruction_return(IrAnalyze *ira, IrInstSrcReturn *instruction) {
    if (instruction->operand == nullptr) {
        // result location mechanism took care of it.
        IrInstGen *result = ir_build_return_gen(ira, &instruction->base.base, nullptr);
        return ir_finish_anal(ira, result);
    }

    IrInstGen *operand = instruction->operand->child;
    if (type_is_invalid(operand->value->type))
        return ir_unreach_error(ira);

    IrInstGen *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
    if (type_is_invalid(casted_operand->value->type)) {
        // Point the user at where the return type was declared, when known.
        AstNode *source_node = ira->explicit_return_type_source_node;
        if (source_node != nullptr) {
            ErrorMsg *msg = ira->codegen->errors.last();
            add_error_note(ira->codegen, msg, source_node,
                    buf_sprintf("return type declared here"));
        }
        return ir_unreach_error(ira);
    }

    if (!instr_is_comptime(operand) && ira->explicit_return_type != nullptr &&
            handle_is_ptr(ira->codegen, ira->explicit_return_type))
    {
        // result location mechanism took care of it.
        IrInstGen *result = ir_build_return_gen(ira, &instruction->base.base, nullptr);
        return ir_finish_anal(ira, result);
    }

    // Returning a pointer hinted to point at the current stack frame would
    // dangle after the function returns.
    if (casted_operand->value->special == ConstValSpecialRuntime &&
        casted_operand->value->type->id == ZigTypeIdPointer &&
        casted_operand->value->data.rh_ptr == RuntimeHintPtrStack)
    {
        ir_add_error(ira, &instruction->operand->base, buf_sprintf("function returns address of local variable"));
        return ir_unreach_error(ira);
    }

    IrInstGen *result = ir_build_return_gen(ira, &instruction->base.base, casted_operand);
    return ir_finish_anal(ira, result);
}
|
|
|
|
// Analyzes a source-level constant instruction by moving its value into a
// gen-IR constant at the same source location.
static IrInstGen *ir_analyze_instruction_const(IrAnalyze *ira, IrInstSrcConst *instruction) {
    IrInst *source = &instruction->base.base;
    return ir_const_move(ira, source, instruction->value);
}
|
|
|
|
// Analyzes a boolean binary operation (`and` / `or`). Both operands are
// coerced to bool; when both are comptime-known the operation is folded to
// a constant, otherwise a runtime bin-op instruction is emitted.
static IrInstGen *ir_analyze_bin_op_bool(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
    IrInstGen *lhs = bin_op_instruction->op1->child;
    if (type_is_invalid(lhs->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *rhs = bin_op_instruction->op2->child;
    if (type_is_invalid(rhs->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *bool_type = ira->codegen->builtin_types.entry_bool;

    IrInstGen *casted_lhs = ir_implicit_cast(ira, lhs, bool_type);
    if (type_is_invalid(casted_lhs->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_rhs = ir_implicit_cast(ira, rhs, bool_type);
    if (type_is_invalid(casted_rhs->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(casted_lhs) && instr_is_comptime(casted_rhs)) {
        // Both sides comptime-known: fold to a constant bool.
        ZigValue *lhs_val = ir_resolve_const(ira, casted_lhs, UndefBad);
        if (lhs_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *rhs_val = ir_resolve_const(ira, casted_rhs, UndefBad);
        if (rhs_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        assert(casted_lhs->value->type->id == ZigTypeIdBool);
        assert(casted_rhs->value->type->id == ZigTypeIdBool);
        bool folded;
        switch (bin_op_instruction->op_id) {
            case IrBinOpBoolOr:
                folded = lhs_val->data.x_bool || rhs_val->data.x_bool;
                break;
            case IrBinOpBoolAnd:
                folded = lhs_val->data.x_bool && rhs_val->data.x_bool;
                break;
            default:
                zig_unreachable();
        }
        return ir_const_bool(ira, &bin_op_instruction->base.base, folded);
    }

    return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, bool_type,
            bin_op_instruction->op_id, casted_lhs, casted_rhs, bin_op_instruction->safety_check_on);
}
|
|
|
|
// Converts a three-way comparison result into the boolean answer for the
// given comparison operator. Only comparison op-ids are legal here.
static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) {
    switch (op_id) {
        case IrBinOpCmpEq:          return cmp == CmpEQ;
        case IrBinOpCmpNotEq:       return cmp != CmpEQ;
        case IrBinOpCmpLessThan:    return cmp == CmpLT;
        case IrBinOpCmpGreaterThan: return cmp == CmpGT;
        case IrBinOpCmpLessOrEq:    return cmp != CmpGT; // "<=" is "not greater"
        case IrBinOpCmpGreaterOrEq: return cmp != CmpLT; // ">=" is "not less"
        default:
            zig_unreachable();
    }
}
|
|
|
|
// Stores the "null" state into a static optional value. The representation
// of null depends on the optional's encoding: pointer-like optionals encode
// null in the pointer, optional error sets use a null error entry, and the
// general encoding uses a null payload pointer. A value of the null type is
// already null and is left alone.
static void set_optional_value_to_null(ZigValue *val) {
    assert(val->special == ConstValSpecialStatic);
    if (val->type->id == ZigTypeIdNull)
        return; // the null type needs no state

    assert(val->type->id == ZigTypeIdOptional);
    if (get_src_ptr_type(val->type) != nullptr) {
        // pointer-like optional: null lives in the pointer itself
        val->data.x_ptr.special = ConstPtrSpecialNull;
        return;
    }
    if (is_opt_err_set(val->type)) {
        val->data.x_err_set = nullptr;
        return;
    }
    val->data.x_optional = nullptr;
}
|
|
|
|
// Stores `payload` into a static optional value, or the null state when
// `payload` is null. Mirrors set_optional_value_to_null's encoding rules:
// pointer-like optionals copy the pointer data, optional error sets copy the
// error entry, and the general encoding stores the payload pointer.
static void set_optional_payload(ZigValue *opt_val, ZigValue *payload) {
    assert(opt_val->special == ConstValSpecialStatic);
    assert(opt_val->type->id == ZigTypeIdOptional);

    if (payload == nullptr) {
        set_optional_value_to_null(opt_val);
        return;
    }
    if (get_src_ptr_type(opt_val->type)) {
        // pointer-like optional: the payload must also be pointer-like
        assert(get_src_ptr_type(payload->type));
        opt_val->data.x_ptr = payload->data.x_ptr;
        return;
    }
    if (is_opt_err_set(opt_val->type)) {
        assert(payload->type->id == ZigTypeIdErrorSet);
        opt_val->data.x_err_set = payload->data.x_err_set;
        return;
    }
    opt_val->data.x_optional = payload;
}
|
|
|
|
// Folds a comparison between two comptime values into a constant.
// Undefined operands produce an undefined result. For ordered pointer
// comparisons (<, >, <=, >=) only hard-coded-address and null pointers can
// be compared; any other pointer operand falls through to zig_unreachable(),
// so callers are responsible for never sending such values here. Equality
// comparisons (and all non-pointer types) are answered via value equality.
static IrInstGen *ir_evaluate_bin_op_cmp(IrAnalyze *ira, ZigType *resolved_type,
        ZigValue *op1_val, ZigValue *op2_val, IrInst *source_instr, IrBinOp op_id,
        bool one_possible_value)
{
    if (op1_val->special == ConstValSpecialUndef ||
        op2_val->special == ConstValSpecialUndef)
        return ir_const_undef(ira, source_instr, resolved_type);
    if (resolved_type->id == ZigTypeIdPointer && op_id != IrBinOpCmpEq && op_id != IrBinOpCmpNotEq) {
        // Ordered pointer comparison: only meaningful for addresses we can
        // see at comptime (hard-coded addresses, with null counted as 0).
        if ((op1_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr ||
            op1_val->data.x_ptr.special == ConstPtrSpecialNull) &&
            (op2_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr ||
            op2_val->data.x_ptr.special == ConstPtrSpecialNull))
        {
            uint64_t op1_addr = op1_val->data.x_ptr.special == ConstPtrSpecialNull ?
                0 : op1_val->data.x_ptr.data.hard_coded_addr.addr;
            uint64_t op2_addr = op2_val->data.x_ptr.special == ConstPtrSpecialNull ?
                0 : op2_val->data.x_ptr.data.hard_coded_addr.addr;
            Cmp cmp_result;
            if (op1_addr > op2_addr) {
                cmp_result = CmpGT;
            } else if (op1_addr < op2_addr) {
                cmp_result = CmpLT;
            } else {
                cmp_result = CmpEQ;
            }
            bool answer = resolve_cmp_op_id(op_id, cmp_result);
            return ir_const_bool(ira, source_instr, answer);
        }
    } else {
        // Equality/inequality: a single-possible-value type is trivially equal
        // to itself, otherwise compare the constant values structurally.
        bool are_equal = one_possible_value || const_values_equal(ira->codegen, op1_val, op2_val);
        bool answer;
        if (op_id == IrBinOpCmpEq) {
            answer = are_equal;
        } else if (op_id == IrBinOpCmpNotEq) {
            answer = !are_equal;
        } else {
            zig_unreachable();
        }
        return ir_const_bool(ira, source_instr, answer);
    }
    // Reached only for an ordered pointer comparison whose operands were not
    // comptime-comparable addresses.
    zig_unreachable();
}
|
|
|
|
// Attempts to fold a comparison at comptime. Returns the folded constant
// when the operand type has exactly one possible value or both operands are
// comptime-known; returns nullptr when a runtime comparison is required;
// returns invalid_inst_gen on analysis errors. Vector operands are compared
// element-wise, producing a vector of bool.
static IrInstGen *ir_try_evaluate_bin_op_cmp_const(IrAnalyze *ira, IrInst *source_instr, IrInstGen *op1, IrInstGen *op2,
        ZigType *resolved_type, IrBinOp op_id)
{
    assert(op1->value->type == resolved_type && op2->value->type == resolved_type);
    bool one_possible_value;
    switch (type_has_one_possible_value(ira->codegen, resolved_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            one_possible_value = true;
            break;
        case OnePossibleValueNo:
            one_possible_value = false;
            break;
    }

    if (one_possible_value || (instr_is_comptime(op1) && instr_is_comptime(op2))) {
        // With one possible value, no const-resolution is needed (or possible).
        ZigValue *op1_val = one_possible_value ? op1->value : ir_resolve_const(ira, op1, UndefBad);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        ZigValue *op2_val = one_possible_value ? op2->value : ir_resolve_const(ira, op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (resolved_type->id != ZigTypeIdVector)
            return ir_evaluate_bin_op_cmp(ira, resolved_type, op1_val, op2_val, source_instr, op_id, one_possible_value);
        // Vector comparison: fold each element into a @Vector(len, bool).
        IrInstGen *result = ir_const(ira, source_instr,
            get_vector_type(ira->codegen, resolved_type->data.vector.len, ira->codegen->builtin_types.entry_bool));
        result->value->data.x_array.data.s_none.elements =
            ira->codegen->pass1_arena->allocate<ZigValue>(resolved_type->data.vector.len);

        expand_undef_array(ira->codegen, result->value);
        for (size_t i = 0;i < resolved_type->data.vector.len;i++) {
            IrInstGen *cur_res = ir_evaluate_bin_op_cmp(ira, resolved_type->data.vector.elem_type,
                &op1_val->data.x_array.data.s_none.elements[i],
                &op2_val->data.x_array.data.s_none.elements[i],
                source_instr, op_id, one_possible_value);
            copy_const_val(ira->codegen, &result->value->data.x_array.data.s_none.elements[i], cur_res->value);
        }
        return result;
    } else {
        // Not comptime-known: signal the caller to emit a runtime comparison.
        return nullptr;
    }
}
|
|
|
|
// Compares `val` against zero without forcing full resolution of lazy values,
// which helps avoid dependency loops during semantic analysis.
// On success writes CmpLT/CmpEQ/CmpGT into *result and returns ErrorNone.
// Returns ErrorNotLazy when the answer cannot be determined this way
// (runtime/undef values, non-numeric statics, unsupported lazy kinds, NaN).
static Error lazy_cmp_zero(CodeGen *codegen, AstNode *source_node, ZigValue *val, Cmp *result) {
    Error err;

    switch (type_has_one_possible_value(codegen, val->type)) {
        case OnePossibleValueInvalid:
            return ErrorSemanticAnalyzeFail;
        case OnePossibleValueNo:
            break;
        case OnePossibleValueYes:
            // A type with exactly one possible value needs no stored data.
            switch (val->type->id) {
                case ZigTypeIdInt:
                    // The only one-possible-value integer is a 0-bit int, whose value is 0.
                    src_assert(val->type->data.integral.bit_count == 0, source_node);
                    *result = CmpEQ;
                    return ErrorNone;
                case ZigTypeIdUndefined:
                    return ErrorNotLazy;
                default:
                    zig_unreachable();
            }
    }

    switch (val->special) {
        case ConstValSpecialRuntime:
        case ConstValSpecialUndef:
            return ErrorNotLazy;
        case ConstValSpecialStatic:
            // Fully materialized constant: compare numerics directly.
            switch (val->type->id) {
                case ZigTypeIdComptimeInt:
                case ZigTypeIdInt:
                    *result = bigint_cmp_zero(&val->data.x_bigint);
                    return ErrorNone;
                case ZigTypeIdComptimeFloat:
                case ZigTypeIdFloat:
                    // NaN has no ordering with zero; punt to normal evaluation.
                    if (float_is_nan(val))
                        return ErrorNotLazy;
                    *result = float_cmp_zero(val);
                    return ErrorNone;
                default:
                    return ErrorNotLazy;
            }
        case ConstValSpecialLazy:
            switch (val->data.x_lazy->id) {
                case LazyValueIdInvalid:
                    zig_unreachable();
                case LazyValueIdAlignOf: {
                    // @alignOf(T) is 0 exactly when T has zero bits; otherwise positive.
                    // Resolving zero-bits-ness is cheaper than resolving the full type.
                    LazyValueAlignOf *lazy_align_of = reinterpret_cast<LazyValueAlignOf *>(val->data.x_lazy);
                    IrAnalyze *ira = lazy_align_of->ira;

                    bool is_zero_bits;
                    if ((err = type_val_resolve_zero_bits(ira->codegen, lazy_align_of->target_type->value,
                            nullptr, nullptr, &is_zero_bits)))
                    {
                        return err;
                    }

                    *result = is_zero_bits ? CmpEQ : CmpGT;
                    return ErrorNone;
                }
                case LazyValueIdSizeOf: {
                    // Same reasoning as @alignOf: @sizeOf(T) == 0 iff T has zero bits.
                    LazyValueSizeOf *lazy_size_of = reinterpret_cast<LazyValueSizeOf *>(val->data.x_lazy);
                    IrAnalyze *ira = lazy_size_of->ira;
                    bool is_zero_bits;
                    if ((err = type_val_resolve_zero_bits(ira->codegen, lazy_size_of->target_type->value,
                            nullptr, nullptr, &is_zero_bits)))
                    {
                        return err;
                    }
                    *result = is_zero_bits ? CmpEQ : CmpGT;
                    return ErrorNone;
                }
                default:
                    return ErrorNotLazy;
            }
    }
    zig_unreachable();
}
|
|
|
|
// Evaluates a single scalar comparison `op1_val <op_id> op2_val` at comptime,
// writing the boolean (or undef) result into `out_val`.
// Returns nullptr on success, or an ErrorMsg pointer on failure (the caller
// may attach notes to it; `ira->codegen->trace_err` is returned for already-
// reported errors).
// Handles mixed int/float operands and comptime-known lazy values.
static ErrorMsg *ir_eval_bin_op_cmp_scalar(IrAnalyze *ira, IrInst* source_instr,
        ZigValue *op1_val, IrBinOp op_id, ZigValue *op2_val, ZigValue *out_val)
{
    Error err;
    {
        // Before resolving the values, we special case comparisons against zero. These can often
        // be done without resolving lazy values, preventing potential dependency loops.
        Cmp op1_cmp_zero;
        if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op1_val, &op1_cmp_zero))) {
            if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
            return ira->codegen->trace_err;
        }
        Cmp op2_cmp_zero;
        if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op2_val, &op2_cmp_zero))) {
            if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
            return ira->codegen->trace_err;
        }
        // Combine the two sign classifications. The comparison is decidable from
        // signs alone unless both operands are on the same side of zero
        // (both CmpLT or both CmpGT), in which case fall through below.
        bool can_cmp_zero = false;
        Cmp cmp_result;
        if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpEQ) {
            can_cmp_zero = true;
            cmp_result = CmpEQ;
        } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpEQ) {
            can_cmp_zero = true;
            cmp_result = CmpGT;
        } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpGT) {
            can_cmp_zero = true;
            cmp_result = CmpLT;
        } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpEQ) {
            can_cmp_zero = true;
            cmp_result = CmpLT;
        } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpLT) {
            can_cmp_zero = true;
            cmp_result = CmpGT;
        } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpGT) {
            can_cmp_zero = true;
            cmp_result = CmpLT;
        } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpLT) {
            can_cmp_zero = true;
            cmp_result = CmpGT;
        }
        if (can_cmp_zero) {
            bool answer = resolve_cmp_op_id(op_id, cmp_result);
            out_val->special = ConstValSpecialStatic;
            out_val->data.x_bool = answer;
            return nullptr;
        }
    }
never_mind_just_calculate_it_normally:

    // Force full resolution of both operands (lazy values included).
    if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, source_instr->source_node,
            op1_val, UndefOk)))
    {
        return ira->codegen->trace_err;
    }
    if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, source_instr->source_node,
            op2_val, UndefOk)))
    {
        return ira->codegen->trace_err;
    }


    // Comparing with (or against) undefined yields undefined.
    if (op1_val->special == ConstValSpecialUndef || op2_val->special == ConstValSpecialUndef ||
        op1_val->type->id == ZigTypeIdUndefined || op2_val->type->id == ZigTypeIdUndefined)
    {
        out_val->special = ConstValSpecialUndef;
        return nullptr;
    }

    bool op1_is_float = op1_val->type->id == ZigTypeIdFloat || op1_val->type->id == ZigTypeIdComptimeFloat;
    bool op2_is_float = op2_val->type->id == ZigTypeIdFloat || op2_val->type->id == ZigTypeIdComptimeFloat;
    if (op1_is_float && op2_is_float) {
        // NaN is unordered: only != is true.
        if (float_is_nan(op1_val) || float_is_nan(op2_val)) {
            out_val->special = ConstValSpecialStatic;
            out_val->data.x_bool = op_id == IrBinOpCmpNotEq;
            return nullptr;
        }
        // Coerce a comptime_float operand to the other operand's concrete
        // float type so float_cmp sees matching representations.
        if (op1_val->type->id == ZigTypeIdComptimeFloat) {
            IrInstGen *tmp = ir_const_noval(ira, source_instr);
            tmp->value = op1_val;
            IrInstGen *casted = ir_implicit_cast(ira, tmp, op2_val->type);
            op1_val = casted->value;
        } else if (op2_val->type->id == ZigTypeIdComptimeFloat) {
            IrInstGen *tmp = ir_const_noval(ira, source_instr);
            tmp->value = op2_val;
            IrInstGen *casted = ir_implicit_cast(ira, tmp, op1_val->type);
            op2_val = casted->value;
        }
        Cmp cmp_result = float_cmp(op1_val, op2_val);
        out_val->special = ConstValSpecialStatic;
        out_val->data.x_bool = resolve_cmp_op_id(op_id, cmp_result);
        return nullptr;
    }

    bool op1_is_int = op1_val->type->id == ZigTypeIdInt || op1_val->type->id == ZigTypeIdComptimeInt;
    bool op2_is_int = op2_val->type->id == ZigTypeIdInt || op2_val->type->id == ZigTypeIdComptimeInt;

    if (op1_is_int && op2_is_int) {
        // Pure integer comparison via arbitrary-precision bigints.
        Cmp cmp_result = bigint_cmp(&op1_val->data.x_bigint, &op2_val->data.x_bigint);
        out_val->special = ConstValSpecialStatic;
        out_val->data.x_bool = resolve_cmp_op_id(op_id, cmp_result);

        return nullptr;
    }

    // Handle the case where one of the two operands is a fp value and the other
    // is an integer value
    ZigValue *float_val;
    if (op1_is_int && op2_is_float) {
        float_val = op2_val;
    } else if (op1_is_float && op2_is_int) {
        float_val = op1_val;
    } else {
        zig_unreachable();
    }

    // They can never be equal if the fp value has a non-zero decimal part
    if (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq) {
        if (float_has_fraction(float_val)) {
            out_val->special = ConstValSpecialStatic;
            out_val->data.x_bool = op_id == IrBinOpCmpNotEq;
            return nullptr;
        }
    }

    // Cast the integer operand into a fp value to perform the comparison
    BigFloat op1_bigfloat;
    BigFloat op2_bigfloat;
    value_to_bigfloat(&op1_bigfloat, op1_val);
    value_to_bigfloat(&op2_bigfloat, op2_val);

    Cmp cmp_result = bigfloat_cmp(&op1_bigfloat, &op2_bigfloat);
    out_val->special = ConstValSpecialStatic;
    out_val->data.x_bool = resolve_cmp_op_id(op_id, cmp_result);

    return nullptr;
}
|
|
|
|
// Analyzes a numeric comparison (integers and floats, scalars and vectors).
// Strategy:
//   1. Validate/derive the result type (bool, or vector of bool).
//   2. If both operands are comptime-known (or both comparable against zero
//      lazily), fold the comparison now.
//   3. Otherwise apply sign-based shortcuts (e.g. `unsigned >= 0` is always
//      true), then pick a common type wide enough for both operands and emit
//      a runtime comparison instruction.
// Returns the resulting instruction or invalid_inst_gen on error.
static IrInstGen *ir_analyze_bin_op_cmp_numeric(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *op1, IrInstGen *op2, IrBinOp op_id)
{
    Error err;

    ZigType *scalar_result_type = ira->codegen->builtin_types.entry_bool;
    ZigType *result_type = scalar_result_type;
    ZigType *op1_scalar_type = op1->value->type;
    ZigType *op2_scalar_type = op2->value->type;
    if (op1->value->type->id == ZigTypeIdVector && op2->value->type->id == ZigTypeIdVector) {
        if (op1->value->type->data.vector.len != op2->value->type->data.vector.len) {
            ir_add_error(ira, source_instr,
                buf_sprintf("vector length mismatch: %" PRIu64 " and %" PRIu64,
                    op1->value->type->data.vector.len, op2->value->type->data.vector.len));
            return ira->codegen->invalid_inst_gen;
        }
        result_type = get_vector_type(ira->codegen, op1->value->type->data.vector.len, scalar_result_type);
        op1_scalar_type = op1->value->type->data.vector.elem_type;
        op2_scalar_type = op2->value->type->data.vector.elem_type;
    } else if (op1->value->type->id == ZigTypeIdVector || op2->value->type->id == ZigTypeIdVector) {
        ir_add_error(ira, source_instr,
            buf_sprintf("mixed scalar and vector operands to comparison operator: '%s' and '%s'",
                buf_ptr(&op1->value->type->name), buf_ptr(&op2->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    bool opv_op1;
    switch (type_has_one_possible_value(ira->codegen, op1->value->type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            opv_op1 = true;
            break;
        case OnePossibleValueNo:
            opv_op1 = false;
            break;
    }
    bool opv_op2;
    switch (type_has_one_possible_value(ira->codegen, op2->value->type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            opv_op2 = true;
            break;
        case OnePossibleValueNo:
            opv_op2 = false;
            break;
    }
    // Classify each operand against zero without forcing lazy resolution;
    // ErrorNotLazy just means the shortcut does not apply.
    Cmp op1_cmp_zero;
    bool have_op1_cmp_zero = false;
    if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op1->value, &op1_cmp_zero))) {
        if (err != ErrorNotLazy) return ira->codegen->invalid_inst_gen;
    } else {
        have_op1_cmp_zero = true;
    }
    Cmp op2_cmp_zero;
    bool have_op2_cmp_zero = false;
    if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op2->value, &op2_cmp_zero))) {
        if (err != ErrorNotLazy) return ira->codegen->invalid_inst_gen;
    } else {
        have_op2_cmp_zero = true;
    }
    if (((opv_op1 || instr_is_comptime(op1)) && (opv_op2 || instr_is_comptime(op2))) ||
        (have_op1_cmp_zero && have_op2_cmp_zero))
    {
        // Comptime path: fold the comparison, element-wise for vectors.
        IrInstGen *result_instruction = ir_const(ira, source_instr, result_type);
        ZigValue *out_val = result_instruction->value;
        if (result_type->id == ZigTypeIdVector) {
            size_t len = result_type->data.vector.len;
            expand_undef_array(ira->codegen, op1->value);
            expand_undef_array(ira->codegen, op2->value);
            // Mark undef first so expand_undef_array materializes elements.
            out_val->special = ConstValSpecialUndef;
            expand_undef_array(ira->codegen, out_val);
            for (size_t i = 0; i < len; i += 1) {
                ZigValue *scalar_op1_val = &op1->value->data.x_array.data.s_none.elements[i];
                ZigValue *scalar_op2_val = &op2->value->data.x_array.data.s_none.elements[i];
                ZigValue *scalar_out_val = &out_val->data.x_array.data.s_none.elements[i];
                assert(scalar_out_val->type == scalar_result_type);
                ErrorMsg *msg = ir_eval_bin_op_cmp_scalar(ira, source_instr,
                    scalar_op1_val, op_id, scalar_op2_val, scalar_out_val);
                if (msg != nullptr) {
                    add_error_note(ira->codegen, msg, source_instr->source_node,
                        buf_sprintf("when computing vector element at index %" ZIG_PRI_usize, i));
                    return ira->codegen->invalid_inst_gen;
                }
            }
            out_val->type = result_type;
            out_val->special = ConstValSpecialStatic;
        } else {
            if (ir_eval_bin_op_cmp_scalar(ira, source_instr, op1->value, op_id,
                op2->value, out_val) != nullptr)
            {
                return ira->codegen->invalid_inst_gen;
            }
        }
        return result_instruction;
    }

    // If one operand has a comptime-known comparison with 0, and the other operand is unsigned, we might
    // know the answer, depending on the operator.
    // TODO make this work with vectors
    if (have_op1_cmp_zero && op2_scalar_type->id == ZigTypeIdInt && !op2_scalar_type->data.integral.is_signed) {
        if (op1_cmp_zero == CmpEQ) {
            // 0 <= unsigned_x // true
            // 0 > unsigned_x // false
            switch (op_id) {
                case IrBinOpCmpLessOrEq:
                    return ir_const_bool(ira, source_instr, true);
                case IrBinOpCmpGreaterThan:
                    return ir_const_bool(ira, source_instr, false);
                default:
                    break;
            }
        } else if (op1_cmp_zero == CmpLT) {
            // -1 != unsigned_x // true
            // -1 <= unsigned_x // true
            // -1 < unsigned_x // true
            // -1 == unsigned_x // false
            // -1 >= unsigned_x // false
            // -1 > unsigned_x // false
            switch (op_id) {
                case IrBinOpCmpNotEq:
                case IrBinOpCmpLessOrEq:
                case IrBinOpCmpLessThan:
                    return ir_const_bool(ira, source_instr, true);
                case IrBinOpCmpEq:
                case IrBinOpCmpGreaterOrEq:
                case IrBinOpCmpGreaterThan:
                    return ir_const_bool(ira, source_instr, false);
                default:
                    break;
            }
        }
    }
    if (have_op2_cmp_zero && op1_scalar_type->id == ZigTypeIdInt && !op1_scalar_type->data.integral.is_signed) {
        if (op2_cmp_zero == CmpEQ) {
            // unsigned_x < 0 // false
            // unsigned_x >= 0 // true
            switch (op_id) {
                case IrBinOpCmpLessThan:
                    return ir_const_bool(ira, source_instr, false);
                case IrBinOpCmpGreaterOrEq:
                    return ir_const_bool(ira, source_instr, true);
                default:
                    break;
            }
        } else if (op2_cmp_zero == CmpLT) {
            // unsigned_x != -1 // true
            // unsigned_x >= -1 // true
            // unsigned_x > -1 // true
            // unsigned_x == -1 // false
            // unsigned_x < -1 // false
            // unsigned_x <= -1 // false
            switch (op_id) {
                case IrBinOpCmpNotEq:
                case IrBinOpCmpGreaterOrEq:
                case IrBinOpCmpGreaterThan:
                    return ir_const_bool(ira, source_instr, true);
                case IrBinOpCmpEq:
                case IrBinOpCmpLessThan:
                case IrBinOpCmpLessOrEq:
                    return ir_const_bool(ira, source_instr, false);
                default:
                    break;
            }
        }
    }

    // It must be a runtime comparison.
    // For floats, emit a float comparison instruction.
    bool op1_is_float = op1_scalar_type->id == ZigTypeIdFloat || op1_scalar_type->id == ZigTypeIdComptimeFloat;
    bool op2_is_float = op2_scalar_type->id == ZigTypeIdFloat || op2_scalar_type->id == ZigTypeIdComptimeFloat;
    if (op1_is_float && op2_is_float) {
        // Implicit cast the smaller one to the larger one.
        ZigType *dest_scalar_type;
        if (op1_scalar_type->id == ZigTypeIdComptimeFloat) {
            dest_scalar_type = op2_scalar_type;
        } else if (op2_scalar_type->id == ZigTypeIdComptimeFloat) {
            dest_scalar_type = op1_scalar_type;
        } else if (op1_scalar_type->data.floating.bit_count >= op2_scalar_type->data.floating.bit_count) {
            dest_scalar_type = op1_scalar_type;
        } else {
            dest_scalar_type = op2_scalar_type;
        }
        ZigType *dest_type = (result_type->id == ZigTypeIdVector) ?
            get_vector_type(ira->codegen, result_type->data.vector.len, dest_scalar_type) : dest_scalar_type;
        IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, dest_type);
        IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, dest_type);
        if (type_is_invalid(casted_op1->value->type) || type_is_invalid(casted_op2->value->type))
            return ira->codegen->invalid_inst_gen;
        return ir_build_bin_op_gen(ira, source_instr, result_type, op_id, casted_op1, casted_op2, true);
    }

    // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
    // For mixed signed and unsigned integers, implicit cast both operands to a signed
    // integer with + 1 bit.
    // For mixed floats and integers, extract the integer part from the float, cast that to
    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
    // add/subtract 1.
    bool dest_int_is_signed = false;
    if (have_op1_cmp_zero) {
        if (op1_cmp_zero == CmpLT) dest_int_is_signed = true;
    } else if (op1_is_float) {
        dest_int_is_signed = true;
    } else if (op1_scalar_type->id == ZigTypeIdInt && op1_scalar_type->data.integral.is_signed) {
        dest_int_is_signed = true;
    }
    if (have_op2_cmp_zero) {
        if (op2_cmp_zero == CmpLT) dest_int_is_signed = true;
    } else if (op2_is_float) {
        dest_int_is_signed = true;
    } else if (op2_scalar_type->id == ZigTypeIdInt && op2_scalar_type->data.integral.is_signed) {
        // Fix: check the scalar type (mirroring the op1 branch above). The
        // previous code inspected op2->value->type, which for a vector
        // operand is the vector type itself (ZigTypeIdVector), so a vector
        // of signed integers was never detected as signed here.
        dest_int_is_signed = true;
    }
    ZigType *dest_float_type = nullptr;
    // Compute the minimum bit width needed to represent each operand, so the
    // common integer type loses no information.
    uint32_t op1_bits;
    if (instr_is_comptime(op1) && result_type->id != ZigTypeIdVector) {
        ZigValue *op1_val = ir_resolve_const(ira, op1, UndefOk);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (op1_val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, source_instr, ira->codegen->builtin_types.entry_bool);
        bool is_unsigned;
        if (op1_is_float) {
            BigInt bigint = {};
            float_init_bigint(&bigint, op1_val);
            Cmp zcmp = float_cmp_zero(op1_val);
            if (float_has_fraction(op1_val)) {
                // A fractional value can never equal an integer.
                if (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq) {
                    return ir_const_bool(ira, source_instr, op_id == IrBinOpCmpNotEq);
                }
                // Round away from zero so ordered comparisons stay correct
                // after truncation to an integer.
                if (zcmp == CmpLT) {
                    bigint_decr(&bigint);
                } else {
                    bigint_incr(&bigint);
                }
            }
            op1_bits = bigint_bits_needed(&bigint);
            is_unsigned = zcmp != CmpLT;
        } else {
            op1_bits = bigint_bits_needed(&op1_val->data.x_bigint);
            is_unsigned = bigint_cmp_zero(&op1_val->data.x_bigint) != CmpLT;
        }
        if (is_unsigned && dest_int_is_signed) {
            // Need one extra bit for the sign when widening to signed.
            op1_bits += 1;
        }
    } else if (op1_is_float) {
        ir_assert(op1_scalar_type->id == ZigTypeIdFloat, source_instr);
        dest_float_type = op1_scalar_type;
    } else {
        ir_assert(op1_scalar_type->id == ZigTypeIdInt, source_instr);
        op1_bits = op1_scalar_type->data.integral.bit_count;
        if (!op1_scalar_type->data.integral.is_signed && dest_int_is_signed) {
            op1_bits += 1;
        }
    }
    uint32_t op2_bits;
    if (instr_is_comptime(op2) && result_type->id != ZigTypeIdVector) {
        ZigValue *op2_val = ir_resolve_const(ira, op2, UndefOk);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (op2_val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, source_instr, ira->codegen->builtin_types.entry_bool);
        bool is_unsigned;
        if (op2_is_float) {
            BigInt bigint = {};
            float_init_bigint(&bigint, op2_val);
            Cmp zcmp = float_cmp_zero(op2_val);
            if (float_has_fraction(op2_val)) {
                if (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq) {
                    return ir_const_bool(ira, source_instr, op_id == IrBinOpCmpNotEq);
                }
                if (zcmp == CmpLT) {
                    bigint_decr(&bigint);
                } else {
                    bigint_incr(&bigint);
                }
            }
            op2_bits = bigint_bits_needed(&bigint);
            is_unsigned = zcmp != CmpLT;
        } else {
            op2_bits = bigint_bits_needed(&op2_val->data.x_bigint);
            is_unsigned = bigint_cmp_zero(&op2_val->data.x_bigint) != CmpLT;
        }
        if (is_unsigned && dest_int_is_signed) {
            op2_bits += 1;
        }
    } else if (op2_is_float) {
        ir_assert(op2_scalar_type->id == ZigTypeIdFloat, source_instr);
        dest_float_type = op2_scalar_type;
    } else {
        ir_assert(op2_scalar_type->id == ZigTypeIdInt, source_instr);
        op2_bits = op2_scalar_type->data.integral.bit_count;
        if (!op2_scalar_type->data.integral.is_signed && dest_int_is_signed) {
            op2_bits += 1;
        }
    }
    // Choose the common type: a runtime float type if one side was a runtime
    // float, else an integer wide enough for both operands.
    ZigType *dest_scalar_type = (dest_float_type == nullptr) ?
        get_int_type(ira->codegen, dest_int_is_signed, (op1_bits > op2_bits) ? op1_bits : op2_bits) :
        dest_float_type;
    ZigType *dest_type = (result_type->id == ZigTypeIdVector) ?
        get_vector_type(ira->codegen, result_type->data.vector.len, dest_scalar_type) : dest_scalar_type;

    IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, dest_type);
    if (type_is_invalid(casted_op1->value->type))
        return ira->codegen->invalid_inst_gen;
    IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, dest_type);
    if (type_is_invalid(casted_op2->value->type))
        return ira->codegen->invalid_inst_gen;
    return ir_build_bin_op_gen(ira, source_instr, result_type, op_id, casted_op1, casted_op2, true);
}
|
|
|
|
// Reports whether two values of type `ty` may be compared with each other.
// When `is_equality_cmp` is set the operators in question are == and !=;
// otherwise the ordered comparisons <, <=, >, >=.
static bool type_is_self_comparable(ZigType *ty, bool is_equality_cmp) {
    // Every numeric type supports the full set of comparison operators.
    if (type_is_numeric(ty)) {
        return true;
    }
    switch (ty->id) {
        case ZigTypeIdInvalid:
            zig_unreachable();

        case ZigTypeIdInt:
        case ZigTypeIdFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdComptimeFloat:
            // Already covered by the type_is_numeric() fast path above.
            zig_unreachable();

        case ZigTypeIdPointer:
            // Plain pointers only support equality; C pointers are also ordered.
            return is_equality_cmp || (ty->data.pointer.ptr_len == PtrLenC);

        case ZigTypeIdOptional:
            // Only pointer-like optionals are comparable, and only for equality.
            return is_equality_cmp && get_src_ptr_type(ty) != nullptr;

        case ZigTypeIdVector:
            // type_is_numeric() does not cover every vector; vectors of bool
            // take this path.
        case ZigTypeIdBool:
        case ZigTypeIdMetaType:
        case ZigTypeIdVoid:
        case ZigTypeIdErrorSet:
        case ZigTypeIdFn:
        case ZigTypeIdOpaque:
        case ZigTypeIdBoundFn:
        case ZigTypeIdEnum:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdAnyFrame:
            // Equality only.
            return is_equality_cmp;

        case ZigTypeIdUnreachable:
        case ZigTypeIdArray:
        case ZigTypeIdStruct:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdErrorUnion:
        case ZigTypeIdUnion:
        case ZigTypeIdFnFrame:
            // No comparison operators at all.
            return false;
    }
    zig_unreachable();
}
|
|
|
|
// Attempt comptime folding of `optional ==/!= non_optional` where the
// non-optional operand's type is exactly the optional's child type.
// Returns the folded bool constant, invalid_inst_gen on error, or nullptr
// when either operand is not comptime-known.
static IrInstGen *ir_try_evaluate_cmp_optional_non_optional_const(IrAnalyze *ira, IrInst *source_instr, ZigType *child_type,
        IrInstGen *optional, IrInstGen *non_optional, IrBinOp op_id)
{
    assert(optional->value->type->id == ZigTypeIdOptional);
    assert(optional->value->type->data.maybe.child_type == non_optional->value->type);
    assert(non_optional->value->type == child_type);
    assert(op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);

    // Runtime operands cannot be folded here; the caller emits branches.
    if (!instr_is_comptime(optional) || !instr_is_comptime(non_optional))
        return nullptr;

    ZigValue *maybe_val = ir_resolve_const(ira, optional, UndefBad);
    if (maybe_val == nullptr) {
        return ira->codegen->invalid_inst_gen;
    }

    // Resolve the non-optional side too, so errors there are reported.
    ZigValue *payload_val = ir_resolve_const(ira, non_optional, UndefBad);
    if (payload_val == nullptr) {
        return ira->codegen->invalid_inst_gen;
    }

    if (optional_value_is_null(maybe_val)) {
        // null == x is always false; null != x is always true.
        return ir_const_bool(ira, source_instr, (op_id != IrBinOpCmpEq));
    }

    // Non-null: unwrap the payload and compare it against the plain operand.
    IrInstGen *unwrapped = ir_analyze_optional_value_payload_value(ira, source_instr, optional, false);
    if (type_is_invalid(unwrapped->value->type)) {
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *folded = ir_try_evaluate_bin_op_cmp_const(ira, source_instr, unwrapped, non_optional, child_type, op_id);
    // Both sides are comptime-known here, so the fold cannot bail out.
    assert(folded != nullptr);
    return folded;
}
|
|
|
|
// Emits the runtime lowering of `optional ==/!= non_optional`:
//
//     if (optional != null) result = payload <op> non_optional
//     else                  result = (op == !=)
//
// implemented as a conditional branch into two blocks joined by a phi.
// Returns the phi instruction, or invalid_inst_gen on error.
static IrInstGen *ir_evaluate_cmp_optional_non_optional(IrAnalyze *ira, IrInst *source_instr, ZigType *child_type,
        IrInstGen *optional, IrInstGen *non_optional, IrBinOp op_id)
{
    assert(optional->value->type->id == ZigTypeIdOptional);
    assert(optional->value->type->data.maybe.child_type == non_optional->value->type);
    assert(non_optional->value->type == child_type);
    assert(op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);

    ZigType *result_type = ira->codegen->builtin_types.entry_bool;
    // Close out the current block before creating the branch targets.
    ir_append_basic_block_gen(&ira->new_irb, ira->new_irb.current_basic_block);

    IrBasicBlockGen *null_block = ir_create_basic_block_gen(ira, source_instr->scope, "CmpOptionalNonOptionalOptionalNull");
    IrBasicBlockGen *non_null_block = ir_create_basic_block_gen(ira, source_instr->scope, "CmpOptionalNonOptionalOptionalNotNull");
    IrBasicBlockGen *end_block = ir_create_basic_block_gen(ira, source_instr->scope, "CmpOptionalNonOptionalEnd");

    // Branch on whether the optional holds a payload.
    IrInstGen *is_non_null = ir_build_test_non_null_gen(ira, source_instr, optional);
    ir_build_cond_br_gen(ira, source_instr, is_non_null, non_null_block, null_block);

    // Non-null arm: unwrap the payload and compare it with the plain operand.
    ir_set_cursor_at_end_and_append_block_gen(&ira->new_irb, non_null_block);
    IrInstGen *optional_unwrapped = ir_analyze_optional_value_payload_value(ira, source_instr, optional, false);
    if (type_is_invalid(optional_unwrapped->value->type)) {
        return ira->codegen->invalid_inst_gen;
    }
    IrInstGen *non_null_cmp_result = ir_build_bin_op_gen(ira, source_instr, result_type, op_id,
        optional_unwrapped, non_optional, false); // safety check unnecessary for comparison operators
    ir_build_br_gen(ira, source_instr, end_block);


    // Null arm: the answer is a constant (false for ==, true for !=).
    ir_set_cursor_at_end_and_append_block_gen(&ira->new_irb, null_block);
    IrInstGen *null_result = ir_const_bool(ira, source_instr, (op_id != IrBinOpCmpEq));
    ir_build_br_gen(ira, source_instr, end_block);

    // Join: phi over the two incoming arms.
    ir_set_cursor_at_end_gen(&ira->new_irb, end_block);
    int incoming_count = 2;
    IrBasicBlockGen **incoming_blocks = heap::c_allocator.allocate_nonzero<IrBasicBlockGen *>(incoming_count);
    incoming_blocks[0] = null_block;
    incoming_blocks[1] = non_null_block;
    IrInstGen **incoming_values = heap::c_allocator.allocate_nonzero<IrInstGen *>(incoming_count);
    incoming_values[0] = null_result;
    incoming_values[1] = non_null_cmp_result;

    return ir_build_phi_gen(ira, source_instr, incoming_count, incoming_blocks, incoming_values, result_type);
}
|
|
|
|
// Validates and lowers `optional ==/!= non_optional` for a non-pointer-like
// optional. `optional` is whichever of op1/op2 has the optional type; the
// other operand must have exactly the optional's child type, and that child
// type must support equality comparison. Emits a diagnostic (with an
// explanatory note) otherwise, then folds at comptime when possible or emits
// the runtime branch-and-phi form.
static IrInstGen *ir_analyze_cmp_optional_non_optional(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *op1, IrInstGen *op2, IrInstGen *optional, IrBinOp op_id)
{
    assert(op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);
    assert(optional->value->type->id == ZigTypeIdOptional);
    assert(get_src_ptr_type(optional->value->type) == nullptr);

    IrInstGen *plain_operand;
    if (optional == op1) {
        plain_operand = op2;
    } else if (optional == op2) {
        plain_operand = op1;
    } else {
        zig_unreachable();
    }

    ZigType *payload_type = optional->value->type->data.maybe.child_type;
    bool types_match = (payload_type == plain_operand->value->type);
    bool payload_comparable = type_is_self_comparable(payload_type, true);
    if (!types_match || !payload_comparable) {
        ErrorMsg *msg = ir_add_error_node(ira, source_instr->source_node, buf_sprintf("cannot compare types '%s' and '%s'",
            buf_ptr(&op1->value->type->name),
            buf_ptr(&op2->value->type->name)));

        if (types_match) {
            // Types line up, but the payload type has no equality operator.
            add_error_note(ira->codegen, msg, source_instr->source_node,
                buf_sprintf("operator not supported for type '%s'",
                    buf_ptr(&payload_type->name)));
        } else if (plain_operand->value->type->id == ZigTypeIdOptional) {
            add_error_note(ira->codegen, msg, source_instr->source_node, buf_sprintf(
                "optional to optional comparison is only supported for optional pointer types"));
        } else {
            add_error_note(ira->codegen, msg, source_instr->source_node,
                buf_sprintf("optional child type '%s' must be the same as non-optional type '%s'",
                    buf_ptr(&payload_type->name),
                    buf_ptr(&plain_operand->value->type->name)));
        }
        return ira->codegen->invalid_inst_gen;
    }

    if (payload_type->id == ZigTypeIdVector) {
        ir_add_error_node(ira, source_instr->source_node, buf_sprintf("TODO add comparison of optional vector"));
        return ira->codegen->invalid_inst_gen;
    }

    // Prefer comptime folding; fall back to the runtime lowering.
    IrInstGen *folded = ir_try_evaluate_cmp_optional_non_optional_const(ira, source_instr, payload_type,
            optional, plain_operand, op_id);
    if (folded != nullptr) {
        return folded;
    }

    return ir_evaluate_cmp_optional_non_optional(ira, source_instr, payload_type, optional, plain_operand, op_id);
}
|
|
|
|
static IrInstGen *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
|
|
IrInstGen *op1 = bin_op_instruction->op1->child;
|
|
if (type_is_invalid(op1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *op2 = bin_op_instruction->op2->child;
|
|
if (type_is_invalid(op2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
AstNode *source_node = bin_op_instruction->base.base.source_node;
|
|
|
|
IrBinOp op_id = bin_op_instruction->op_id;
|
|
bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);
|
|
if (is_equality_cmp && op1->value->type->id == ZigTypeIdNull && op2->value->type->id == ZigTypeIdNull) {
|
|
return ir_const_bool(ira, &bin_op_instruction->base.base, (op_id == IrBinOpCmpEq));
|
|
} else if (is_equality_cmp &&
|
|
((op1->value->type->id == ZigTypeIdNull && op2->value->type->id == ZigTypeIdOptional) ||
|
|
(op2->value->type->id == ZigTypeIdNull && op1->value->type->id == ZigTypeIdOptional)))
|
|
{
|
|
IrInstGen *maybe_op;
|
|
if (op1->value->type->id == ZigTypeIdNull) {
|
|
maybe_op = op2;
|
|
} else if (op2->value->type->id == ZigTypeIdNull) {
|
|
maybe_op = op1;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
if (instr_is_comptime(maybe_op)) {
|
|
ZigValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad);
|
|
if (!maybe_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
bool is_null = optional_value_is_null(maybe_val);
|
|
bool bool_result = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
|
|
return ir_const_bool(ira, &bin_op_instruction->base.base, bool_result);
|
|
}
|
|
|
|
IrInstGen *is_non_null = ir_build_test_non_null_gen(ira, &bin_op_instruction->base.base, maybe_op);
|
|
|
|
if (op_id == IrBinOpCmpEq) {
|
|
return ir_build_bool_not_gen(ira, &bin_op_instruction->base.base, is_non_null);
|
|
} else {
|
|
return is_non_null;
|
|
}
|
|
} else if (is_equality_cmp &&
|
|
((op1->value->type->id == ZigTypeIdNull && op2->value->type->id == ZigTypeIdPointer &&
|
|
op2->value->type->data.pointer.ptr_len == PtrLenC) ||
|
|
(op2->value->type->id == ZigTypeIdNull && op1->value->type->id == ZigTypeIdPointer &&
|
|
op1->value->type->data.pointer.ptr_len == PtrLenC)))
|
|
{
|
|
IrInstGen *c_ptr_op;
|
|
if (op1->value->type->id == ZigTypeIdNull) {
|
|
c_ptr_op = op2;
|
|
} else if (op2->value->type->id == ZigTypeIdNull) {
|
|
c_ptr_op = op1;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
if (instr_is_comptime(c_ptr_op)) {
|
|
ZigValue *c_ptr_val = ir_resolve_const(ira, c_ptr_op, UndefOk);
|
|
if (!c_ptr_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (c_ptr_val->special == ConstValSpecialUndef)
|
|
return ir_const_undef(ira, &bin_op_instruction->base.base, ira->codegen->builtin_types.entry_bool);
|
|
bool is_null = c_ptr_val->data.x_ptr.special == ConstPtrSpecialNull ||
|
|
(c_ptr_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
|
|
c_ptr_val->data.x_ptr.data.hard_coded_addr.addr == 0);
|
|
bool bool_result = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
|
|
return ir_const_bool(ira, &bin_op_instruction->base.base, bool_result);
|
|
}
|
|
IrInstGen *is_non_null = ir_build_test_non_null_gen(ira, &bin_op_instruction->base.base, c_ptr_op);
|
|
|
|
if (op_id == IrBinOpCmpEq) {
|
|
return ir_build_bool_not_gen(ira, &bin_op_instruction->base.base, is_non_null);
|
|
} else {
|
|
return is_non_null;
|
|
}
|
|
} else if (is_equality_cmp &&
|
|
(op1->value->type->id == ZigTypeIdOptional && get_src_ptr_type(op1->value->type) == nullptr))
|
|
{
|
|
return ir_analyze_cmp_optional_non_optional(ira, &bin_op_instruction->base.base, op1, op2, op1, op_id);
|
|
} else if(is_equality_cmp &&
|
|
(op2->value->type->id == ZigTypeIdOptional && get_src_ptr_type(op2->value->type) == nullptr))
|
|
{
|
|
return ir_analyze_cmp_optional_non_optional(ira, &bin_op_instruction->base.base, op1, op2, op2, op_id);
|
|
} else if (op1->value->type->id == ZigTypeIdNull || op2->value->type->id == ZigTypeIdNull) {
|
|
ZigType *non_null_type = (op1->value->type->id == ZigTypeIdNull) ? op2->value->type : op1->value->type;
|
|
ir_add_error_node(ira, source_node, buf_sprintf("comparison of '%s' with null",
|
|
buf_ptr(&non_null_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
} else if (is_equality_cmp && (
|
|
(op1->value->type->id == ZigTypeIdEnumLiteral && op2->value->type->id == ZigTypeIdUnion) ||
|
|
(op2->value->type->id == ZigTypeIdEnumLiteral && op1->value->type->id == ZigTypeIdUnion)))
|
|
{
|
|
// Support equality comparison between a union's tag value and a enum literal
|
|
IrInstGen *union_val = op1->value->type->id == ZigTypeIdUnion ? op1 : op2;
|
|
IrInstGen *enum_val = op1->value->type->id == ZigTypeIdUnion ? op2 : op1;
|
|
|
|
if (!is_tagged_union(union_val->value->type)) {
|
|
ErrorMsg *msg = ir_add_error_node(ira, source_node,
|
|
buf_sprintf("comparison of union and enum literal is only valid for tagged union types"));
|
|
add_error_note(ira->codegen, msg, union_val->value->type->data.unionation.decl_node,
|
|
buf_sprintf("type %s is not a tagged union",
|
|
buf_ptr(&union_val->value->type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
ZigType *tag_type = union_val->value->type->data.unionation.tag_type;
|
|
assert(tag_type != nullptr);
|
|
|
|
IrInstGen *casted_union = ir_implicit_cast(ira, union_val, tag_type);
|
|
if (type_is_invalid(casted_union->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *casted_val = ir_implicit_cast(ira, enum_val, tag_type);
|
|
if (type_is_invalid(casted_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (instr_is_comptime(casted_union)) {
|
|
ZigValue *const_union_val = ir_resolve_const(ira, casted_union, UndefBad);
|
|
if (!const_union_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
ZigValue *const_enum_val = ir_resolve_const(ira, casted_val, UndefBad);
|
|
if (!const_enum_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
Cmp cmp_result = bigint_cmp(&const_union_val->data.x_union.tag, &const_enum_val->data.x_enum_tag);
|
|
bool bool_result = (op_id == IrBinOpCmpEq) ? cmp_result == CmpEQ : cmp_result != CmpEQ;
|
|
|
|
return ir_const_bool(ira, &bin_op_instruction->base.base, bool_result);
|
|
}
|
|
|
|
return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, ira->codegen->builtin_types.entry_bool,
|
|
op_id, casted_union, casted_val, bin_op_instruction->safety_check_on);
|
|
}
|
|
|
|
if (op1->value->type->id == ZigTypeIdErrorSet && op2->value->type->id == ZigTypeIdErrorSet) {
|
|
if (!is_equality_cmp) {
|
|
ir_add_error_node(ira, source_node, buf_sprintf("operator not allowed for errors"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
ZigType *intersect_type = get_error_set_intersection(ira, op1->value->type, op2->value->type, source_node);
|
|
if (type_is_invalid(intersect_type)) {
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
if (!resolve_inferred_error_set(ira->codegen, intersect_type, source_node)) {
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
// exception if one of the operators has the type of the empty error set, we allow the comparison
|
|
// (and make it comptime known)
|
|
// this is a function which is evaluated at comptime and returns an inferred error set will have an empty
|
|
// error set.
|
|
if (op1->value->type->data.error_set.err_count == 0 || op2->value->type->data.error_set.err_count == 0) {
|
|
bool are_equal = false;
|
|
bool answer;
|
|
if (op_id == IrBinOpCmpEq) {
|
|
answer = are_equal;
|
|
} else if (op_id == IrBinOpCmpNotEq) {
|
|
answer = !are_equal;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
return ir_const_bool(ira, &bin_op_instruction->base.base, answer);
|
|
}
|
|
|
|
if (!type_is_global_error_set(intersect_type)) {
|
|
if (intersect_type->data.error_set.err_count == 0) {
|
|
ir_add_error_node(ira, source_node,
|
|
buf_sprintf("error sets '%s' and '%s' have no common errors",
|
|
buf_ptr(&op1->value->type->name), buf_ptr(&op2->value->type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
if (op1->value->type->data.error_set.err_count == 1 && op2->value->type->data.error_set.err_count == 1) {
|
|
bool are_equal = true;
|
|
bool answer;
|
|
if (op_id == IrBinOpCmpEq) {
|
|
answer = are_equal;
|
|
} else if (op_id == IrBinOpCmpNotEq) {
|
|
answer = !are_equal;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
return ir_const_bool(ira, &bin_op_instruction->base.base, answer);
|
|
}
|
|
}
|
|
|
|
if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
|
|
ZigValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
|
|
if (op1_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
ZigValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
|
|
if (op2_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
bool answer;
|
|
bool are_equal = op1_val->data.x_err_set->value == op2_val->data.x_err_set->value;
|
|
if (op_id == IrBinOpCmpEq) {
|
|
answer = are_equal;
|
|
} else if (op_id == IrBinOpCmpNotEq) {
|
|
answer = !are_equal;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
|
|
return ir_const_bool(ira, &bin_op_instruction->base.base, answer);
|
|
}
|
|
|
|
return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, ira->codegen->builtin_types.entry_bool,
|
|
op_id, op1, op2, bin_op_instruction->safety_check_on);
|
|
}
|
|
|
|
if (type_is_numeric(op1->value->type) && type_is_numeric(op2->value->type)) {
|
|
// This operation allows any combination of integer and float types, regardless of the
|
|
// signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
|
|
// numeric types.
|
|
return ir_analyze_bin_op_cmp_numeric(ira, &bin_op_instruction->base.base, op1, op2, op_id);
|
|
}
|
|
|
|
IrInstGen *instructions[] = {op1, op2};
|
|
ZigType *resolved_type = ir_resolve_peer_types(ira, source_node, nullptr, instructions, 2);
|
|
if (type_is_invalid(resolved_type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
bool operator_allowed = type_is_self_comparable(resolved_type, is_equality_cmp);
|
|
|
|
if (!operator_allowed) {
|
|
ir_add_error_node(ira, source_node,
|
|
buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, resolved_type);
|
|
if (type_is_invalid(casted_op1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, resolved_type);
|
|
if (type_is_invalid(casted_op2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *resolve_const_result = ir_try_evaluate_bin_op_cmp_const(ira, &bin_op_instruction->base.base, casted_op1,
|
|
casted_op2, resolved_type, op_id);
|
|
if (resolve_const_result != nullptr) {
|
|
return resolve_const_result;
|
|
}
|
|
|
|
ZigType *res_type = (resolved_type->id == ZigTypeIdVector) ?
|
|
get_vector_type(ira->codegen, resolved_type->data.vector.len, ira->codegen->builtin_types.entry_bool) :
|
|
ira->codegen->builtin_types.entry_bool;
|
|
return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, res_type,
|
|
op_id, casted_op1, casted_op2, bin_op_instruction->safety_check_on);
|
|
}
|
|
|
|
// Evaluates one scalar (non-vector) binary math operation at compile time.
// `type_entry` is the operand/result type and must be an int, comptime_int,
// float, or comptime_float type; `op1_val` and `op2_val` are comptime-known
// operands of that type.
// On success, writes the result into `out_val` (also setting its `type` and
// `special` fields) and returns nullptr. On a comptime error (division by
// zero, negative denominator, exact-op violation, integer overflow) returns
// the emitted ErrorMsg instead; the caller is responsible for turning that
// into an invalid instruction (and possibly attaching notes).
static ErrorMsg *ir_eval_math_op_scalar(IrAnalyze *ira, IrInst* source_instr, ZigType *type_entry,
        ZigValue *op1_val, IrBinOp op_id, ZigValue *op2_val, ZigValue *out_val)
{
    bool is_int;
    bool is_float;
    // How op2 compares against zero; used for the division/remainder checks
    // below before the main dispatch.
    Cmp op2_zcmp;
    if (type_entry->id == ZigTypeIdInt || type_entry->id == ZigTypeIdComptimeInt) {
        is_int = true;
        is_float = false;
        op2_zcmp = bigint_cmp_zero(&op2_val->data.x_bigint);
    } else if (type_entry->id == ZigTypeIdFloat ||
                type_entry->id == ZigTypeIdComptimeFloat)
    {
        is_int = false;
        is_float = true;
        op2_zcmp = float_cmp_zero(op2_val);
    } else {
        zig_unreachable();
    }

    // Division/remainder by a comptime-known zero is a compile error.
    // (IrBinOpDivExact is not listed here: its zero check is handled via the
    // remainder comparison in its case below.)
    if ((op_id == IrBinOpDivUnspecified || op_id == IrBinOpRemRem || op_id == IrBinOpRemMod ||
        op_id == IrBinOpDivTrunc || op_id == IrBinOpDivFloor) && op2_zcmp == CmpEQ)
    {
        return ir_add_error(ira, source_instr, buf_sprintf("division by zero"));
    }
    // Remainder/modulo require a non-negative denominator at comptime.
    if ((op_id == IrBinOpRemRem || op_id == IrBinOpRemMod) && op2_zcmp == CmpLT) {
        return ir_add_error(ira, source_instr, buf_sprintf("negative denominator"));
    }

    switch (op_id) {
        // These operators never reach this function: comparisons, boolean ops
        // and array ops are analyzed elsewhere, and the "unspecified"
        // remainder op is resolved to RemRem/RemMod before evaluation.
        case IrBinOpInvalid:
        case IrBinOpBoolOr:
        case IrBinOpBoolAnd:
        case IrBinOpCmpEq:
        case IrBinOpCmpNotEq:
        case IrBinOpCmpLessThan:
        case IrBinOpCmpGreaterThan:
        case IrBinOpCmpLessOrEq:
        case IrBinOpCmpGreaterOrEq:
        case IrBinOpArrayCat:
        case IrBinOpArrayMult:
        case IrBinOpRemUnspecified:
            zig_unreachable();
        case IrBinOpBinOr:
            assert(is_int);
            bigint_or(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBinXor:
            assert(is_int);
            bigint_xor(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBinAnd:
            assert(is_int);
            bigint_and(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBitShiftLeftExact:
            // Exact left shift: overflow is caught by the fits-in-bits check
            // at the bottom of this function (for fixed-width ints).
            assert(is_int);
            bigint_shl(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBitShiftLeftLossy:
            // Lossy left shift truncates to the type's bit width, so it only
            // makes sense for fixed-width integers.
            assert(type_entry->id == ZigTypeIdInt);
            bigint_shl_trunc(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpBitShiftRightExact:
        {
            assert(is_int);
            bigint_shr(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            // Verify exactness: shifting the result back left must reproduce
            // the original operand, otherwise 1 bits were shifted out.
            BigInt orig_bigint;
            bigint_shl(&orig_bigint, &out_val->data.x_bigint, &op2_val->data.x_bigint);
            if (bigint_cmp(&op1_val->data.x_bigint, &orig_bigint) != CmpEQ) {
                return ir_add_error(ira, source_instr, buf_sprintf("exact shift shifted out 1 bits"));
            }
            break;
        }
        case IrBinOpBitShiftRightLossy:
            assert(is_int);
            bigint_shr(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpAdd:
            if (is_int) {
                bigint_add(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_add(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpAddWrap:
            // Wrapping ops are only defined for fixed-width integers.
            assert(type_entry->id == ZigTypeIdInt);
            bigint_add_wrap(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpSub:
            if (is_int) {
                bigint_sub(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_sub(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpSubWrap:
            assert(type_entry->id == ZigTypeIdInt);
            bigint_sub_wrap(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpMult:
            if (is_int) {
                bigint_mul(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_mul(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpMultWrap:
            assert(type_entry->id == ZigTypeIdInt);
            bigint_mul_wrap(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpDivUnspecified:
            // The `/` operator only reaches here for floats; for integers it
            // is resolved to DivTrunc (or rejected) by the caller.
            assert(is_float);
            float_div(out_val, op1_val, op2_val);
            break;
        case IrBinOpDivTrunc:
            if (is_int) {
                bigint_div_trunc(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_div_trunc(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpDivFloor:
            if (is_int) {
                bigint_div_floor(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_div_floor(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpDivExact:
            // @divExact: compute the truncated quotient, then error out if
            // there was any remainder.
            if (is_int) {
                bigint_div_trunc(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
                BigInt remainder;
                bigint_rem(&remainder, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
                if (bigint_cmp_zero(&remainder) != CmpEQ) {
                    return ir_add_error(ira, source_instr, buf_sprintf("exact division had a remainder"));
                }
            } else {
                float_div_trunc(out_val, op1_val, op2_val);
                ZigValue remainder = {};
                float_rem(&remainder, op1_val, op2_val);
                if (float_cmp_zero(&remainder) != CmpEQ) {
                    return ir_add_error(ira, source_instr, buf_sprintf("exact division had a remainder"));
                }
            }
            break;
        case IrBinOpRemRem:
            if (is_int) {
                bigint_rem(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_rem(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpRemMod:
            if (is_int) {
                bigint_mod(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_mod(out_val, op1_val, op2_val);
            }
            break;
    }

    // For fixed-width integers, the (arbitrary-precision) result must fit in
    // the destination type; this is also what catches @shlExact overflow.
    if (type_entry->id == ZigTypeIdInt) {
        if (!bigint_fits_in_bits(&out_val->data.x_bigint, type_entry->data.integral.bit_count,
                type_entry->data.integral.is_signed))
        {
            return ir_add_error(ira, source_instr, buf_sprintf("operation caused overflow"));
        }
    }

    out_val->type = type_entry;
    out_val->special = ConstValSpecialStatic;
    return nullptr;
}
|
|
|
|
// This works on operands that have already been checked to be comptime known.
// Evaluates the binary math op at compile time and returns a new constant
// instruction of `type_entry` holding the result, or invalid_inst_gen if the
// evaluation reported a comptime error. Vector operands are evaluated
// element-wise via ir_eval_math_op_scalar, with an error note naming the
// failing element index.
static IrInstGen *ir_analyze_math_op(IrAnalyze *ira, IrInst* source_instr,
        ZigType *type_entry, ZigValue *op1_val, IrBinOp op_id, ZigValue *op2_val)
{
    IrInstGen *result_instruction = ir_const(ira, source_instr, type_entry);
    ZigValue *out_val = result_instruction->value;
    if (type_entry->id == ZigTypeIdVector) {
        // Materialize per-element storage for the operands (which may be
        // wholly or partially undef) and for the output. The output is marked
        // undef first so expand_undef_array allocates its element array.
        expand_undef_array(ira->codegen, op1_val);
        expand_undef_array(ira->codegen, op2_val);
        out_val->special = ConstValSpecialUndef;
        expand_undef_array(ira->codegen, out_val);
        size_t len = type_entry->data.vector.len;
        ZigType *scalar_type = type_entry->data.vector.elem_type;
        for (size_t i = 0; i < len; i += 1) {
            ZigValue *scalar_op1_val = &op1_val->data.x_array.data.s_none.elements[i];
            ZigValue *scalar_op2_val = &op2_val->data.x_array.data.s_none.elements[i];
            ZigValue *scalar_out_val = &out_val->data.x_array.data.s_none.elements[i];
            assert(scalar_op1_val->type == scalar_type);
            assert(scalar_out_val->type == scalar_type);
            ErrorMsg *msg = ir_eval_math_op_scalar(ira, source_instr, scalar_type,
                    scalar_op1_val, op_id, scalar_op2_val, scalar_out_val);
            if (msg != nullptr) {
                // Point the user at which vector lane triggered the error.
                add_error_note(ira->codegen, msg, source_instr->source_node,
                    buf_sprintf("when computing vector element at index %" ZIG_PRI_usize, i));
                return ira->codegen->invalid_inst_gen;
            }
        }
        // All elements computed; the aggregate is now fully static.
        out_val->type = type_entry;
        out_val->special = ConstValSpecialStatic;
    } else {
        if (ir_eval_math_op_scalar(ira, source_instr, type_entry, op1_val, op_id, op2_val, out_val) != nullptr) {
            return ira->codegen->invalid_inst_gen;
        }
    }
    // Cast the constant result to the requested type (no-op in the common
    // case; presumably normalizes comptime-typed results — see ir_implicit_cast).
    return ir_implicit_cast(ira, result_instruction, type_entry);
}
|
|
|
|
// Analyzes the bit shift operators (<<, >>, <<|-style lossy/exact variants).
// Validates the operand types (int or comptime_int scalars, or matching
// vectors thereof), restricts the RHS to a type that cannot exceed the LHS
// bit width, folds the operation when both operands are comptime known, and
// otherwise emits a runtime bin-op instruction.
static IrInstGen *ir_analyze_bit_shift(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
    IrInstGen *op1 = bin_op_instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = bin_op_instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *op1_type = op1->value->type;
    ZigType *op2_type = op2->value->type;

    // Vector-ness must agree between the two operands.
    if (op1_type->id == ZigTypeIdVector && op2_type->id != ZigTypeIdVector) {
        ir_add_error(ira, &bin_op_instruction->op1->base,
            buf_sprintf("bit shifting operation expected vector type, found '%s'",
                buf_ptr(&op2_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (op1_type->id != ZigTypeIdVector && op2_type->id == ZigTypeIdVector) {
        ir_add_error(ira, &bin_op_instruction->op1->base,
            buf_sprintf("bit shifting operation expected vector type, found '%s'",
                buf_ptr(&op1_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // From here on, validate the element type for vectors and the operand
    // type itself for scalars.
    ZigType *op1_scalar_type = (op1_type->id == ZigTypeIdVector) ?
        op1_type->data.vector.elem_type : op1_type;
    ZigType *op2_scalar_type = (op2_type->id == ZigTypeIdVector) ?
        op2_type->data.vector.elem_type : op2_type;

    if (op1_scalar_type->id != ZigTypeIdInt && op1_scalar_type->id != ZigTypeIdComptimeInt) {
        ir_add_error(ira, &bin_op_instruction->op1->base,
            buf_sprintf("bit shifting operation expected integer type, found '%s'",
                buf_ptr(&op1_scalar_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (op2_scalar_type->id != ZigTypeIdInt && op2_scalar_type->id != ZigTypeIdComptimeInt) {
        ir_add_error(ira, &bin_op_instruction->op2->base,
            buf_sprintf("shift amount has to be an integer type, but found '%s'",
                buf_ptr(&op2_scalar_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *casted_op2;
    IrBinOp op_id = bin_op_instruction->op_id;
    if (op1_scalar_type->id == ZigTypeIdComptimeInt) {
        // comptime_int has no finite bit width
        casted_op2 = op2;

        // A lossy shift cannot actually lose bits on an unbounded integer, so
        // treat it as the exact variant.
        if (op_id == IrBinOpBitShiftLeftLossy) {
            op_id = IrBinOpBitShiftLeftExact;
        }

        // Shifting a comptime_int requires a comptime-known RHS, since the
        // result type depends on the value.
        if (!instr_is_comptime(op2)) {
            ir_add_error(ira, &bin_op_instruction->base.base,
                buf_sprintf("LHS of shift must be a fixed-width integer type, or RHS must be compile-time known"));
            return ira->codegen->invalid_inst_gen;
        }

        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        if (op2_val->data.x_bigint.is_negative) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &op2_val->data.x_bigint, 10);
            ir_add_error(ira, &casted_op2->base,
                buf_sprintf("shift by negative value %s", buf_ptr(val_buf)));
            return ira->codegen->invalid_inst_gen;
        }
    } else {
        // Fixed-width LHS: the shift amount is coerced to the smallest
        // unsigned integer type that can hold bit_count - 1 (e.g. u5 for u32),
        // which statically rules out most over-shifts.
        const unsigned bit_count = op1_scalar_type->data.integral.bit_count;
        ZigType *shift_amt_type = get_smallest_unsigned_int_type(ira->codegen,
            bit_count > 0 ? bit_count - 1 : 0);

        if (op1_type->id == ZigTypeIdVector) {
            shift_amt_type = get_vector_type(ira->codegen, op1_type->data.vector.len,
                shift_amt_type);
        }

        casted_op2 = ir_implicit_cast(ira, op2, shift_amt_type);
        if (type_is_invalid(casted_op2->value->type))
            return ira->codegen->invalid_inst_gen;

        // This check is only valid iff op1 has at least one bit
        if (bit_count > 0 && instr_is_comptime(casted_op2)) {
            ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
            if (op2_val == nullptr)
                return ira->codegen->invalid_inst_gen;

            ZigValue bit_count_value = {};
            init_const_usize(ira->codegen, &bit_count_value, bit_count);

            // Every lane of the (possibly vector) RHS must be < bit_count.
            if (!value_cmp_numeric_val_all(op2_val, CmpLT, &bit_count_value)) {
                ErrorMsg* msg = ir_add_error(ira,
                    &bin_op_instruction->base.base,
                    buf_sprintf("RHS of shift is too large for LHS type"));
                add_error_note(ira->codegen, msg, op1->base.source_node,
                    buf_sprintf("type %s has only %u bits",
                        buf_ptr(&op1->value->type->name), bit_count));

                return ira->codegen->invalid_inst_gen;
            }
        }
    }

    // Fast path for zero RHS
    if (instr_is_comptime(casted_op2)) {
        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // Shifting by zero (all lanes zero, for vectors) is the identity; the
        // cast preserves the operand's type and comptime-ness.
        if (value_cmp_numeric_val_all(op2_val, CmpEQ, nullptr))
            return ir_analyze_cast(ira, &bin_op_instruction->base.base, op1->value->type, op1);
    }

    // Both operands comptime known: fold the shift now.
    if (instr_is_comptime(op1) && instr_is_comptime(casted_op2)) {
        ZigValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        return ir_analyze_math_op(ira, &bin_op_instruction->base.base, op1_type, op1_val, op_id, op2_val);
    }

    // Runtime shift; the result has the LHS type.
    return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, op1->value->type,
            op_id, op1, casted_op2, bin_op_instruction->safety_check_on);
}
|
|
|
|
// Reports whether the given binary operator has a defined meaning for
// floating point operands. Comparison/boolean operators are analyzed by
// dedicated code paths, and the bitwise, wrapping, shift, and array
// operators are not float operations.
static bool ok_float_op(IrBinOp op) {
    switch (op) {
        case IrBinOpInvalid:
            zig_unreachable();

        // Not valid on floats: boolean, comparison, bitwise, shift,
        // wrapping-arithmetic, and array operators.
        case IrBinOpBoolOr:
        case IrBinOpBoolAnd:
        case IrBinOpCmpEq:
        case IrBinOpCmpNotEq:
        case IrBinOpCmpLessThan:
        case IrBinOpCmpGreaterThan:
        case IrBinOpCmpLessOrEq:
        case IrBinOpCmpGreaterOrEq:
        case IrBinOpBinOr:
        case IrBinOpBinXor:
        case IrBinOpBinAnd:
        case IrBinOpBitShiftLeftLossy:
        case IrBinOpBitShiftLeftExact:
        case IrBinOpBitShiftRightLossy:
        case IrBinOpBitShiftRightExact:
        case IrBinOpAddWrap:
        case IrBinOpSubWrap:
        case IrBinOpMultWrap:
        case IrBinOpArrayCat:
        case IrBinOpArrayMult:
            return false;

        // Arithmetic operators that work on floats.
        case IrBinOpAdd:
        case IrBinOpSub:
        case IrBinOpMult:
        case IrBinOpDivUnspecified:
        case IrBinOpDivTrunc:
        case IrBinOpDivFloor:
        case IrBinOpDivExact:
        case IrBinOpRemRem:
        case IrBinOpRemMod:
        case IrBinOpRemUnspecified:
            return true;
    }
    zig_unreachable();
}
|
|
|
|
// Reports whether `lhs_type op rhs` is valid pointer arithmetic: only + and -
// are allowed, and only on many-item ([*]T) or C ([*c]T) pointers. Single-item
// pointers never support arithmetic.
static bool is_pointer_arithmetic_allowed(ZigType *lhs_type, IrBinOp op) {
    // Only addition and subtraction are pointer-arithmetic operators.
    if (op != IrBinOpAdd && op != IrBinOpSub)
        return false;

    if (lhs_type->id != ZigTypeIdPointer)
        return false;

    switch (lhs_type->data.pointer.ptr_len) {
        case PtrLenSingle:
            return false;
        case PtrLenUnknown:
        case PtrLenC:
            return true;
    }
    zig_unreachable();
}
|
|
|
|
// Returns true if integer `value` can be converted to `type_entry` without
|
|
// losing data.
|
|
// If `value` is a vector the function returns true if this is valid for every
|
|
// element.
|
|
static bool value_numeric_fits_in_type(ZigValue *value, ZigType *type_entry) {
|
|
assert(value->special == ConstValSpecialStatic);
|
|
assert(type_entry->id == ZigTypeIdInt);
|
|
|
|
switch (value->type->id) {
|
|
case ZigTypeIdComptimeInt:
|
|
case ZigTypeIdInt: {
|
|
return bigint_fits_in_bits(&value->data.x_bigint, type_entry->data.integral.bit_count,
|
|
type_entry->data.integral.is_signed);
|
|
}
|
|
case ZigTypeIdVector: {
|
|
for (size_t i = 0; i < value->type->data.vector.len; i++) {
|
|
ZigValue *scalar_value = &value->data.x_array.data.s_none.elements[i];
|
|
const bool result = bigint_fits_in_bits(&scalar_value->data.x_bigint,
|
|
type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
|
|
if (!result) return false;
|
|
}
|
|
return true;
|
|
}
|
|
default: zig_unreachable();
|
|
}
|
|
}
|
|
|
|
// Compares a comptime-known numeric `left` (or each element, if `left` is a
// vector) against `right`, or against zero when `right` is nullptr, and
// reports whether the comparison outcome equals `predicate`.
// Vector semantics: with any==true, returns true if ANY element satisfies the
// predicate; with any==false, returns true only if ALL elements do.
// NaN on either side never satisfies any predicate.
static bool value_cmp_numeric_val(ZigValue *left, Cmp predicate, ZigValue *right, bool any) {
    assert(left->special == ConstValSpecialStatic);
    assert(right == nullptr || right->special == ConstValSpecialStatic);

    switch (left->type->id) {
        case ZigTypeIdComptimeInt:
        case ZigTypeIdInt: {
            const Cmp result = right ?
                bigint_cmp(&left->data.x_bigint, &right->data.x_bigint) :
                bigint_cmp_zero(&left->data.x_bigint);
            return result == predicate;
        }
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdFloat: {
            // NaN is unordered: no predicate holds when either side is NaN.
            if (float_is_nan(left))
                return false;
            if (right != nullptr && float_is_nan(right))
                return false;

            const Cmp result = right ? float_cmp(left, right) : float_cmp_zero(left);
            return result == predicate;
        }
        case ZigTypeIdVector: {
            // Each element is compared against the same (scalar) `right`.
            for (size_t i = 0; i < left->type->data.vector.len; i++) {
                ZigValue *scalar_val = &left->data.x_array.data.s_none.elements[i];
                const bool result = value_cmp_numeric_val(scalar_val, predicate, right, any);

                if (any && result)
                    return true; // This element satisfies the predicate
                else if (!any && !result)
                    return false; // This element doesn't satisfy the predicate
            }
            // "any" mode found no satisfying element; "all" mode found no
            // violating element (also covers zero-length vectors).
            return any ? false : true;
        }
        default:
            zig_unreachable();
    }
}
|
|
|
|
// True if the scalar, or at least one vector element, satisfies `predicate`
// against `right` (or against zero when `right` is nullptr).
static bool value_cmp_numeric_val_any(ZigValue *left, Cmp predicate, ZigValue *right) {
    const bool match_any_element = true;
    return value_cmp_numeric_val(left, predicate, right, match_any_element);
}
|
|
|
|
// True only if the scalar, or every vector element, satisfies `predicate`
// against `right` (or against zero when `right` is nullptr).
static bool value_cmp_numeric_val_all(ZigValue *left, Cmp predicate, ZigValue *right) {
    const bool match_any_element = false;
    return value_cmp_numeric_val(left, predicate, right, match_any_element);
}
|
|
|
|
static IrInstGen *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstSrcBinOp *instruction) {
|
|
Error err;
|
|
|
|
IrInstGen *op1 = instruction->op1->child;
|
|
if (type_is_invalid(op1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *op2 = instruction->op2->child;
|
|
if (type_is_invalid(op2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrBinOp op_id = instruction->op_id;
|
|
|
|
// look for pointer math
|
|
if (is_pointer_arithmetic_allowed(op1->value->type, op_id)) {
|
|
IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, ira->codegen->builtin_types.entry_usize);
|
|
if (type_is_invalid(casted_op2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// If either operand is undef, result is undef.
|
|
ZigValue *op1_val = nullptr;
|
|
ZigValue *op2_val = nullptr;
|
|
if (instr_is_comptime(op1)) {
|
|
op1_val = ir_resolve_const(ira, op1, UndefOk);
|
|
if (op1_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (op1_val->special == ConstValSpecialUndef)
|
|
return ir_const_undef(ira, &instruction->base.base, op1->value->type);
|
|
}
|
|
if (instr_is_comptime(casted_op2)) {
|
|
op2_val = ir_resolve_const(ira, casted_op2, UndefOk);
|
|
if (op2_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (op2_val->special == ConstValSpecialUndef)
|
|
return ir_const_undef(ira, &instruction->base.base, op1->value->type);
|
|
}
|
|
|
|
ZigType *elem_type = op1->value->type->data.pointer.child_type;
|
|
if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusSizeKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// NOTE: this variable is meaningful iff op2_val is not null!
|
|
uint64_t byte_offset;
|
|
if (op2_val != nullptr) {
|
|
uint64_t elem_offset;
|
|
if (!ir_resolve_usize(ira, casted_op2, &elem_offset))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
byte_offset = type_size(ira->codegen, elem_type) * elem_offset;
|
|
}
|
|
|
|
// Fast path for cases where the RHS is zero
|
|
if (op2_val != nullptr && byte_offset == 0) {
|
|
return op1;
|
|
}
|
|
|
|
ZigType *result_type = op1->value->type;
|
|
// Calculate the new alignment of the pointer
|
|
{
|
|
uint32_t align_bytes;
|
|
if ((err = resolve_ptr_align(ira, op1->value->type, &align_bytes)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// If the addend is not a comptime-known value we can still count on
|
|
// it being a multiple of the type size
|
|
uint32_t addend = op2_val ? byte_offset : type_size(ira->codegen, elem_type);
|
|
|
|
// The resulting pointer is aligned to the lcd between the
|
|
// offset (an arbitrary number) and the alignment factor (always
|
|
// a power of two, non zero)
|
|
uint32_t new_align = 1 << ctzll(addend | align_bytes);
|
|
// Rough guard to prevent overflows
|
|
assert(new_align);
|
|
result_type = adjust_ptr_align(ira->codegen, result_type, new_align);
|
|
}
|
|
|
|
if (op2_val != nullptr && op1_val != nullptr &&
|
|
(op1->value->data.x_ptr.special == ConstPtrSpecialHardCodedAddr ||
|
|
op1->value->data.x_ptr.special == ConstPtrSpecialNull))
|
|
{
|
|
uint64_t start_addr = (op1_val->data.x_ptr.special == ConstPtrSpecialNull) ?
|
|
0 : op1_val->data.x_ptr.data.hard_coded_addr.addr;
|
|
uint64_t new_addr;
|
|
if (op_id == IrBinOpAdd) {
|
|
new_addr = start_addr + byte_offset;
|
|
} else if (op_id == IrBinOpSub) {
|
|
new_addr = start_addr - byte_offset;
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
|
|
result->value->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
|
|
result->value->data.x_ptr.mut = ConstPtrMutRuntimeVar;
|
|
result->value->data.x_ptr.data.hard_coded_addr.addr = new_addr;
|
|
return result;
|
|
}
|
|
|
|
return ir_build_bin_op_gen(ira, &instruction->base.base, result_type, op_id, op1, casted_op2, true);
|
|
}
|
|
|
|
IrInstGen *instructions[] = {op1, op2};
|
|
ZigType *resolved_type = ir_resolve_peer_types(ira, instruction->base.base.source_node, nullptr, instructions, 2);
|
|
if (type_is_invalid(resolved_type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
ZigType *scalar_type = (resolved_type->id == ZigTypeIdVector) ?
|
|
resolved_type->data.vector.elem_type : resolved_type;
|
|
|
|
bool is_int = scalar_type->id == ZigTypeIdInt || scalar_type->id == ZigTypeIdComptimeInt;
|
|
bool is_float = scalar_type->id == ZigTypeIdFloat || scalar_type->id == ZigTypeIdComptimeFloat;
|
|
|
|
if (!is_int && !(is_float && ok_float_op(op_id))) {
|
|
AstNode *source_node = instruction->base.base.source_node;
|
|
ir_add_error_node(ira, source_node,
|
|
buf_sprintf("invalid operands to binary expression: '%s' and '%s'",
|
|
buf_ptr(&op1->value->type->name),
|
|
buf_ptr(&op2->value->type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, resolved_type);
|
|
if (type_is_invalid(casted_op1->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, resolved_type);
|
|
if (type_is_invalid(casted_op2->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// Comptime integers have no fixed size
|
|
if (scalar_type->id == ZigTypeIdComptimeInt) {
|
|
if (op_id == IrBinOpAddWrap) {
|
|
op_id = IrBinOpAdd;
|
|
} else if (op_id == IrBinOpSubWrap) {
|
|
op_id = IrBinOpSub;
|
|
} else if (op_id == IrBinOpMultWrap) {
|
|
op_id = IrBinOpMult;
|
|
}
|
|
}
|
|
|
|
if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) {
|
|
ZigValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
|
|
if (op1_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
|
|
if (op2_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// Promote division with negative numbers to signed
|
|
bool is_signed_div = value_cmp_numeric_val_any(op1_val, CmpLT, nullptr) ||
|
|
value_cmp_numeric_val_any(op2_val, CmpLT, nullptr);
|
|
|
|
if (op_id == IrBinOpDivUnspecified && is_int) {
|
|
// Default to truncating division and check if it's valid for the
|
|
// given operands if signed
|
|
op_id = IrBinOpDivTrunc;
|
|
|
|
if (is_signed_div) {
|
|
bool ok = false;
|
|
|
|
if (value_cmp_numeric_val_any(op2_val, CmpEQ, nullptr)) {
|
|
// the division by zero error will be caught later, but we don't have a
|
|
// division function ambiguity problem.
|
|
ok = true;
|
|
} else {
|
|
IrInstGen *trunc_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
|
|
op1_val, IrBinOpDivTrunc, op2_val);
|
|
if (type_is_invalid(trunc_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *floor_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
|
|
op1_val, IrBinOpDivFloor, op2_val);
|
|
if (type_is_invalid(floor_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *cmp_val = ir_analyze_bin_op_cmp_numeric(ira, &instruction->base.base,
|
|
trunc_val, floor_val, IrBinOpCmpEq);
|
|
if (type_is_invalid(cmp_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// We can "upgrade" the operator only if trunc(a/b) == floor(a/b)
|
|
if (!ir_resolve_bool(ira, cmp_val, &ok))
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
if (!ok) {
|
|
ir_add_error(ira, &instruction->base.base,
|
|
buf_sprintf("division with '%s' and '%s': signed integers must use @divTrunc, @divFloor, or @divExact",
|
|
buf_ptr(&op1->value->type->name),
|
|
buf_ptr(&op2->value->type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
} else if (op_id == IrBinOpRemUnspecified) {
|
|
op_id = IrBinOpRemRem;
|
|
|
|
if (is_signed_div) {
|
|
bool ok = false;
|
|
|
|
if (value_cmp_numeric_val_any(op2_val, CmpEQ, nullptr)) {
|
|
// the division by zero error will be caught later, but we don't have a
|
|
// division function ambiguity problem.
|
|
ok = true;
|
|
} else {
|
|
IrInstGen *rem_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
|
|
op1_val, IrBinOpRemRem, op2_val);
|
|
if (type_is_invalid(rem_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *mod_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
|
|
op1_val, IrBinOpRemMod, op2_val);
|
|
if (type_is_invalid(mod_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *cmp_val = ir_analyze_bin_op_cmp_numeric(ira, &instruction->base.base,
|
|
rem_val, mod_val, IrBinOpCmpEq);
|
|
if (type_is_invalid(cmp_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// We can "upgrade" the operator only if mod(a,b) == rem(a,b)
|
|
if (!ir_resolve_bool(ira, cmp_val, &ok))
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
if (!ok) {
|
|
ir_add_error(ira, &instruction->base.base,
|
|
buf_sprintf("remainder division with '%s' and '%s': signed integers and floats must use @rem or @mod",
|
|
buf_ptr(&op1->value->type->name),
|
|
buf_ptr(&op2->value->type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
}
|
|
|
|
return ir_analyze_math_op(ira, &instruction->base.base, resolved_type, op1_val, op_id, op2_val);
|
|
}
|
|
|
|
const bool is_signed_div =
|
|
(scalar_type->id == ZigTypeIdInt && scalar_type->data.integral.is_signed) ||
|
|
scalar_type->id == ZigTypeIdFloat;
|
|
|
|
// Warn the user to use the proper operators here
|
|
if (op_id == IrBinOpDivUnspecified && is_int) {
|
|
op_id = IrBinOpDivTrunc;
|
|
|
|
if (is_signed_div) {
|
|
ir_add_error(ira, &instruction->base.base,
|
|
buf_sprintf("division with '%s' and '%s': signed integers must use @divTrunc, @divFloor, or @divExact",
|
|
buf_ptr(&op1->value->type->name),
|
|
buf_ptr(&op2->value->type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
} else if (op_id == IrBinOpRemUnspecified) {
|
|
op_id = IrBinOpRemRem;
|
|
|
|
if (is_signed_div) {
|
|
ir_add_error(ira, &instruction->base.base,
|
|
buf_sprintf("remainder division with '%s' and '%s': signed integers and floats must use @rem or @mod",
|
|
buf_ptr(&op1->value->type->name),
|
|
buf_ptr(&op2->value->type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
|
|
return ir_build_bin_op_gen(ira, &instruction->base.base, resolved_type,
|
|
op_id, casted_op1, casted_op2, instruction->safety_check_on);
|
|
}
|
|
|
|
// Analyzes `++` applied to two tuples: builds a new inferred tuple type whose
// fields are op1's fields followed by op2's fields, stores each source field
// value into a freshly allocated result location, and returns the dereferenced
// result. Returns ira->codegen->invalid_inst_gen on any analysis error.
static IrInstGen *ir_analyze_tuple_cat(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *op1, IrInstGen *op2)
{
    Error err;
    ZigType *op1_type = op1->value->type;
    ZigType *op2_type = op2->value->type;

    uint32_t op1_field_count = op1_type->data.structure.src_field_count;
    uint32_t op2_field_count = op2_type->data.structure.src_field_count;

    // Create a fresh anonymous struct type marked as an inferred tuple; field
    // types are filled in from the operands below.
    Buf *bare_name = buf_alloc();
    Buf *name = get_anon_type_name(ira->codegen, nullptr, container_string(ContainerKindStruct),
        source_instr->scope, source_instr->source_node, bare_name);
    ZigType *new_type = get_partial_container_type(ira->codegen, source_instr->scope,
        ContainerKindStruct, source_instr->source_node, buf_ptr(name), bare_name, ContainerLayoutAuto);
    new_type->data.structure.special = StructSpecialInferredTuple;
    new_type->data.structure.resolve_status = ResolveStatusBeingInferred;
    uint32_t new_field_count = op1_field_count + op2_field_count;

    new_type->data.structure.src_field_count = new_field_count;
    new_type->data.structure.fields = realloc_type_struct_fields(new_type->data.structure.fields,
        0, new_field_count);

    IrInstGen *new_struct_ptr = ir_resolve_result(ira, source_instr, no_result_loc(),
        new_type, nullptr, false, true);

    // Copy field metadata: indices [0, op1_field_count) come from op1, the
    // rest from op2. Tuple fields are named "0", "1", ... by index.
    for (uint32_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *src_field;
        if (i < op1_field_count) {
            src_field = op1_type->data.structure.fields[i];
        } else {
            src_field = op2_type->data.structure.fields[i - op1_field_count];
        }
        TypeStructField *new_field = new_type->data.structure.fields[i];
        new_field->name = buf_sprintf("%" PRIu32, i);
        new_field->type_entry = src_field->type_entry;
        new_field->type_val = src_field->type_val;
        new_field->src_index = i;
        new_field->decl_node = src_field->decl_node;
        new_field->init_val = src_field->init_val;
        new_field->is_comptime = src_field->is_comptime;
    }
    if ((err = type_resolve(ira->codegen, new_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    // Store each source field value into its destination slot, remembering the
    // destination pointers whose stored values were comptime-known.
    ZigList<IrInstGen *> const_ptrs = {};
    for (uint32_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *dst_field = new_type->data.structure.fields[i];
        IrInstGen *src_struct_op;
        TypeStructField *src_field;
        if (i < op1_field_count) {
            src_field = op1_type->data.structure.fields[i];
            src_struct_op = op1;
        } else {
            src_field = op2_type->data.structure.fields[i - op1_field_count];
            src_struct_op = op2;
        }
        IrInstGen *field_value = ir_analyze_struct_value_field_value(ira, source_instr,
                src_struct_op, src_field);
        if (type_is_invalid(field_value->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *dest_ptr = ir_analyze_struct_field_ptr(ira, source_instr, dst_field,
                new_struct_ptr, new_type, true);
        if (type_is_invalid(dest_ptr->value->type))
            return ira->codegen->invalid_inst_gen;
        if (instr_is_comptime(field_value)) {
            const_ptrs.append(dest_ptr);
        }
        IrInstGen *store_ptr_inst = ir_analyze_store_ptr(ira, source_instr, dest_ptr, field_value,
                true);
        if (type_is_invalid(store_ptr_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }
    // If not every field was comptime-known, the result struct is a runtime
    // value; re-emit runtime stores for the fields that were stored comptime.
    if (const_ptrs.length != new_field_count) {
        new_struct_ptr->value->special = ConstValSpecialRuntime;
        for (size_t i = 0; i < const_ptrs.length; i += 1) {
            IrInstGen *elem_result_loc = const_ptrs.at(i);
            assert(elem_result_loc->value->special == ConstValSpecialStatic);
            if (elem_result_loc->value->type->data.pointer.inferred_struct_field != nullptr) {
                // This field will be generated comptime; no need to do this.
                continue;
            }
            IrInstGen *deref = ir_get_deref(ira, &elem_result_loc->base, elem_result_loc, nullptr);
            if (!type_requires_comptime(ira->codegen, elem_result_loc->value->type->data.pointer.child_type)) {
                elem_result_loc->value->special = ConstValSpecialRuntime;
            }
            IrInstGen *store_ptr_inst = ir_analyze_store_ptr(ira, &elem_result_loc->base,
                    elem_result_loc, deref, true);
            // Propagate analysis errors instead of silently dropping them;
            // this matches the equivalent loop in ir_analyze_tuple_mult.
            if (type_is_invalid(store_ptr_inst->value->type))
                return ira->codegen->invalid_inst_gen;
        }
    }

    const_ptrs.deinit();

    return ir_get_deref(ira, source_instr, new_struct_ptr, nullptr);
}
|
|
|
|
// Analyzes the `++` (array concatenation) operator at comptime. Both operands
// must be comptime-known (non-undef overall, though the element arrays may be
// undef) and may each be: an array, a sentinel-terminated u8 pointer into an
// array (C-string-like), a slice, or a single-item pointer to an array.
// Tuples are routed to ir_analyze_tuple_cat instead. The result type mirrors
// the operand forms (pointer, slice, or array). Returns invalid_inst_gen on
// error.
static IrInstGen *ir_analyze_array_cat(IrAnalyze *ira, IrInstSrcBinOp *instruction) {
    IrInstGen *op1 = instruction->op1->child;
    ZigType *op1_type = op1->value->type;
    if (type_is_invalid(op1_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = instruction->op2->child;
    ZigType *op2_type = op2->value->type;
    if (type_is_invalid(op2_type))
        return ira->codegen->invalid_inst_gen;

    // Tuple ++ tuple has its own implementation.
    if (is_tuple(op1_type) && is_tuple(op2_type)) {
        return ir_analyze_tuple_cat(ira, &instruction->base.base, op1, op2);
    }

    // Both operands must be comptime-known, and not undef at the top level.
    ZigValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
    if (!op1_val)
        return ira->codegen->invalid_inst_gen;

    ZigValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
    if (!op2_val)
        return ira->codegen->invalid_inst_gen;

    // Normalize op1 into: the backing array value, the [index, end) element
    // range within it, the element type, and an optional sentinel.
    ZigValue *sentinel1 = nullptr;
    ZigValue *op1_array_val;
    size_t op1_array_index;
    size_t op1_array_end;
    ZigType *child_type;
    if (op1_type->id == ZigTypeIdArray) {
        child_type = op1_type->data.array.child_type;
        op1_array_val = op1_val;
        op1_array_index = 0;
        op1_array_end = op1_type->data.array.len;
        sentinel1 = op1_type->data.array.sentinel;
    } else if (op1_type->id == ZigTypeIdPointer &&
        op1_type->data.pointer.child_type == ira->codegen->builtin_types.entry_u8 &&
        op1_type->data.pointer.sentinel != nullptr &&
        op1_val->data.x_ptr.special == ConstPtrSpecialBaseArray)
    {
        // Sentinel-terminated u8 pointer into an array (C string literal case).
        child_type = op1_type->data.pointer.child_type;
        op1_array_val = op1_val->data.x_ptr.data.base_array.array_val;
        op1_array_index = op1_val->data.x_ptr.data.base_array.elem_index;
        op1_array_end = op1_array_val->type->data.array.len;
        sentinel1 = op1_type->data.pointer.sentinel;
    } else if (is_slice(op1_type)) {
        // Slice: the range is [ptr.elem_index, ptr.elem_index + len).
        ZigType *ptr_type = op1_type->data.structure.fields[slice_ptr_index]->type_entry;
        child_type = ptr_type->data.pointer.child_type;
        ZigValue *ptr_val = op1_val->data.x_struct.fields[slice_ptr_index];
        assert(ptr_val->data.x_ptr.special == ConstPtrSpecialBaseArray);
        op1_array_val = ptr_val->data.x_ptr.data.base_array.array_val;
        op1_array_index = ptr_val->data.x_ptr.data.base_array.elem_index;
        ZigValue *len_val = op1_val->data.x_struct.fields[slice_len_index];
        op1_array_end = op1_array_index + bigint_as_usize(&len_val->data.x_bigint);
        sentinel1 = ptr_type->data.pointer.sentinel;
    } else if (op1_type->id == ZigTypeIdPointer &&
        op1_type->data.pointer.ptr_len == PtrLenSingle &&
        op1_type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        // *[N]T: dereference the pointer at comptime to reach the array value.
        ZigType *array_type = op1_type->data.pointer.child_type;
        child_type = array_type->data.array.child_type;
        op1_array_val = const_ptr_pointee(ira, ira->codegen, op1_val, op1->base.source_node);
        if (op1_array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        op1_array_index = 0;
        op1_array_end = array_type->data.array.len;
        sentinel1 = array_type->data.array.sentinel;
    } else {
        ir_add_error(ira, &op1->base, buf_sprintf("expected array, found '%s'", buf_ptr(&op1->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Normalize op2 the same way, additionally verifying its element type
    // matches op1's (checked after the if-chain via op2_type_valid).
    ZigValue *sentinel2 = nullptr;
    ZigValue *op2_array_val;
    size_t op2_array_index;
    size_t op2_array_end;
    bool op2_type_valid;
    if (op2_type->id == ZigTypeIdArray) {
        op2_type_valid = op2_type->data.array.child_type == child_type;
        op2_array_val = op2_val;
        op2_array_index = 0;
        op2_array_end = op2_array_val->type->data.array.len;
        sentinel2 = op2_type->data.array.sentinel;
    } else if (op2_type->id == ZigTypeIdPointer &&
        op2_type->data.pointer.sentinel != nullptr &&
        op2_val->data.x_ptr.special == ConstPtrSpecialBaseArray)
    {
        // NOTE(review): unlike the op1 case, this branch does not restrict the
        // pointer's child type to u8 — the child type is checked via
        // op2_type_valid below instead.
        op2_type_valid = op2_type->data.pointer.child_type == child_type;
        op2_array_val = op2_val->data.x_ptr.data.base_array.array_val;
        op2_array_index = op2_val->data.x_ptr.data.base_array.elem_index;
        op2_array_end = op2_array_val->type->data.array.len;

        sentinel2 = op2_type->data.pointer.sentinel;
    } else if (is_slice(op2_type)) {
        ZigType *ptr_type = op2_type->data.structure.fields[slice_ptr_index]->type_entry;
        op2_type_valid = ptr_type->data.pointer.child_type == child_type;
        ZigValue *ptr_val = op2_val->data.x_struct.fields[slice_ptr_index];
        assert(ptr_val->data.x_ptr.special == ConstPtrSpecialBaseArray);
        op2_array_val = ptr_val->data.x_ptr.data.base_array.array_val;
        op2_array_index = ptr_val->data.x_ptr.data.base_array.elem_index;
        ZigValue *len_val = op2_val->data.x_struct.fields[slice_len_index];
        op2_array_end = op2_array_index + bigint_as_usize(&len_val->data.x_bigint);

        sentinel2 = ptr_type->data.pointer.sentinel;
    } else if (op2_type->id == ZigTypeIdPointer && op2_type->data.pointer.ptr_len == PtrLenSingle &&
        op2_type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        ZigType *array_type = op2_type->data.pointer.child_type;
        op2_type_valid = array_type->data.array.child_type == child_type;
        op2_array_val = const_ptr_pointee(ira, ira->codegen, op2_val, op2->base.source_node);
        if (op2_array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        op2_array_index = 0;
        op2_array_end = array_type->data.array.len;

        sentinel2 = array_type->data.array.sentinel;
    } else {
        ir_add_error(ira, &op2->base,
            buf_sprintf("expected array or C string literal, found '%s'", buf_ptr(&op2->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }
    if (!op2_type_valid) {
        ir_add_error(ira, &op2->base, buf_sprintf("expected array of type '%s', found '%s'",
                buf_ptr(&child_type->name),
                buf_ptr(&op2->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigValue *sentinel;
    if (sentinel1 != nullptr && sentinel2 != nullptr) {
        // When there is a sentinel mismatch, no sentinel on the result. The type system
        // will catch this if it is a problem.
        sentinel = const_values_equal(ira->codegen, sentinel1, sentinel2) ? sentinel1 : nullptr;
    } else if (sentinel1 != nullptr) {
        sentinel = sentinel1;
    } else if (sentinel2 != nullptr) {
        sentinel = sentinel2;
    } else {
        sentinel = nullptr;
    }

    // The type of result is populated in the following if blocks
    IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
    ZigValue *out_val = result->value;

    // out_array_val is where the concatenated elements are written; depending
    // on the result type it is either out_val itself (array result) or a
    // separately allocated array value that out_val points to / slices into.
    ZigValue *out_array_val;
    size_t new_len = (op1_array_end - op1_array_index) + (op2_array_end - op2_array_index);
    if (op1_type->id == ZigTypeIdPointer || op2_type->id == ZigTypeIdPointer) {
        // Pointer result: a const single-item pointer referencing a new array.
        out_array_val = ira->codegen->pass1_arena->create<ZigValue>();
        out_array_val->special = ConstValSpecialStatic;
        out_array_val->type = get_array_type(ira->codegen, child_type, new_len, sentinel);

        out_val->data.x_ptr.special = ConstPtrSpecialRef;
        out_val->data.x_ptr.data.ref.pointee = out_array_val;
        out_val->type = get_pointer_to_type(ira->codegen, out_array_val->type, true);
    } else if (is_slice(op1_type) || is_slice(op2_type)) {
        // Slice result: build the {ptr, len} struct over a new backing array.
        ZigType *ptr_type = get_pointer_to_type_extra2(ira->codegen, child_type,
            true, false, PtrLenUnknown, 0, 0, 0, false,
            VECTOR_INDEX_NONE, nullptr, sentinel);
        result->value->type = get_slice_type(ira->codegen, ptr_type);
        out_array_val = ira->codegen->pass1_arena->create<ZigValue>();
        out_array_val->special = ConstValSpecialStatic;
        out_array_val->type = get_array_type(ira->codegen, child_type, new_len, sentinel);

        out_val->data.x_struct.fields = alloc_const_vals_ptrs(ira->codegen, 2);

        out_val->data.x_struct.fields[slice_ptr_index]->type = ptr_type;
        out_val->data.x_struct.fields[slice_ptr_index]->special = ConstValSpecialStatic;
        out_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.special = ConstPtrSpecialBaseArray;
        out_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.data.base_array.array_val = out_array_val;
        out_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.data.base_array.elem_index = 0;

        out_val->data.x_struct.fields[slice_len_index]->type = ira->codegen->builtin_types.entry_usize;
        out_val->data.x_struct.fields[slice_len_index]->special = ConstValSpecialStatic;
        bigint_init_unsigned(&out_val->data.x_struct.fields[slice_len_index]->data.x_bigint, new_len);
    } else if (op1_type->id == ZigTypeIdArray || op2_type->id == ZigTypeIdArray) {
        // Array result: elements are written directly into out_val.
        result->value->type = get_array_type(ira->codegen, child_type, new_len, sentinel);
        out_array_val = out_val;
    } else {
        // Fallback: many-item pointer result into a new backing array.
        result->value->type = get_pointer_to_type_extra2(ira->codegen, child_type, true, false, PtrLenUnknown,
            0, 0, 0, false, VECTOR_INDEX_NONE, nullptr, sentinel);
        out_array_val = ira->codegen->pass1_arena->create<ZigValue>();
        out_array_val->special = ConstValSpecialStatic;
        out_array_val->type = get_array_type(ira->codegen, child_type, new_len, sentinel);
        out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
        out_val->data.x_ptr.data.base_array.array_val = out_array_val;
        out_val->data.x_ptr.data.base_array.elem_index = 0;
    }

    // undef ++ undef is undef; skip element copying entirely.
    if (op1_array_val->data.x_array.special == ConstArraySpecialUndef &&
        op2_array_val->data.x_array.special == ConstArraySpecialUndef)
    {
        out_array_val->data.x_array.special = ConstArraySpecialUndef;
        return result;
    }

    // Allocate element storage, including one extra slot for the sentinel.
    uint64_t full_len = new_len + ((sentinel != nullptr) ? 1 : 0);
    out_array_val->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(full_len);
    // TODO handle the buf case here for an optimization
    expand_undef_array(ira->codegen, op1_array_val);
    expand_undef_array(ira->codegen, op2_array_val);

    // Copy op1's range, then op2's range, fixing up each element's parent
    // back-reference to the new array.
    size_t next_index = 0;
    for (size_t i = op1_array_index; i < op1_array_end; i += 1, next_index += 1) {
        ZigValue *elem_dest_val = &out_array_val->data.x_array.data.s_none.elements[next_index];
        copy_const_val(ira->codegen, elem_dest_val, &op1_array_val->data.x_array.data.s_none.elements[i]);
        elem_dest_val->parent.id = ConstParentIdArray;
        elem_dest_val->parent.data.p_array.array_val = out_array_val;
        elem_dest_val->parent.data.p_array.elem_index = next_index;
    }
    for (size_t i = op2_array_index; i < op2_array_end; i += 1, next_index += 1) {
        ZigValue *elem_dest_val = &out_array_val->data.x_array.data.s_none.elements[next_index];
        copy_const_val(ira->codegen, elem_dest_val, &op2_array_val->data.x_array.data.s_none.elements[i]);
        elem_dest_val->parent.id = ConstParentIdArray;
        elem_dest_val->parent.data.p_array.array_val = out_array_val;
        elem_dest_val->parent.data.p_array.elem_index = next_index;
    }
    // Append the sentinel value if the result type carries one.
    if (next_index < full_len) {
        ZigValue *elem_dest_val = &out_array_val->data.x_array.data.s_none.elements[next_index];
        copy_const_val(ira->codegen, elem_dest_val, sentinel);
        elem_dest_val->parent.id = ConstParentIdArray;
        elem_dest_val->parent.data.p_array.array_val = out_array_val;
        elem_dest_val->parent.data.p_array.elem_index = next_index;
        next_index += 1;
    }
    assert(next_index == full_len);

    return result;
}
|
|
|
|
// Analyzes `**` (repetition) applied to a tuple: builds a new inferred tuple
// type with op1's fields repeated op2 (a comptime usize) times, stores each
// field value, and returns the dereferenced result. Errors on multiply
// overflow of the field count. Returns invalid_inst_gen on error.
static IrInstGen *ir_analyze_tuple_mult(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *op1, IrInstGen *op2)
{
    Error err;
    ZigType *op1_type = op1->value->type;
    uint64_t op1_field_count = op1_type->data.structure.src_field_count;

    // The repetition count must be a comptime-known usize.
    uint64_t mult_amt;
    if (!ir_resolve_usize(ira, op2, &mult_amt))
        return ira->codegen->invalid_inst_gen;

    uint64_t new_field_count;
    if (mul_u64_overflow(op1_field_count, mult_amt, &new_field_count)) {
        ir_add_error(ira, source_instr, buf_sprintf("operation results in overflow"));
        return ira->codegen->invalid_inst_gen;
    }

    // Create a fresh anonymous struct type marked as an inferred tuple.
    Buf *bare_name = buf_alloc();
    Buf *name = get_anon_type_name(ira->codegen, nullptr, container_string(ContainerKindStruct),
        source_instr->scope, source_instr->source_node, bare_name);
    ZigType *new_type = get_partial_container_type(ira->codegen, source_instr->scope,
        ContainerKindStruct, source_instr->source_node, buf_ptr(name), bare_name, ContainerLayoutAuto);
    new_type->data.structure.special = StructSpecialInferredTuple;
    new_type->data.structure.resolve_status = ResolveStatusBeingInferred;
    new_type->data.structure.src_field_count = new_field_count;
    new_type->data.structure.fields = realloc_type_struct_fields(
        new_type->data.structure.fields, 0, new_field_count);

    IrInstGen *new_struct_ptr = ir_resolve_result(ira, source_instr, no_result_loc(),
        new_type, nullptr, false, true);

    // Copy field metadata; field i of the result mirrors source field
    // (i % op1_field_count). Tuple fields are named "0", "1", ... by index.
    for (uint64_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *src_field = op1_type->data.structure.fields[i % op1_field_count];
        TypeStructField *new_field = new_type->data.structure.fields[i];

        new_field->name = buf_sprintf("%" ZIG_PRI_u64, i);
        new_field->type_entry = src_field->type_entry;
        new_field->type_val = src_field->type_val;
        new_field->src_index = i;
        new_field->decl_node = src_field->decl_node;
        new_field->init_val = src_field->init_val;
        new_field->is_comptime = src_field->is_comptime;
    }

    if ((err = type_resolve(ira->codegen, new_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    // Store each repeated field value into its destination slot, remembering
    // the destination pointers whose stored values were comptime-known.
    ZigList<IrInstGen *> const_ptrs = {};
    for (uint64_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *src_field = op1_type->data.structure.fields[i % op1_field_count];
        TypeStructField *dst_field = new_type->data.structure.fields[i];

        IrInstGen *field_value = ir_analyze_struct_value_field_value(
            ira, source_instr, op1, src_field);
        if (type_is_invalid(field_value->value->type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *dest_ptr = ir_analyze_struct_field_ptr(
            ira, source_instr, dst_field, new_struct_ptr, new_type, true);
        if (type_is_invalid(dest_ptr->value->type))
            return ira->codegen->invalid_inst_gen;

        if (instr_is_comptime(field_value)) {
            const_ptrs.append(dest_ptr);
        }

        IrInstGen *store_ptr_inst = ir_analyze_store_ptr(
            ira, source_instr, dest_ptr, field_value, true);
        if (type_is_invalid(store_ptr_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    // If not every field was comptime-known, the result struct is a runtime
    // value; re-emit runtime stores for the fields that were stored comptime.
    if (const_ptrs.length != new_field_count) {
        new_struct_ptr->value->special = ConstValSpecialRuntime;
        for (size_t i = 0; i < const_ptrs.length; i += 1) {
            IrInstGen *elem_result_loc = const_ptrs.at(i);
            assert(elem_result_loc->value->special == ConstValSpecialStatic);
            if (elem_result_loc->value->type->data.pointer.inferred_struct_field != nullptr) {
                // This field will be generated comptime; no need to do this.
                continue;
            }
            IrInstGen *deref = ir_get_deref(ira, &elem_result_loc->base, elem_result_loc, nullptr);
            if (!type_requires_comptime(ira->codegen, elem_result_loc->value->type->data.pointer.child_type)) {
                elem_result_loc->value->special = ConstValSpecialRuntime;
            }
            IrInstGen *store_ptr_inst = ir_analyze_store_ptr(
                ira, &elem_result_loc->base, elem_result_loc, deref, true);
            if (type_is_invalid(store_ptr_inst->value->type))
                return ira->codegen->invalid_inst_gen;
        }
    }

    const_ptrs.deinit();

    return ir_get_deref(ira, source_instr, new_struct_ptr, nullptr);
}
|
|
|
|
// Analyzes `**` (repetition) on an array operand at comptime. op1 may be an
// array, a single-item pointer to an array (result is then returned as a
// pointer again), or a tuple (routed to ir_analyze_tuple_mult). op2 must be a
// comptime-known usize. Produces a comptime array value with op1's elements
// repeated op2 times, preserving the sentinel. Returns invalid_inst_gen on
// error.
static IrInstGen *ir_analyze_array_mult(IrAnalyze *ira, IrInstSrcBinOp *instruction) {
    IrInstGen *op1 = instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    // Normalize op1 into a comptime array value (undef allowed). For *[N]T
    // the pointer is dereferenced now and the result is re-wrapped as a
    // pointer at the end (want_ptr_to_array).
    bool want_ptr_to_array = false;
    ZigType *array_type;
    ZigValue *array_val;
    if (op1->value->type->id == ZigTypeIdArray) {
        array_type = op1->value->type;
        array_val = ir_resolve_const(ira, op1, UndefOk);
        if (array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
    } else if (op1->value->type->id == ZigTypeIdPointer &&
        op1->value->type->data.pointer.ptr_len == PtrLenSingle &&
        op1->value->type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        array_type = op1->value->type->data.pointer.child_type;
        IrInstGen *array_inst = ir_get_deref(ira, &op1->base, op1, nullptr);
        if (type_is_invalid(array_inst->value->type))
            return ira->codegen->invalid_inst_gen;
        array_val = ir_resolve_const(ira, array_inst, UndefOk);
        if (array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        want_ptr_to_array = true;
    } else if (is_tuple(op1->value->type)) {
        // Tuple ** n has its own implementation.
        return ir_analyze_tuple_mult(ira, &instruction->base.base, op1, op2);
    } else {
        ir_add_error(ira, &op1->base, buf_sprintf("expected array type, found '%s'", buf_ptr(&op1->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // The repetition count must be a comptime-known usize.
    uint64_t mult_amt;
    if (!ir_resolve_usize(ira, op2, &mult_amt))
        return ira->codegen->invalid_inst_gen;

    uint64_t old_array_len = array_type->data.array.len;
    uint64_t new_array_len;

    if (mul_u64_overflow(old_array_len, mult_amt, &new_array_len)) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("operation results in overflow"));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *child_type = array_type->data.array.child_type;
    ZigType *result_array_type = get_array_type(ira->codegen, child_type, new_array_len,
        array_type->data.array.sentinel);

    IrInstGen *array_result;
    if (array_val->special == ConstValSpecialUndef || array_val->data.x_array.special == ConstArraySpecialUndef) {
        // undef ** n is undef; no element copying needed.
        array_result = ir_const_undef(ira, &instruction->base.base, result_array_type);
    } else {
        array_result = ir_const(ira, &instruction->base.base, result_array_type);
        ZigValue *out_val = array_result->value;

        // Types with a single possible value need no element storage at all.
        switch (type_has_one_possible_value(ira->codegen, result_array_type)) {
            case OnePossibleValueInvalid:
                return ira->codegen->invalid_inst_gen;
            case OnePossibleValueYes:
                goto skip_computation;
            case OnePossibleValueNo:
                break;
        }

        // TODO optimize the buf case
        expand_undef_array(ira->codegen, array_val);
        // Allocate element storage, with one extra slot if there is a sentinel.
        size_t extra_null_term = (array_type->data.array.sentinel != nullptr) ? 1 : 0;
        out_val->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(new_array_len + extra_null_term);

        // Copy the source elements mult_amt times, fixing up each element's
        // parent back-reference to the new array.
        uint64_t i = 0;
        for (uint64_t x = 0; x < mult_amt; x += 1) {
            for (uint64_t y = 0; y < old_array_len; y += 1) {
                ZigValue *elem_dest_val = &out_val->data.x_array.data.s_none.elements[i];
                copy_const_val(ira->codegen, elem_dest_val, &array_val->data.x_array.data.s_none.elements[y]);
                elem_dest_val->parent.id = ConstParentIdArray;
                elem_dest_val->parent.data.p_array.array_val = out_val;
                elem_dest_val->parent.data.p_array.elem_index = i;
                i += 1;
            }
        }
        assert(i == new_array_len);

        // Append the sentinel value if the array type carries one.
        if (array_type->data.array.sentinel != nullptr) {
            ZigValue *elem_dest_val = &out_val->data.x_array.data.s_none.elements[i];
            copy_const_val(ira->codegen, elem_dest_val, array_type->data.array.sentinel);
            elem_dest_val->parent.id = ConstParentIdArray;
            elem_dest_val->parent.data.p_array.array_val = out_val;
            elem_dest_val->parent.data.p_array.elem_index = i;
            i += 1;
        }
    }
skip_computation:
    // If op1 was *[N]T, hand back a pointer to the result array.
    if (want_ptr_to_array) {
        return ir_get_ref(ira, &instruction->base.base, array_result, true, false);
    } else {
        return array_result;
    }
}
|
|
|
|
// Implements the `||` operator on error set types: evaluates to a comptime
// type value that is the error-set union of the two operands. If either
// operand is (or resolves to) the global error set, the result is anyerror.
static IrInstGen *ir_analyze_instruction_merge_err_sets(IrAnalyze *ira,
        IrInstSrcMergeErrSets *instruction)
{
    ZigType *lhs_set = ir_resolve_error_set_type(ira, &instruction->base.base, instruction->op1->child);
    if (type_is_invalid(lhs_set))
        return ira->codegen->invalid_inst_gen;

    ZigType *rhs_set = ir_resolve_error_set_type(ira, &instruction->base.base, instruction->op2->child);
    if (type_is_invalid(rhs_set))
        return ira->codegen->invalid_inst_gen;

    // Inferred error sets must be fully resolved before their members can be
    // enumerated for the union.
    if (!resolve_inferred_error_set(ira->codegen, lhs_set, instruction->op1->child->base.source_node))
        return ira->codegen->invalid_inst_gen;
    if (!resolve_inferred_error_set(ira->codegen, rhs_set, instruction->op2->child->base.source_node))
        return ira->codegen->invalid_inst_gen;

    // anyerror || X (and X || anyerror) is anyerror.
    if (type_is_global_error_set(lhs_set) || type_is_global_error_set(rhs_set))
        return ir_const_type(ira, &instruction->base.base, ira->codegen->builtin_types.entry_global_error_set);

    // Scratch table indexed by error value, pre-seeded with the lhs errors so
    // get_error_set_union can de-duplicate while merging in the rhs.
    size_t table_len = ira->codegen->errors_by_index.length;
    ErrorTableEntry **table = heap::c_allocator.allocate<ErrorTableEntry *>(table_len);
    for (uint32_t i = 0; i < lhs_set->data.error_set.err_count; i += 1) {
        ErrorTableEntry *entry = lhs_set->data.error_set.errors[i];
        assert(table[entry->value] == nullptr);
        table[entry->value] = entry;
    }
    ZigType *merged = get_error_set_union(ira->codegen, table, lhs_set, rhs_set, instruction->type_name);
    heap::c_allocator.deallocate(table, table_len);

    return ir_const_type(ira, &instruction->base.base, merged);
}
|
|
|
|
|
|
// Dispatches a source binary-op instruction to the analyzer responsible for
// its operator category. The switch has no default so the compiler flags any
// newly added IrBinOp value that is not handled here.
static IrInstGen *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
    switch (bin_op_instruction->op_id) {
        case IrBinOpInvalid:
            zig_unreachable();

        // Short-circuiting boolean operators.
        case IrBinOpBoolOr:
        case IrBinOpBoolAnd:
            return ir_analyze_bin_op_bool(ira, bin_op_instruction);

        // Comparison operators.
        case IrBinOpCmpEq:
        case IrBinOpCmpNotEq:
        case IrBinOpCmpLessThan:
        case IrBinOpCmpGreaterThan:
        case IrBinOpCmpLessOrEq:
        case IrBinOpCmpGreaterOrEq:
            return ir_analyze_bin_op_cmp(ira, bin_op_instruction);

        // Bit-shift operators.
        case IrBinOpBitShiftLeftLossy:
        case IrBinOpBitShiftLeftExact:
        case IrBinOpBitShiftRightLossy:
        case IrBinOpBitShiftRightExact:
            return ir_analyze_bit_shift(ira, bin_op_instruction);

        // Bitwise and arithmetic operators, including the wrapping,
        // division, and remainder variants.
        case IrBinOpBinOr:
        case IrBinOpBinXor:
        case IrBinOpBinAnd:
        case IrBinOpAdd:
        case IrBinOpAddWrap:
        case IrBinOpSub:
        case IrBinOpSubWrap:
        case IrBinOpMult:
        case IrBinOpMultWrap:
        case IrBinOpDivUnspecified:
        case IrBinOpDivTrunc:
        case IrBinOpDivFloor:
        case IrBinOpDivExact:
        case IrBinOpRemUnspecified:
        case IrBinOpRemRem:
        case IrBinOpRemMod:
            return ir_analyze_bin_op_math(ira, bin_op_instruction);

        // Array concatenation (++) and repetition (**).
        case IrBinOpArrayCat:
            return ir_analyze_array_cat(ira, bin_op_instruction);
        case IrBinOpArrayMult:
            return ir_analyze_array_mult(ira, bin_op_instruction);
    }
    zig_unreachable();
}
|
|
|
|
// Analyzes a `var`/`const` declaration. Resolves the explicit type annotation
// (if any), inspects the initialization pointer produced by pass-1, decides
// whether the variable is comptime/const/runtime, fixes up alignment, and
// either folds the declaration to void (fully comptime case) or emits a
// runtime var-decl instruction.
static IrInstGen *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstSrcDeclVar *decl_var_instruction) {
    Error err;
    ZigVar *var = decl_var_instruction->var;

    // Resolve and validate the explicit type annotation, when present.
    ZigType *explicit_type = nullptr;
    IrInstGen *var_type = nullptr;
    if (decl_var_instruction->var_type != nullptr) {
        var_type = decl_var_instruction->var_type->child;
        ZigType *proposed_type = ir_resolve_type(ira, var_type);
        explicit_type = validate_var_type(ira->codegen, &var->decl_node->data.variable_declaration, proposed_type);
        if (type_is_invalid(explicit_type)) {
            var->var_type = ira->codegen->builtin_types.entry_invalid;
            return ira->codegen->invalid_inst_gen;
        }
    }

    AstNode *source_node = decl_var_instruction->base.base.source_node;

    bool is_comptime_var = ir_get_var_is_comptime(var);

    // Set to true when the resolved type forces the variable to be const
    // (comptime-only types, always-inline function values).
    bool var_class_requires_const = false;

    IrInstGen *var_ptr = decl_var_instruction->ptr->child;
    // if this is null, a compiler error happened and did not initialize the variable.
    // if there are no compile errors there may be a missing ir_expr_wrap in pass1 IR generation.
    if (var_ptr == nullptr || type_is_invalid(var_ptr->value->type)) {
        ir_assert(var_ptr != nullptr || ira->codegen->errors.length != 0, &decl_var_instruction->base.base);
        var->var_type = ira->codegen->builtin_types.entry_invalid;
        return ira->codegen->invalid_inst_gen;
    }

    // The ir_build_var_decl_src call is supposed to pass a pointer to the allocation, not an initialization value.
    ir_assert(var_ptr->value->type->id == ZigTypeIdPointer, &decl_var_instruction->base.base);

    // The variable's type is the pointee type of the allocation.
    ZigType *result_type = var_ptr->value->type->data.pointer.child_type;
    if (type_is_invalid(result_type)) {
        result_type = ira->codegen->builtin_types.entry_invalid;
    } else if (result_type->id == ZigTypeIdUnreachable || result_type->id == ZigTypeIdOpaque) {
        zig_unreachable();
    }

    // If the initialization is comptime-known, extract the initial value so it
    // can be stored on the variable / checked below.
    ZigValue *init_val = nullptr;
    if (instr_is_comptime(var_ptr) && var_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
        ZigValue *ptr_val = ir_resolve_const(ira, var_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        init_val = const_ptr_pointee(ira, ira->codegen, ptr_val, decl_var_instruction->base.base.source_node);
        if (init_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        if (is_comptime_var) {
            if (var->gen_is_const) {
                // const comptime var can share the value directly.
                var->const_value = init_val;
            } else {
                // mutable comptime var needs its own copy so later stores
                // don't mutate the shared constant.
                var->const_value = ira->codegen->pass1_arena->create<ZigValue>();
                copy_const_val(ira->codegen, var->const_value, init_val);
            }
        }
    }

    // Enforce comptime-only type rules and the always-inline-function rule.
    switch (type_requires_comptime(ira->codegen, result_type)) {
        case ReqCompTimeInvalid:
            result_type = ira->codegen->builtin_types.entry_invalid;
            break;
        case ReqCompTimeYes:
            var_class_requires_const = true;
            if (!var->gen_is_const && !is_comptime_var) {
                ErrorMsg *msg = ir_add_error_node(ira, source_node,
                    buf_sprintf("variable of type '%s' must be const or comptime",
                        buf_ptr(&result_type->name)));
                if(result_type->id == ZigTypeIdComptimeInt || result_type -> id == ZigTypeIdComptimeFloat) {
                    add_error_note(ira->codegen, msg, source_node, buf_sprintf("to modify this variable at runtime, it must be given an explicit fixed-size number type"));
                }
                result_type = ira->codegen->builtin_types.entry_invalid;
            }
            break;
        case ReqCompTimeNo:
            if (init_val != nullptr && value_is_comptime(init_val)) {
                if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
                    decl_var_instruction->base.base.source_node, init_val, UndefOk)))
                {
                    result_type = ira->codegen->builtin_types.entry_invalid;
                } else if (init_val->type->id == ZigTypeIdFn &&
                    init_val->special != ConstValSpecialUndef &&
                    init_val->data.x_ptr.special == ConstPtrSpecialFunction &&
                    init_val->data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
                {
                    // Always-inline functions have no runtime address, so the
                    // variable holding one must be const/comptime.
                    var_class_requires_const = true;
                    if (!var->src_is_const && !is_comptime_var) {
                        ErrorMsg *msg = ir_add_error_node(ira, source_node,
                            buf_sprintf("functions marked inline must be stored in const or comptime var"));
                        AstNode *proto_node = init_val->data.x_ptr.data.fn.fn_entry->proto_node;
                        add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
                        result_type = ira->codegen->builtin_types.entry_invalid;
                    }
                }
            }
            break;
    }

    // Follow the chain to the most recent incarnation of this variable
    // (new incarnations are created e.g. by inline loops).
    while (var->next_var != nullptr) {
        var = var->next_var;
    }

    // This must be done after possibly creating a new variable above
    var->ref_count = 0;

    var->ptr_instruction = var_ptr;
    var->var_type = result_type;
    assert(var->var_type);

    if (type_is_invalid(result_type)) {
        return ir_const_void(ira, &decl_var_instruction->base.base);
    }

    // Determine alignment: either from the type itself or from an explicit
    // `align(...)` expression.
    if (decl_var_instruction->align_value == nullptr) {
        if ((err = type_resolve(ira->codegen, result_type, ResolveStatusAlignmentKnown))) {
            var->var_type = ira->codegen->builtin_types.entry_invalid;
            return ir_const_void(ira, &decl_var_instruction->base.base);
        }
        var->align_bytes = get_ptr_align(ira->codegen, var_ptr->value->type);
    } else {
        if (!ir_resolve_align(ira, decl_var_instruction->align_value->child, nullptr, &var->align_bytes)) {
            var->var_type = ira->codegen->builtin_types.entry_invalid;
        }
    }

    if (init_val != nullptr && value_is_comptime(init_val)) {
        // Resolve ConstPtrMutInfer
        if (var->gen_is_const) {
            var_ptr->value->data.x_ptr.mut = ConstPtrMutComptimeConst;
        } else if (is_comptime_var) {
            var_ptr->value->data.x_ptr.mut = ConstPtrMutComptimeVar;
        } else {
            // we need a runtime ptr but we have a comptime val.
            // since it's a comptime val there are no instructions for it.
            // we memcpy the init value here
            IrInstGen *deref = ir_get_deref(ira, &var_ptr->base, var_ptr, nullptr);
            if (type_is_invalid(deref->value->type)) {
                var->var_type = ira->codegen->builtin_types.entry_invalid;
                return ira->codegen->invalid_inst_gen;
            }
            // If this assertion trips, something is wrong with the IR instructions, because
            // we expected the above deref to return a constant value, but it created a runtime
            // instruction.
            assert(deref->value->special != ConstValSpecialRuntime);
            var_ptr->value->special = ConstValSpecialRuntime;
            ir_analyze_store_ptr(ira, &var_ptr->base, var_ptr, deref, false);
        }
        // Fully comptime declaration: nothing to emit at runtime.
        if (instr_is_comptime(var_ptr) && (is_comptime_var || (var_class_requires_const && var->gen_is_const))) {
            return ir_const_void(ira, &decl_var_instruction->base.base);
        }
    } else if (is_comptime_var) {
        ir_add_error(ira, &decl_var_instruction->base.base,
            buf_sprintf("cannot store runtime value in compile time variable"));
        var->var_type = ira->codegen->builtin_types.entry_invalid;
        return ira->codegen->invalid_inst_gen;
    }

    // Register the runtime variable with the owning function.
    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    if (fn_entry)
        fn_entry->variable_list.append(var);

    return ir_build_var_decl_gen(ira, &decl_var_instruction->base.base, var, var_ptr);
}
|
|
|
|
// Analyzes the @export builtin. Unpacks the comptime options struct
// (name/linkage/section), checks for symbol-name collisions, then dispatches
// on the type of the exported target to validate it (calling convention for
// functions, extern layout for aggregates, etc.) and register the export.
static IrInstGen *ir_analyze_instruction_export(IrAnalyze *ira, IrInstSrcExport *instruction) {
    IrInstGen *target = instruction->target->child;
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *options = instruction->options->child;
    if (type_is_invalid(options->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *options_type = options->value->type;
    assert(options_type->id == ZigTypeIdStruct);

    // Pull the individual fields out of the options struct.
    TypeStructField *name_field = find_struct_type_field(options_type, buf_create_from_str("name"));
    ir_assert(name_field != nullptr, &instruction->base.base);
    IrInstGen *name_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, name_field);
    if (type_is_invalid(name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *linkage_field = find_struct_type_field(options_type, buf_create_from_str("linkage"));
    ir_assert(linkage_field != nullptr, &instruction->base.base);
    IrInstGen *linkage_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, linkage_field);
    if (type_is_invalid(linkage_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *section_field = find_struct_type_field(options_type, buf_create_from_str("section"));
    ir_assert(section_field != nullptr, &instruction->base.base);
    IrInstGen *section_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, section_field);
    if (type_is_invalid(section_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // The `section` field is optional, we have to unwrap it first
    IrInstGen *non_null_check = ir_analyze_test_non_null(ira, &instruction->base.base, section_inst);
    bool is_non_null;
    if (!ir_resolve_bool(ira, non_null_check, &is_non_null))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *section_str_inst = nullptr;
    if (is_non_null) {
        section_str_inst = ir_analyze_optional_value_payload_value(ira, &instruction->base.base, section_inst, false);
        if (type_is_invalid(section_str_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    // Resolve all the comptime values
    Buf *symbol_name = ir_resolve_str(ira, name_inst);
    if (!symbol_name)
        return ira->codegen->invalid_inst_gen;

    if (buf_len(symbol_name) < 1) {
        ir_add_error(ira, &name_inst->base,
            buf_sprintf("exported symbol name cannot be empty"));
        return ira->codegen->invalid_inst_gen;
    }

    GlobalLinkageId global_linkage_id;
    if (!ir_resolve_global_linkage(ira, linkage_inst, &global_linkage_id))
        return ira->codegen->invalid_inst_gen;

    Buf *section_name = nullptr;
    if (section_str_inst != nullptr && !(section_name = ir_resolve_str(ira, section_str_inst)))
        return ira->codegen->invalid_inst_gen;

    // TODO: This function needs to be audited.
    // It's not clear how all the different types are supposed to be handled.
    // Need comprehensive tests for exporting one thing in one file and declaring an extern var
    // in another file.
    // A Tld entry is created regardless of the target kind so that collisions
    // can be detected via exported_symbol_names.
    TldFn *tld_fn = heap::c_allocator.create<TldFn>();
    tld_fn->base.id = TldIdFn;
    tld_fn->base.source_node = instruction->base.base.source_node;

    auto entry = ira->codegen->exported_symbol_names.put_unique(symbol_name, &tld_fn->base);
    if (entry) {
        AstNode *other_export_node = entry->value->source_node;
        ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
            buf_sprintf("exported symbol collision: '%s'", buf_ptr(symbol_name)));
        add_error_note(ira->codegen, msg, other_export_node, buf_sprintf("other symbol is here"));
        return ira->codegen->invalid_inst_gen;
    }

    Error err;
    // When true, the target is a value that should be exported as a variable
    // (handled after the switch, if the target is a load of a var pointer).
    bool want_var_export = false;
    switch (target->value->type->id) {
        case ZigTypeIdInvalid:
        case ZigTypeIdUnreachable:
            zig_unreachable();
        case ZigTypeIdFn: {
            assert(target->value->data.x_ptr.special == ConstPtrSpecialFunction);
            ZigFn *fn_entry = target->value->data.x_ptr.data.fn.fn_entry;
            tld_fn->fn_entry = fn_entry;
            CallingConvention cc = fn_entry->type_entry->data.fn.fn_type_id.cc;
            // Exported functions must have an explicit, non-async calling convention.
            switch (cc) {
                case CallingConventionUnspecified: {
                    ErrorMsg *msg = ir_add_error(ira, &target->base,
                        buf_sprintf("exported function must specify calling convention"));
                    add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here"));
                } break;
                case CallingConventionAsync: {
                    ErrorMsg *msg = ir_add_error(ira, &target->base,
                        buf_sprintf("exported function cannot be async"));
                    add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here"));
                } break;
                case CallingConventionC:
                case CallingConventionNaked:
                case CallingConventionInterrupt:
                case CallingConventionSignal:
                case CallingConventionStdcall:
                case CallingConventionFastcall:
                case CallingConventionVectorcall:
                case CallingConventionThiscall:
                case CallingConventionAPCS:
                case CallingConventionAAPCS:
                case CallingConventionAAPCSVFP:
                    add_fn_export(ira->codegen, fn_entry, buf_ptr(symbol_name), global_linkage_id, cc);
                    fn_entry->section_name = section_name;
                    break;
            }
        } break;
        case ZigTypeIdStruct:
            if (is_slice(target->value->type)) {
                ir_add_error(ira, &target->base,
                    buf_sprintf("unable to export value of type '%s'", buf_ptr(&target->value->type->name)));
            } else if (target->value->type->data.structure.layout != ContainerLayoutExtern) {
                ErrorMsg *msg = ir_add_error(ira, &target->base,
                    buf_sprintf("exported struct value must be declared extern"));
                add_error_note(ira->codegen, msg, target->value->type->data.structure.decl_node, buf_sprintf("declared here"));
            } else {
                want_var_export = true;
            }
            break;
        case ZigTypeIdUnion:
            if (target->value->type->data.unionation.layout != ContainerLayoutExtern) {
                ErrorMsg *msg = ir_add_error(ira, &target->base,
                    buf_sprintf("exported union value must be declared extern"));
                add_error_note(ira->codegen, msg, target->value->type->data.unionation.decl_node, buf_sprintf("declared here"));
            } else {
                want_var_export = true;
            }
            break;
        case ZigTypeIdEnum:
            if (target->value->type->data.enumeration.layout != ContainerLayoutExtern) {
                ErrorMsg *msg = ir_add_error(ira, &target->base,
                    buf_sprintf("exported enum value must be declared extern"));
                add_error_note(ira->codegen, msg, target->value->type->data.enumeration.decl_node, buf_sprintf("declared here"));
            } else {
                want_var_export = true;
            }
            break;
        case ZigTypeIdArray: {
            // Arrays are exportable when the element type is extern-compatible.
            bool ok_type;
            if ((err = type_allowed_in_extern(ira->codegen, target->value->type->data.array.child_type, ExternPositionOther, &ok_type)))
                return ira->codegen->invalid_inst_gen;

            if (!ok_type) {
                ir_add_error(ira, &target->base,
                    buf_sprintf("array element type '%s' not extern-compatible",
                        buf_ptr(&target->value->type->data.array.child_type->name)));
            } else {
                want_var_export = true;
            }
            break;
        }
        case ZigTypeIdMetaType: {
            // Exporting a type (not a value): validate the type's layout/CC.
            ZigType *type_value = target->value->data.x_type;
            switch (type_value->id) {
                case ZigTypeIdInvalid:
                    zig_unreachable();
                case ZigTypeIdStruct:
                    if (is_slice(type_value)) {
                        ir_add_error(ira, &target->base,
                            buf_sprintf("unable to export type '%s'", buf_ptr(&type_value->name)));
                    } else if (type_value->data.structure.layout != ContainerLayoutExtern) {
                        ErrorMsg *msg = ir_add_error(ira, &target->base,
                            buf_sprintf("exported struct must be declared extern"));
                        add_error_note(ira->codegen, msg, type_value->data.structure.decl_node, buf_sprintf("declared here"));
                    }
                    break;
                case ZigTypeIdUnion:
                    if (type_value->data.unionation.layout != ContainerLayoutExtern) {
                        ErrorMsg *msg = ir_add_error(ira, &target->base,
                            buf_sprintf("exported union must be declared extern"));
                        add_error_note(ira->codegen, msg, type_value->data.unionation.decl_node, buf_sprintf("declared here"));
                    }
                    break;
                case ZigTypeIdEnum:
                    if (type_value->data.enumeration.layout != ContainerLayoutExtern) {
                        ErrorMsg *msg = ir_add_error(ira, &target->base,
                            buf_sprintf("exported enum must be declared extern"));
                        add_error_note(ira->codegen, msg, type_value->data.enumeration.decl_node, buf_sprintf("declared here"));
                    }
                    break;
                case ZigTypeIdFn: {
                    if (type_value->data.fn.fn_type_id.cc == CallingConventionUnspecified) {
                        ir_add_error(ira, &target->base,
                            buf_sprintf("exported function type must specify calling convention"));
                    }
                } break;
                case ZigTypeIdInt:
                case ZigTypeIdFloat:
                case ZigTypeIdPointer:
                case ZigTypeIdArray:
                case ZigTypeIdBool:
                case ZigTypeIdVector:
                    break;
                case ZigTypeIdMetaType:
                case ZigTypeIdVoid:
                case ZigTypeIdUnreachable:
                case ZigTypeIdComptimeFloat:
                case ZigTypeIdComptimeInt:
                case ZigTypeIdEnumLiteral:
                case ZigTypeIdUndefined:
                case ZigTypeIdNull:
                case ZigTypeIdOptional:
                case ZigTypeIdErrorUnion:
                case ZigTypeIdErrorSet:
                case ZigTypeIdBoundFn:
                case ZigTypeIdOpaque:
                case ZigTypeIdFnFrame:
                case ZigTypeIdAnyFrame:
                    ir_add_error(ira, &target->base,
                        buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
                    break;
            }
        } break;
        case ZigTypeIdInt:
            want_var_export = true;
            break;
        case ZigTypeIdVoid:
        case ZigTypeIdBool:
        case ZigTypeIdFloat:
        case ZigTypeIdPointer:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdOptional:
        case ZigTypeIdErrorUnion:
        case ZigTypeIdErrorSet:
        case ZigTypeIdVector:
            zig_panic("TODO export const value of type %s", buf_ptr(&target->value->type->name));
        case ZigTypeIdBoundFn:
        case ZigTypeIdOpaque:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdFnFrame:
        case ZigTypeIdAnyFrame:
            ir_add_error(ira, &target->base,
                buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value->type->name)));
            break;
    }

    // TODO audit the various ways to use @export
    // Variable export only happens when the target is literally a load of a
    // variable's pointer; other value shapes silently fall through here.
    if (want_var_export && target->id == IrInstGenIdLoadPtr) {
        IrInstGenLoadPtr *load_ptr = reinterpret_cast<IrInstGenLoadPtr *>(target);
        if (load_ptr->ptr->id == IrInstGenIdVarPtr) {
            IrInstGenVarPtr *var_ptr = reinterpret_cast<IrInstGenVarPtr *>(load_ptr->ptr);
            ZigVar *var = var_ptr->var;
            add_var_export(ira->codegen, var, buf_ptr(symbol_name), global_linkage_id);
            var->section_name = section_name;
        }
    }

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
static void add_link_lib_symbol(IrAnalyze *ira, Buf *lib_name, Buf *symbol_name, AstNode *source_node);
|
|
|
|
// Analyzes the @extern builtin. Unpacks the comptime options struct
// (name/linkage/is_thread_local/library_name), validates that the requested
// type is a (optional) pointer or function type and that the symbol name is
// non-empty, registers the external symbol (and its library, if given), and
// emits the gen instruction referencing the symbol. Weak linkage wraps the
// result type in an optional, since a weak symbol may be absent at runtime.
//
// Fix: the empty-library-name error message previously read
// "library name name cannot be empty" (duplicated word).
static IrInstGen *ir_analyze_instruction_extern(IrAnalyze *ira, IrInstSrcExtern *instruction) {
    IrInstGen *type_inst = instruction->type->child;
    if (type_is_invalid(type_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *options = instruction->options->child;
    if (type_is_invalid(options->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *options_type = options->value->type;
    assert(options_type->id == ZigTypeIdStruct);

    // Pull the individual fields out of the options struct.
    TypeStructField *name_field = find_struct_type_field(options_type, buf_create_from_str("name"));
    ir_assert(name_field != nullptr, &instruction->base.base);
    IrInstGen *name_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, name_field);
    if (type_is_invalid(name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *linkage_field = find_struct_type_field(options_type, buf_create_from_str("linkage"));
    ir_assert(linkage_field != nullptr, &instruction->base.base);
    IrInstGen *linkage_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, linkage_field);
    if (type_is_invalid(linkage_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *is_thread_local_field = find_struct_type_field(options_type, buf_create_from_str("is_thread_local"));
    ir_assert(is_thread_local_field != nullptr, &instruction->base.base);
    IrInstGen *is_thread_local_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, is_thread_local_field);
    if (type_is_invalid(is_thread_local_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *library_name_field = find_struct_type_field(options_type, buf_create_from_str("library_name"));
    ir_assert(library_name_field != nullptr, &instruction->base.base);
    IrInstGen *library_name_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, library_name_field);
    if (type_is_invalid(library_name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // The `library_name` field is optional, we have to unwrap it first
    IrInstGen *non_null_check = ir_analyze_test_non_null(ira, &instruction->base.base, library_name_inst);
    bool is_non_null;
    if (!ir_resolve_bool(ira, non_null_check, &is_non_null))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *library_name_val_inst = nullptr;
    if (is_non_null) {
        library_name_val_inst = ir_analyze_optional_value_payload_value(ira, &instruction->base.base, library_name_inst, false);
        if (type_is_invalid(library_name_val_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    // Resolve all the comptime values
    ZigType *value_type = ir_resolve_type(ira, type_inst);
    if (type_is_invalid(value_type))
        return ira->codegen->invalid_inst_gen;

    // @extern only produces pointer-like values (fn pointers, (optional)
    // pointers); reject anything without a source pointer type.
    if (get_src_ptr_type(value_type) == nullptr) {
        ir_add_error(ira, &name_inst->base,
            buf_sprintf("expected (optional) pointer type or function"));
        return ira->codegen->invalid_inst_gen;
    }

    Buf *symbol_name = ir_resolve_str(ira, name_inst);
    if (!symbol_name)
        return ira->codegen->invalid_inst_gen;

    if (buf_len(symbol_name) == 0) {
        ir_add_error(ira, &name_inst->base,
            buf_sprintf("extern symbol name cannot be empty"));
        return ira->codegen->invalid_inst_gen;
    }

    Buf *library_name = nullptr;
    if (library_name_val_inst) {
        library_name = ir_resolve_str(ira, library_name_val_inst);
        if (!library_name)
            return ira->codegen->invalid_inst_gen;

        if (buf_len(library_name) == 0) {
            // Fixed message: was "library name name cannot be empty".
            ir_add_error(ira, &library_name_inst->base,
                buf_sprintf("library name cannot be empty"));
            return ira->codegen->invalid_inst_gen;
        }

        // Record that this symbol comes from the named library so the
        // linker input is set up.
        add_link_lib_symbol(ira, library_name, symbol_name, instruction->base.base.source_node);

        buf_destroy(library_name);
    }

    GlobalLinkageId global_linkage_id;
    if (!ir_resolve_global_linkage(ira, linkage_inst, &global_linkage_id))
        return ira->codegen->invalid_inst_gen;

    bool is_thread_local;
    if (!ir_resolve_bool(ira, is_thread_local_inst, &is_thread_local))
        return ira->codegen->invalid_inst_gen;

    // Weak symbols may be absent at runtime, so the result becomes optional
    // unless the caller already asked for an optional type.
    ZigType *expr_type = value_type;
    if (global_linkage_id == GlobalLinkageIdWeak && value_type->id != ZigTypeIdOptional)
        expr_type = get_optional_type(ira->codegen, expr_type);

    // Create a bogus Tld object to keep track of the extern symbol.
    // XXX: Find a better way to do this (in stage2).
    TldFn *tld_fn = heap::c_allocator.create<TldFn>();
    tld_fn->base.id = TldIdFn;
    tld_fn->base.source_node = instruction->base.base.source_node;

    auto entry = ira->codegen->external_symbol_names.put_unique(symbol_name, &tld_fn->base);
    if (entry) {
        AstNode *other_extern_node = entry->value->source_node;
        ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
            buf_sprintf("extern symbol collision: '%s'", buf_ptr(symbol_name)));
        add_error_note(ira->codegen, msg, other_extern_node, buf_sprintf("other symbol is here"));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_build_extern_gen(ira, &instruction->base.base, symbol_name, global_linkage_id,
            is_thread_local, expr_type);
}
|
|
|
|
// Whether this executable has an error-return trace available: error return
// tracing must be enabled globally and the executable must belong to a
// function that calls or awaits something errorable.
static bool exec_has_err_ret_trace(CodeGen *g, IrExecutableSrc *exec) {
    ZigFn *owner_fn = exec_fn_entry(exec);
    if (owner_fn == nullptr)
        return false;
    if (!owner_fn->calls_or_awaits_errorable_fn)
        return false;
    return g->have_err_ret_tracing;
}
|
|
|
|
// Analyzes an error-return-trace access.
// The optional form (IrInstErrorReturnTraceNull) has type `?*StackTrace`:
// when the current executable has no trace it folds to a comptime-known
// null (represented as hard-coded address 0); otherwise a runtime
// instruction is emitted. The non-optional form asserts tracing is enabled
// and always produces a runtime `*StackTrace`.
static IrInstGen *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
        IrInstSrcErrorReturnTrace *instruction)
{
    ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false);
    if (instruction->optional == IrInstErrorReturnTraceNull) {
        ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
        if (!exec_has_err_ret_trace(ira->codegen, ira->old_irb.exec)) {
            // No trace in this executable: produce comptime null
            // (an optional pointer represented as address 0).
            IrInstGen *result = ir_const(ira, &instruction->base.base, optional_type);
            ZigValue *out_val = result->value;
            assert(get_src_ptr_type(optional_type) != nullptr);
            out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
            out_val->data.x_ptr.data.hard_coded_addr.addr = 0;
            return result;
        }
        return ir_build_error_return_trace_gen(ira, instruction->base.base.scope,
            instruction->base.base.source_node, instruction->optional, optional_type);
    } else {
        assert(ira->codegen->have_err_ret_tracing);
        return ir_build_error_return_trace_gen(ira, instruction->base.base.scope,
            instruction->base.base.source_node, instruction->optional, ptr_to_stack_trace_type);
    }
}
|
|
|
|
// Analyzes an `ErrSet!Payload` error-union type expression. The result is a
// comptime value of type `type` whose contents are computed lazily: both
// operands are resolved with ir_resolve_type_lazy so the actual error-union
// type is only materialized when needed.
static IrInstGen *ir_analyze_instruction_error_union(IrAnalyze *ira, IrInstSrcErrorUnion *instruction) {
    IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_type);
    result->value->special = ConstValSpecialLazy;

    LazyValueErrUnionType *lazy_err_union_type = heap::c_allocator.create<LazyValueErrUnionType>();
    // The lazy value holds a reference to the analysis context.
    lazy_err_union_type->ira = ira; ira_ref(ira);
    result->value->data.x_lazy = &lazy_err_union_type->base;
    lazy_err_union_type->base.id = LazyValueIdErrUnionType;

    // Left operand: the error set type.
    lazy_err_union_type->err_set_type = instruction->err_set->child;
    if (ir_resolve_type_lazy(ira, lazy_err_union_type->err_set_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Right operand: the payload type.
    lazy_err_union_type->payload_type = instruction->payload->child;
    if (ir_resolve_type_lazy(ira, lazy_err_union_type->payload_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    return result;
}
|
|
|
|
// Creates an alloca of `var_type` for variable / result-location storage.
// The returned pointer is initially comptime-known with an undefined pointee;
// its mut is ConstPtrMutInfer unless `force_comptime`, so later analysis can
// decide whether the allocation stays comptime or becomes a runtime alloca.
// `align` of 0 means natural alignment; a nonzero align is rejected for
// zero-bit types.
static IrInstGen *ir_analyze_alloca(IrAnalyze *ira, IrInst *source_inst, ZigType *var_type,
        uint32_t align, const char *name_hint, bool force_comptime)
{
    Error err;

    // Backing value the pointer refers to; starts out undefined.
    ZigValue *pointee = ira->codegen->pass1_arena->create<ZigValue>();
    pointee->special = ConstValSpecialUndef;
    pointee->llvm_align = align;

    IrInstGenAlloca *result = ir_build_alloca_gen(ira, source_inst, align, name_hint);
    result->base.value->special = ConstValSpecialStatic;
    result->base.value->data.x_ptr.special = ConstPtrSpecialRef;
    result->base.value->data.x_ptr.mut = force_comptime ? ConstPtrMutComptimeVar : ConstPtrMutInfer;
    result->base.value->data.x_ptr.data.ref.pointee = pointee;

    bool var_type_has_bits;
    if ((err = type_has_bits2(ira->codegen, var_type, &var_type_has_bits)))
        return ira->codegen->invalid_inst_gen;
    if (align != 0) {
        // Explicit alignment: the type's own alignment must be resolvable,
        // and zero-bit types cannot be aligned at all.
        if ((err = type_resolve(ira->codegen, var_type, ResolveStatusAlignmentKnown)))
            return ira->codegen->invalid_inst_gen;
        if (!var_type_has_bits) {
            ir_add_error(ira, source_inst,
                buf_sprintf("variable '%s' of zero-bit type '%s' has no in-memory representation, it cannot be aligned",
                    name_hint, buf_ptr(&var_type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    }
    assert(result->base.value->data.x_ptr.special != ConstPtrSpecialInvalid);

    pointee->type = var_type;
    result->base.value->type = get_pointer_to_type_extra(ira->codegen, var_type, false, false,
            PtrLenSingle, align, 0, 0, false);

    if (!force_comptime) {
        // Register potentially-runtime allocas with the owning function's
        // alloca_gen_list (comptime-forced ones never need codegen).
        ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
        if (fn_entry != nullptr) {
            fn_entry->alloca_gen_list.append(result);
        }
    }
    return &result->base;
}
|
|
|
|
// Returns the type that a result location imposes on the expression being
// written into it, or nullptr when the location does not constrain the type.
static ZigType *ir_result_loc_expected_type(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc)
{
    ZigType *expected = nullptr;
    switch (result_loc->id) {
        case ResultLocIdInvalid:
        case ResultLocIdPeerParent:
            zig_unreachable();
        case ResultLocIdNone:
        case ResultLocIdVar:
        case ResultLocIdBitCast:
        case ResultLocIdCast:
            // These do not dictate a type up front.
            break;
        case ResultLocIdInstruction: {
            IrInstGen *loc_inst = result_loc->source_instruction->child;
            expected = loc_inst->value->type;
            break;
        }
        case ResultLocIdReturn:
            // Writing into the return slot: the function's return type.
            expected = ira->explicit_return_type;
            break;
        case ResultLocIdPeer: {
            ResultLocPeer *peer = reinterpret_cast<ResultLocPeer *>(result_loc);
            expected = peer->parent->resolved_type;
            break;
        }
    }
    return expected;
}
|
|
|
|
// Whether a value of type `t` may be the operand/result of @bitCast.
// Comptime-only types, types with no runtime representation, and pointers
// are rejected; everything else is (currently) permitted.
static bool type_can_bit_cast(ZigType *t) {
    bool allowed;
    switch (t->id) {
        case ZigTypeIdInvalid:
            zig_unreachable();
        case ZigTypeIdMetaType:
        case ZigTypeIdOpaque:
        case ZigTypeIdBoundFn:
        case ZigTypeIdUnreachable:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdPointer:
            allowed = false;
            break;
        default:
            // TODO list these types out explicitly, there are probably some other invalid ones here
            allowed = true;
            break;
    }
    return allowed;
}
|
|
|
|
// Turns `ptr` into a statically-known self-reference to a fresh undefined
// pointee, with mutability ConstPtrMutInfer so later analysis can decide
// whether the result location ends up comptime or runtime.
static void set_up_result_loc_for_inferred_comptime(IrAnalyze *ira, IrInstGen *ptr) {
    ZigValue *ptr_val = ptr->value;

    // Allocate an undefined value of the pointee type for the pointer to refer to.
    ZigValue *pointee = ira->codegen->pass1_arena->create<ZigValue>();
    pointee->special = ConstValSpecialUndef;
    pointee->type = ptr_val->type->data.pointer.child_type;

    // Mark the pointer itself as a static reference whose mutability is
    // still being inferred.
    ptr_val->special = ConstValSpecialStatic;
    ptr_val->data.x_ptr.special = ConstPtrSpecialRef;
    ptr_val->data.x_ptr.mut = ConstPtrMutInfer;
    ptr_val->data.x_ptr.data.ref.pointee = pointee;
}
|
|
|
|
// Determines whether the result location pins down a concrete type for the
// value written into it, storing the answer in *out. Returns an error only
// when resolving a cast's destination type fails.
static Error ir_result_has_type(IrAnalyze *ira, ResultLoc *result_loc, bool *out) {
    switch (result_loc->id) {
        case ResultLocIdInvalid:
        case ResultLocIdPeerParent:
            zig_unreachable();
        case ResultLocIdNone:
        case ResultLocIdPeer:
            *out = false;
            return ErrorNone;
        case ResultLocIdReturn:
        case ResultLocIdInstruction:
        case ResultLocIdBitCast:
            *out = true;
            return ErrorNone;
        case ResultLocIdCast: {
            ResultLocCast *cast_loc = reinterpret_cast<ResultLocCast *>(result_loc);
            ZigType *dest_type = ir_resolve_type(ira, cast_loc->base.source_instruction->child);
            if (type_is_invalid(dest_type))
                return ErrorSemanticAnalyzeFail;
            // Casting to `anytype` imposes no concrete type.
            *out = dest_type != ira->codegen->builtin_types.entry_anytype;
            return ErrorNone;
        }
        case ResultLocIdVar: {
            // A variable result location has a type exactly when the
            // declaration carries an explicit type annotation.
            ResultLocVar *var_loc = reinterpret_cast<ResultLocVar *>(result_loc);
            AstNode *decl_node = var_loc->var->decl_node;
            *out = decl_node->data.variable_declaration.type != nullptr;
            return ErrorNone;
        }
    }
    zig_unreachable();
}
|
|
|
|
// Satisfies a request for a result location when the pass-1 result loc
// provides none: manufactures an anonymous stack allocation of `value_type`,
// sets it up for comptime inference, and records it as the resolved location.
static IrInstGen *ir_resolve_no_result_loc(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc, ZigType *value_type)
{
    if (type_is_invalid(value_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGenAlloca *stack_slot = ir_build_alloca_gen(ira, suspend_source_instr, 0, "");
    stack_slot->base.value->type = get_pointer_to_type_extra(ira->codegen, value_type, false, false,
            PtrLenSingle, 0, 0, 0, false);
    set_up_result_loc_for_inferred_comptime(ira, &stack_slot->base);

    // Register the alloca with the owning function, except inside a
    // typeof scope where nothing is actually materialized.
    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    if (fn_entry != nullptr) {
        if (get_scope_typeof(suspend_source_instr->scope) == nullptr)
            fn_entry->alloca_gen_list.append(stack_slot);
    }

    result_loc->resolved_loc = &stack_slot->base;
    result_loc->written = true;
    return result_loc->resolved_loc;
}
|
|
|
|
// Reports whether this pass-1 result location is the discard pattern
// (`_ = expr`): an instruction result loc whose source is a comptime-known
// pointer constant marked ConstPtrSpecialDiscard.
static bool result_loc_is_discard(ResultLoc *result_loc_pass1) {
    if (result_loc_pass1->id != ResultLocIdInstruction)
        return false;
    if (result_loc_pass1->source_instruction->id != IrInstSrcIdConst)
        return false;

    IrInstSrcConst *const_inst = reinterpret_cast<IrInstSrcConst *>(result_loc_pass1->source_instruction);
    ZigValue *val = const_inst->value;
    return value_is_comptime(val) &&
        val->type->id == ZigTypeIdPointer &&
        val->data.x_ptr.special == ConstPtrSpecialDiscard;
}
|
|
|
|
// when calling this function, at the callsite must check for result type noreturn and propagate it up
//
// Resolves a source-pass result location (`result_loc`) into a gen-pass pointer
// instruction where a value of `value_type` can be stored.
//
// Parameters:
//   suspend_source_instr - instruction blamed for errors and used as the suspend
//                          point when peer type resolution is not yet available.
//   result_loc           - the pass-1 result location to resolve; its `written`,
//                          `resolved_loc`, `gen_instruction` and
//                          `implicit_elem_type` fields are mutated here.
//   value                - the comptime/runtime value about to be stored, or
//                          nullptr when only a location is needed.
//   force_runtime        - when true, marks fresh allocas as runtime even if the
//                          value would otherwise be comptime-known.
//   allow_discard        - when true, a store to `_` may be answered with the
//                          parent (discard) location instead of a real one.
//
// Returns: the resolved pointer instruction; nullptr when no location is needed
// (ResultLocIdNone with a value); an unreachable-typed instruction when analysis
// suspends waiting for peer type resolution; invalid_inst_gen on error.
static IrInstGen *ir_resolve_result_raw(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc, ZigType *value_type, IrInstGen *value, bool force_runtime,
        bool allow_discard)
{
    Error err;
    if (result_loc->resolved_loc != nullptr) {
        // allow to redo the result location if the value is known and comptime and the previous one isn't
        if (value == nullptr || !instr_is_comptime(value) || instr_is_comptime(result_loc->resolved_loc)) {
            return result_loc->resolved_loc;
        }
    }
    result_loc->gen_instruction = value;
    result_loc->implicit_elem_type = value_type;
    switch (result_loc->id) {
        case ResultLocIdInvalid:
        case ResultLocIdPeerParent:
            // PeerParent locations are only resolved through their child peers.
            zig_unreachable();
        case ResultLocIdNone: {
            // With a value in hand there is nothing to resolve; the caller can use
            // the value directly.
            if (value != nullptr) {
                return nullptr;
            }
            // need to return a result location and don't have one. use a stack allocation
            return ir_resolve_no_result_loc(ira, suspend_source_instr, result_loc, value_type);
        }
        case ResultLocIdVar: {
            // Result of a `var`/`const` declaration: resolve to the variable's alloca.
            ResultLocVar *result_loc_var = reinterpret_cast<ResultLocVar *>(result_loc);
            assert(result_loc->source_instruction->id == IrInstSrcIdAlloca);
            IrInstSrcAlloca *alloca_src = reinterpret_cast<IrInstSrcAlloca *>(result_loc->source_instruction);

            ZigVar *var = result_loc_var->var;
            if (var->var_type != nullptr && !ir_get_var_is_comptime(var)) {
                // This is at least the second time we've seen this variable declaration during analysis.
                // This means that this is actually a different variable due to, e.g. an inline while loop.
                // We make a new variable so that it can hold a different type, and so the debug info can
                // be distinct.
                ZigVar *new_var = create_local_var(ira->codegen, var->decl_node, var->child_scope,
                    buf_create_from_str(var->name), var->src_is_const, var->gen_is_const,
                    var->shadowable, var->is_comptime, true);
                new_var->align_bytes = var->align_bytes;

                var->next_var = new_var;
                var = new_var;
            }
            if (value_type->id == ZigTypeIdUnreachable || value_type->id == ZigTypeIdOpaque) {
                ir_add_error(ira, &result_loc->source_instruction->base,
                    buf_sprintf("variable of type '%s' not allowed", buf_ptr(&value_type->name)));
                return ira->codegen->invalid_inst_gen;
            }
            // Create the gen-pass alloca the first time through (or after a reset).
            if (alloca_src->base.child == nullptr || var->ptr_instruction == nullptr) {
                bool force_comptime;
                if (!ir_resolve_comptime(ira, alloca_src->is_comptime->child, &force_comptime))
                    return ira->codegen->invalid_inst_gen;
                uint32_t align = 0;
                if (alloca_src->align != nullptr && !ir_resolve_align(ira, alloca_src->align->child, nullptr, &align)) {
                    return ira->codegen->invalid_inst_gen;
                }
                IrInstGen *alloca_gen = ir_analyze_alloca(ira, &result_loc->source_instruction->base, value_type,
                        align, alloca_src->name_hint, force_comptime);
                if (force_runtime) {
                    alloca_gen->value->data.x_ptr.mut = ConstPtrMutRuntimeVar;
                    alloca_gen->value->special = ConstValSpecialRuntime;
                }
                if (alloca_src->base.child != nullptr && !result_loc->written) {
                    // Orphan the previous, never-written alloca so it is elided.
                    alloca_src->base.child->base.ref_count = 0;
                }
                alloca_src->base.child = alloca_gen;
                var->ptr_instruction = alloca_gen;
            }
            result_loc->written = true;
            result_loc->resolved_loc = alloca_src->base.child;
            return alloca_src->base.child;
        }
        case ResultLocIdInstruction: {
            // The destination pointer was computed by another instruction (e.g. the
            // LHS of an assignment); use its analyzed gen instruction directly.
            result_loc->written = true;
            result_loc->resolved_loc = result_loc->source_instruction->child;
            return result_loc->resolved_loc;
        }
        case ResultLocIdReturn: {
            // Store directly through the function's return pointer; record the value
            // so the inferred return type can be computed later.
            if (value != nullptr) {
                reinterpret_cast<ResultLocReturn *>(result_loc)->implicit_return_type_done = true;
                ira->src_implicit_return_type_list.append(value);
            }
            result_loc->written = true;
            result_loc->resolved_loc = ira->return_ptr;
            return result_loc->resolved_loc;
        }
        case ResultLocIdPeer: {
            // One branch of a multi-branch expression (if/switch/...). All peers must
            // agree on a type before the shared parent location can be resolved.
            ResultLocPeer *result_peer = reinterpret_cast<ResultLocPeer *>(result_loc);
            ResultLocPeerParent *peer_parent = result_peer->parent;

            if (peer_parent->peers.length == 1) {
                // Only one peer: no peer type resolution needed, forward to the parent.
                IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                        value_type, value, force_runtime, true);
                result_peer->suspend_pos.basic_block_index = SIZE_MAX;
                result_peer->suspend_pos.instruction_index = SIZE_MAX;
                if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                    parent_result_loc->value->type->id == ZigTypeIdUnreachable)
                {
                    return parent_result_loc;
                }
                result_loc->written = true;
                result_loc->resolved_loc = parent_result_loc;
                return result_loc->resolved_loc;
            }

            bool is_condition_comptime;
            if (!ir_resolve_comptime(ira, peer_parent->is_comptime->child, &is_condition_comptime))
                return ira->codegen->invalid_inst_gen;
            if (is_condition_comptime) {
                // Comptime-known condition: only this branch runs, so skip peer
                // resolution and use the parent directly.
                peer_parent->skipped = true;
                return ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                        value_type, value, force_runtime, true);
            }
            bool peer_parent_has_type;
            if ((err = ir_result_has_type(ira, peer_parent->parent, &peer_parent_has_type)))
                return ira->codegen->invalid_inst_gen;
            if (peer_parent_has_type) {
                // Parent already has a concrete type; no need to wait for the peers.
                peer_parent->skipped = true;
                IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                        value_type, value, force_runtime || !is_condition_comptime, true);
                if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                    parent_result_loc->value->type->id == ZigTypeIdUnreachable)
                {
                    return parent_result_loc;
                }
                peer_parent->parent->written = true;
                result_loc->written = true;
                result_loc->resolved_loc = parent_result_loc;
                return result_loc->resolved_loc;
            }

            if (peer_parent->resolved_type == nullptr) {
                // Peer type not known yet: suspend this analysis position; it is
                // resumed after all peers have reported their types.
                if (peer_parent->end_bb->suspend_instruction_ref == nullptr) {
                    peer_parent->end_bb->suspend_instruction_ref = suspend_source_instr;
                }
                IrInstGen *unreach_inst = ira_suspend(ira, suspend_source_instr, result_peer->next_bb,
                        &result_peer->suspend_pos);
                if (result_peer->next_bb == nullptr) {
                    ir_start_next_bb(ira);
                }
                return unreach_inst;
            }

            // Resumed with the peer-resolved type; resolve the parent with it.
            IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                    peer_parent->resolved_type, nullptr, force_runtime, true);
            if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                parent_result_loc->value->type->id == ZigTypeIdUnreachable)
            {
                return parent_result_loc;
            }
            // because is_condition_comptime is false, we mark this a runtime pointer
            parent_result_loc->value->special = ConstValSpecialRuntime;
            result_loc->written = true;
            result_loc->resolved_loc = parent_result_loc;
            return result_loc->resolved_loc;
        }
        case ResultLocIdCast: {
            // Result of an implicit cast (e.g. `@as`): resolve the parent location
            // with the destination type, then reinterpret the pointer so the store
            // can happen with the source value's type.
            ResultLocCast *result_cast = reinterpret_cast<ResultLocCast *>(result_loc);
            ZigType *dest_type = ir_resolve_type(ira, result_cast->base.source_instruction->child);
            if (type_is_invalid(dest_type))
                return ira->codegen->invalid_inst_gen;

            if (dest_type == ira->codegen->builtin_types.entry_anytype) {
                return ir_resolve_no_result_loc(ira, suspend_source_instr, result_loc, value_type);
            }

            IrInstGen *casted_value;
            if (value != nullptr) {
                casted_value = ir_implicit_cast2(ira, suspend_source_instr, value, dest_type);
                if (type_is_invalid(casted_value->value->type))
                    return ira->codegen->invalid_inst_gen;
                dest_type = casted_value->value->type;
            } else {
                casted_value = nullptr;
            }

            IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, result_cast->parent,
                    dest_type, casted_value, force_runtime, true);
            if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                parent_result_loc->value->type->id == ZigTypeIdUnreachable)
            {
                return parent_result_loc;
            }

            ZigType *parent_ptr_type = parent_result_loc->value->type;
            assert(parent_ptr_type->id == ZigTypeIdPointer);

            if ((err = type_resolve(ira->codegen, parent_ptr_type->data.pointer.child_type,
                            ResolveStatusAlignmentKnown)))
            {
                return ira->codegen->invalid_inst_gen;
            }
            uint64_t parent_ptr_align = get_ptr_align(ira->codegen, parent_ptr_type);
            if ((err = type_resolve(ira->codegen, value_type, ResolveStatusAlignmentKnown))) {
                return ira->codegen->invalid_inst_gen;
            }
            // Zero-bit payloads carry no alignment requirement.
            if (!type_has_bits(ira->codegen, value_type)) {
                parent_ptr_align = 0;
            }
            // If we're casting from a sentinel-terminated array to a non-sentinel-terminated array,
            // we actually need the result location pointer to *not* have a sentinel. Otherwise the generated
            // memcpy will write an extra byte to the destination, and THAT'S NO GOOD.
            ZigType *ptr_elem_type;
            if (value_type->id == ZigTypeIdArray && value_type->data.array.sentinel != nullptr &&
                dest_type->id == ZigTypeIdArray && dest_type->data.array.sentinel == nullptr)
            {
                ptr_elem_type = get_array_type(ira->codegen, value_type->data.array.child_type,
                        value_type->data.array.len, nullptr);
            } else {
                ptr_elem_type = value_type;
            }
            ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, ptr_elem_type,
                    parent_ptr_type->data.pointer.is_const, parent_ptr_type->data.pointer.is_volatile, PtrLenSingle,
                    parent_ptr_align, 0, 0, parent_ptr_type->data.pointer.allow_zero);

            // The pointer reinterpretation is only legal if the two pointer types
            // are const-cast compatible.
            ConstCastOnly const_cast_result = types_match_const_cast_only(ira,
                    parent_result_loc->value->type, ptr_type,
                    result_cast->base.source_instruction->base.source_node, false);
            if (const_cast_result.id == ConstCastResultIdInvalid)
                return ira->codegen->invalid_inst_gen;
            if (const_cast_result.id != ConstCastResultIdOk) {
                if (allow_discard) {
                    return parent_result_loc;
                }
                // We will not be able to provide a result location for this value. Create
                // a new result location.
                result_cast->parent->written = false;
                return ir_resolve_no_result_loc(ira, suspend_source_instr, result_loc, value_type);
            }

            result_loc->written = true;
            result_loc->resolved_loc = ir_analyze_ptr_cast(ira, suspend_source_instr, parent_result_loc,
                    &parent_result_loc->base, ptr_type, &result_cast->base.source_instruction->base, false, false);
            return result_loc->resolved_loc;
        }
        case ResultLocIdBitCast: {
            // Result of `@bitCast`: validate both sides, then resolve the parent and
            // reinterpret its pointer as a pointer to `value_type` (same size).
            ResultLocBitCast *result_bit_cast = reinterpret_cast<ResultLocBitCast *>(result_loc);
            ZigType *dest_type = ir_resolve_type(ira, result_bit_cast->base.source_instruction->child);
            if (type_is_invalid(dest_type))
                return ira->codegen->invalid_inst_gen;

            ZigType *dest_cg_ptr_type;
            if ((err = get_codegen_ptr_type(ira->codegen, dest_type, &dest_cg_ptr_type)))
                return ira->codegen->invalid_inst_gen;
            if (dest_cg_ptr_type != nullptr) {
                ir_add_error(ira, &result_loc->source_instruction->base,
                        buf_sprintf("unable to @bitCast to pointer type '%s'", buf_ptr(&dest_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            if (!type_can_bit_cast(dest_type)) {
                ir_add_error(ira, &result_loc->source_instruction->base,
                        buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            ZigType *value_cg_ptr_type;
            if ((err = get_codegen_ptr_type(ira->codegen, value_type, &value_cg_ptr_type)))
                return ira->codegen->invalid_inst_gen;
            if (value_cg_ptr_type != nullptr) {
                ir_add_error(ira, suspend_source_instr,
                    buf_sprintf("unable to @bitCast from pointer type '%s'", buf_ptr(&value_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            if (!type_can_bit_cast(value_type)) {
                ir_add_error(ira, suspend_source_instr,
                    buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&value_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            IrInstGen *bitcasted_value;
            if (value != nullptr) {
                bitcasted_value = ir_analyze_bit_cast(ira, &result_loc->source_instruction->base, value, dest_type);
                dest_type = bitcasted_value->value->type;
            } else {
                bitcasted_value = nullptr;
            }

            if (bitcasted_value != nullptr && type_is_invalid(bitcasted_value->value->type)) {
                return bitcasted_value;
            }

            // Remember the parent's written flag so it can be restored if we end up
            // not using the parent location (size-mismatch fallback below).
            bool parent_was_written = result_bit_cast->parent->written;
            IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, result_bit_cast->parent,
                    dest_type, bitcasted_value, force_runtime, true);
            if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                parent_result_loc->value->type->id == ZigTypeIdUnreachable)
            {
                return parent_result_loc;
            }
            ZigType *parent_ptr_type = parent_result_loc->value->type;
            assert(parent_ptr_type->id == ZigTypeIdPointer);
            ZigType *child_type = parent_ptr_type->data.pointer.child_type;

            if (result_loc_is_discard(result_bit_cast->parent)) {
                assert(allow_discard);
                return parent_result_loc;
            }

            if ((err = type_resolve(ira->codegen, child_type, ResolveStatusSizeKnown))) {
                return ira->codegen->invalid_inst_gen;
            }

            if ((err = type_resolve(ira->codegen, value_type, ResolveStatusSizeKnown))) {
                return ira->codegen->invalid_inst_gen;
            }

            if (child_type != ira->codegen->builtin_types.entry_anytype) {
                if (type_size(ira->codegen, child_type) != type_size(ira->codegen, value_type)) {
                    // pointer cast won't work; we need a temporary location.
                    result_bit_cast->parent->written = parent_was_written;
                    result_loc->written = true;
                    result_loc->resolved_loc = ir_resolve_result(ira, suspend_source_instr, no_result_loc(),
                            value_type, bitcasted_value, force_runtime, true);
                    return result_loc->resolved_loc;
                }
            }
            uint64_t parent_ptr_align = 0;
            if (type_has_bits(ira->codegen, value_type)) parent_ptr_align = get_ptr_align(ira->codegen, parent_ptr_type);
            ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, value_type,
                    parent_ptr_type->data.pointer.is_const, parent_ptr_type->data.pointer.is_volatile, PtrLenSingle,
                    parent_ptr_align, 0, 0, parent_ptr_type->data.pointer.allow_zero);

            result_loc->written = true;
            result_loc->resolved_loc = ir_analyze_ptr_cast(ira, suspend_source_instr, parent_result_loc,
                    &parent_result_loc->base, ptr_type, &result_bit_cast->base.source_instruction->base, false, false);
            return result_loc->resolved_loc;
        }
    }
    zig_unreachable();
}
|
|
|
|
// Public entry point for result-location resolution. Wraps ir_resolve_result_raw
// and then applies the post-processing that all callers need:
//   * redirects discard locations to a fresh stack location when the caller
//     cannot accept a discard (`allow_discard == false`);
//   * marks the pointer runtime when a runtime value is stored through an
//     inferred-mutability pointer;
//   * materializes a new field on an inferred struct type when the location
//     points into one (anonymous struct/tuple literals);
//   * on first write, unwraps optional / error-union destinations so the payload
//     can be written directly.
// Return contract matches ir_resolve_result_raw (nullptr / unreachable / invalid
// must be checked by the caller).
static IrInstGen *ir_resolve_result(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc_pass1, ZigType *value_type, IrInstGen *value, bool force_runtime,
        bool allow_discard)
{
    if (!allow_discard && result_loc_is_discard(result_loc_pass1)) {
        // Caller needs a real location; replace the `_` discard with "none",
        // which falls back to a stack allocation.
        result_loc_pass1 = no_result_loc();
    }
    bool was_written = result_loc_pass1->written;
    IrInstGen *result_loc = ir_resolve_result_raw(ira, suspend_source_instr, result_loc_pass1, value_type,
            value, force_runtime, allow_discard);
    if (result_loc == nullptr || result_loc->value->type->id == ZigTypeIdUnreachable ||
            type_is_invalid(result_loc->value->type))
    {
        return result_loc;
    }

    // A runtime store through a pointer whose mutability was still being inferred
    // pins the location to runtime.
    if ((force_runtime || (value != nullptr && !instr_is_comptime(value))) &&
        result_loc_pass1->written && result_loc->value->data.x_ptr.mut == ConstPtrMutInfer)
    {
        result_loc->value->special = ConstValSpecialRuntime;
    }

    // Destination is a not-yet-declared field of an inferred struct (anonymous
    // literal): add the field to the struct type now.
    InferredStructField *isf = result_loc->value->type->data.pointer.inferred_struct_field;
    if (isf != nullptr) {
        TypeStructField *field;
        IrInstGen *casted_ptr;
        if (isf->already_resolved) {
            field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
            casted_ptr = result_loc;
        } else {
            isf->already_resolved = true;
            // Now it's time to add the field to the struct type.
            uint32_t old_field_count = isf->inferred_struct_type->data.structure.src_field_count;
            uint32_t new_field_count = old_field_count + 1;
            isf->inferred_struct_type->data.structure.src_field_count = new_field_count;
            isf->inferred_struct_type->data.structure.fields = realloc_type_struct_fields(
                    isf->inferred_struct_type->data.structure.fields, old_field_count, new_field_count);

            field = isf->inferred_struct_type->data.structure.fields[old_field_count];
            field->name = isf->field_name;
            field->type_entry = value_type;
            field->type_val = create_const_type(ira->codegen, field->type_entry);
            field->src_index = old_field_count;
            field->decl_node = value ? value->base.source_node : suspend_source_instr->source_node;
            if (value && instr_is_comptime(value)) {
                // Comptime-known field value: record it as a comptime field and
                // skip pointer materialization entirely.
                ZigValue *val = ir_resolve_const(ira, value, UndefOk);
                if (!val)
                    return ira->codegen->invalid_inst_gen;
                field->is_comptime = true;
                field->init_val = ira->codegen->pass1_arena->create<ZigValue>();
                copy_const_val(ira->codegen, field->init_val, val);
                return result_loc;
            }

            ZigType *struct_ptr_type = get_pointer_to_type(ira->codegen, isf->inferred_struct_type, false);
            if (instr_is_comptime(result_loc)) {
                // Re-type the comptime pointer as pointer-to-struct so field access
                // below works.
                casted_ptr = ir_const(ira, suspend_source_instr, struct_ptr_type);
                copy_const_val(ira->codegen, casted_ptr->value, result_loc->value);
                casted_ptr->value->type = struct_ptr_type;
            } else {
                casted_ptr = result_loc;
            }
            if (instr_is_comptime(casted_ptr)) {
                ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
                if (!ptr_val)
                    return ira->codegen->invalid_inst_gen;
                if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
                    // Grow the comptime struct value in lockstep with the type so
                    // the new field has a (undef) slot.
                    ZigValue *struct_val = const_ptr_pointee(ira, ira->codegen, ptr_val,
                            suspend_source_instr->source_node);
                    struct_val->special = ConstValSpecialStatic;
                    struct_val->data.x_struct.fields = realloc_const_vals_ptrs(ira->codegen,
                            struct_val->data.x_struct.fields, old_field_count, new_field_count);

                    ZigValue *field_val = struct_val->data.x_struct.fields[old_field_count];
                    field_val->special = ConstValSpecialUndef;
                    field_val->type = field->type_entry;
                    field_val->parent.id = ConstParentIdStruct;
                    field_val->parent.data.p_struct.struct_val = struct_val;
                    field_val->parent.data.p_struct.field_index = old_field_count;
                }
            }
        }

        result_loc = ir_analyze_struct_field_ptr(ira, suspend_source_instr, field, casted_ptr,
                isf->inferred_struct_type, true);
        result_loc_pass1->resolved_loc = result_loc;
    }

    if (was_written) {
        return result_loc;
    }

    // First write through this location: if the destination element type wraps
    // the value type (optional or error union), return a pointer to the payload
    // so the value can be stored without an extra copy.
    ir_assert(result_loc->value->type->id == ZigTypeIdPointer, suspend_source_instr);
    ZigType *actual_elem_type = result_loc->value->type->data.pointer.child_type;
    if (actual_elem_type->id == ZigTypeIdOptional && value_type->id != ZigTypeIdOptional &&
            value_type->id != ZigTypeIdNull && value_type->id != ZigTypeIdUndefined)
    {
        bool same_comptime_repr = types_have_same_zig_comptime_repr(ira->codegen, actual_elem_type, value_type);
        if (!same_comptime_repr) {
            result_loc_pass1->written = was_written;
            return ir_analyze_unwrap_optional_payload(ira, suspend_source_instr, result_loc, false, true);
        }
    } else if (actual_elem_type->id == ZigTypeIdErrorUnion && value_type->id != ZigTypeIdErrorUnion &&
            value_type->id != ZigTypeIdUndefined)
    {
        if (value_type->id == ZigTypeIdErrorSet) {
            return ir_analyze_unwrap_err_code(ira, suspend_source_instr, result_loc, true);
        } else {
            IrInstGen *unwrapped_err_ptr = ir_analyze_unwrap_error_payload(ira, suspend_source_instr,
                    result_loc, false, true);
            // The payload may itself be an optional that also needs unwrapping.
            ZigType *actual_payload_type = actual_elem_type->data.error_union.payload_type;
            if (actual_payload_type->id == ZigTypeIdOptional && value_type->id != ZigTypeIdOptional &&
                value_type->id != ZigTypeIdNull && value_type->id != ZigTypeIdUndefined)
            {
                return ir_analyze_unwrap_optional_payload(ira, suspend_source_instr, unwrapped_err_ptr, false, true);
            } else {
                return unwrapped_err_ptr;
            }
        }
    }
    return result_loc;
}
|
|
|
|
// Analyzes the source instruction that asks for a result location pointer.
// Determines the element type (explicit `ty`, the cast destination, the
// function's return type, or a freshly created inferred struct/tuple type for
// anonymous literals), resolves the result location with it, and falls back to
// a comptime undef ref when no location is produced.
static IrInstGen *ir_analyze_instruction_resolve_result(IrAnalyze *ira, IrInstSrcResolveResult *instruction) {
    ZigType *implicit_elem_type;
    if (instruction->ty == nullptr) {
        // No explicit type: infer one from the kind of result location.
        if (instruction->result_loc->id == ResultLocIdCast) {
            implicit_elem_type = ir_resolve_type(ira,
                    instruction->result_loc->source_instruction->child);
            if (type_is_invalid(implicit_elem_type))
                return ira->codegen->invalid_inst_gen;
        } else if (instruction->result_loc->id == ResultLocIdReturn) {
            implicit_elem_type = ira->explicit_return_type;
            if (type_is_invalid(implicit_elem_type))
                return ira->codegen->invalid_inst_gen;
        } else {
            implicit_elem_type = ira->codegen->builtin_types.entry_anytype;
        }
        if (implicit_elem_type == ira->codegen->builtin_types.entry_anytype) {
            // Anonymous struct/tuple literal: create a fresh inferred container
            // type whose fields will be added as they are written.
            Buf *bare_name = buf_alloc();
            Buf *name = get_anon_type_name(ira->codegen, nullptr, container_string(ContainerKindStruct),
                instruction->base.base.scope, instruction->base.base.source_node, bare_name);

            // An array-style container init (`.{a, b}`) becomes an inferred tuple.
            StructSpecial struct_special = StructSpecialInferredStruct;
            if (instruction->base.base.source_node->type == NodeTypeContainerInitExpr &&
                instruction->base.base.source_node->data.container_init_expr.kind == ContainerInitKindArray)
            {
                struct_special = StructSpecialInferredTuple;
            }

            ZigType *inferred_struct_type = get_partial_container_type(ira->codegen,
                    instruction->base.base.scope, ContainerKindStruct, instruction->base.base.source_node,
                    buf_ptr(name), bare_name, ContainerLayoutAuto);
            inferred_struct_type->data.structure.special = struct_special;
            inferred_struct_type->data.structure.resolve_status = ResolveStatusBeingInferred;
            implicit_elem_type = inferred_struct_type;
        }
    } else {
        implicit_elem_type = ir_resolve_type(ira, instruction->ty->child);
        if (type_is_invalid(implicit_elem_type))
            return ira->codegen->invalid_inst_gen;
    }
    IrInstGen *result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
            implicit_elem_type, nullptr, false, true);
    if (result_loc != nullptr)
        return result_loc;

    // No location was produced. In an async function the return location lives
    // in the frame, so resolve a fresh runtime stack location instead.
    ZigFn *fn = ira->new_irb.exec->fn_entry;
    if (fn != nullptr && fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync &&
            instruction->result_loc->id == ResultLocIdReturn)
    {
        result_loc = ir_resolve_result(ira, &instruction->base.base, no_result_loc(),
                implicit_elem_type, nullptr, false, true);
        if (result_loc != nullptr &&
                (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable))
        {
            return result_loc;
        }
        result_loc->value->special = ConstValSpecialRuntime;
        return result_loc;
    }

    // Fall back to a comptime-mutable pointer to an undef value.
    IrInstGen *result = ir_const(ira, &instruction->base.base, implicit_elem_type);
    result->value->special = ConstValSpecialUndef;
    IrInstGen *ptr = ir_get_ref(ira, &instruction->base.base, result, false, false);
    ptr->value->data.x_ptr.mut = ConstPtrMutComptimeVar;
    return ptr;
}
|
|
|
|
// Restores a result location to its pre-analysis state so that the enclosing
// code can be analyzed again (e.g. another iteration of an inline loop).
// Recurses into the children of a peer parent; for a variable location, also
// detaches the gen-pass alloca from its source instruction.
static void ir_reset_result(ResultLoc *result_loc) {
    // Common bookkeeping shared by every kind of result location.
    result_loc->written = false;
    result_loc->resolved_loc = nullptr;
    result_loc->gen_instruction = nullptr;
    result_loc->implicit_elem_type = nullptr;

    switch (result_loc->id) {
        case ResultLocIdInvalid:
            zig_unreachable();
        case ResultLocIdPeerParent: {
            ResultLocPeerParent *parent = reinterpret_cast<ResultLocPeerParent *>(result_loc);
            parent->resolved_type = nullptr;
            parent->done_resuming = false;
            parent->skipped = false;
            // Every peer branch gets reset along with its parent.
            for (size_t peer_i = 0; peer_i < parent->peers.length; peer_i++) {
                ir_reset_result(&parent->peers.at(peer_i)->base);
            }
            break;
        }
        case ResultLocIdVar: {
            // Forget the gen-pass alloca; it will be re-created on next resolve.
            IrInstSrcAlloca *alloca_inst = reinterpret_cast<IrInstSrcAlloca *>(result_loc->source_instruction);
            alloca_inst->base.child = nullptr;
            break;
        }
        case ResultLocIdReturn:
            reinterpret_cast<ResultLocReturn *>(result_loc)->implicit_return_type_done = false;
            break;
        case ResultLocIdPeer:
        case ResultLocIdNone:
        case ResultLocIdInstruction:
        case ResultLocIdBitCast:
        case ResultLocIdCast:
            // Nothing beyond the common fields to reset.
            break;
    }
}
|
|
|
|
// Analyzes the "reset result" source instruction: clears all analysis state
// recorded on the instruction's result location and yields void.
static IrInstGen *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInstSrcResetResult *instruction) {
    ResultLoc *result_loc = instruction->result_loc;
    ir_reset_result(result_loc);
    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Computes the return-value pointer for an `@asyncCall` invocation.
// Returns nullptr when the caller passed a void result pointer, meaning the
// result location lives inside the async frame; otherwise coerces the given
// pointer to `*fn_ret_type`. Returns invalid_inst_gen if the pointer argument
// failed analysis. `args_ptr`/`args_len` are accepted for signature symmetry
// with the call-analysis helpers but are not consulted here.
static IrInstGen *get_async_call_result_loc(IrAnalyze *ira, IrInst* source_instr,
        ZigType *fn_ret_type, bool is_async_call_builtin, IrInstGen **args_ptr, size_t args_len,
        IrInstGen *ret_ptr_uncasted)
{
    // Only the @asyncCall builtin supplies an explicit return pointer.
    ir_assert(is_async_call_builtin, source_instr);

    ZigType *ret_ptr_type = ret_ptr_uncasted->value->type;
    if (type_is_invalid(ret_ptr_type))
        return ira->codegen->invalid_inst_gen;

    if (ret_ptr_type->id == ZigTypeIdVoid) {
        // Result location will be inside the async frame.
        return nullptr;
    }

    ZigType *wanted_ptr_type = get_pointer_to_type(ira->codegen, fn_ret_type, false);
    return ir_implicit_cast(ira, ret_ptr_uncasted, wanted_ptr_type);
}
|
|
|
|
// Builds the gen-pass instruction for an `async` call (or `@asyncCall`).
// Two shapes are produced:
//   * with an explicit stack (`casted_new_stack != nullptr`): an async call that
//     returns an `anyframe->T`, with the result pointer from @asyncCall;
//   * without one: the call's frame is materialized into the call's result
//     location (`call_result_loc`), typed as the function's frame type.
// `fn_entry` may be nullptr only for a runtime function pointer, which is
// required to be an async calling-convention fn invoked via @asyncCall.
static IrInstGen *ir_analyze_async_call(IrAnalyze *ira, IrInst* source_instr, ZigFn *fn_entry,
        ZigType *fn_type, IrInstGen *fn_ref, IrInstGen **casted_args, size_t arg_count,
        IrInstGen *casted_new_stack, bool is_async_call_builtin, IrInstGen *ret_ptr_uncasted,
        ResultLoc *call_result_loc)
{
    if (fn_entry == nullptr) {
        // Runtime-known function pointer: must be async and must come with an
        // explicit stack (@asyncCall), because the frame size is unknown here.
        if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
            ir_add_error(ira, &fn_ref->base,
                buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name)));
            return ira->codegen->invalid_inst_gen;
        }
        if (casted_new_stack == nullptr) {
            ir_add_error(ira, &fn_ref->base, buf_sprintf("function is not comptime-known; @asyncCall required"));
            return ira->codegen->invalid_inst_gen;
        }
    }
    if (casted_new_stack != nullptr) {
        // @asyncCall path: explicit stack/frame memory; result is anyframe->T.
        ZigType *fn_ret_type = fn_type->data.fn.fn_type_id.return_type;
        IrInstGen *ret_ptr = get_async_call_result_loc(ira, source_instr, fn_ret_type, is_async_call_builtin,
                casted_args, arg_count, ret_ptr_uncasted);
        if (ret_ptr != nullptr && type_is_invalid(ret_ptr->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_ret_type);

        IrInstGenCall *call_gen = ir_build_call_gen(ira, source_instr, fn_entry, fn_ref,
                arg_count, casted_args, CallModifierAsync, casted_new_stack,
                is_async_call_builtin, ret_ptr, anyframe_type);
        return &call_gen->base;
    } else {
        // `async f(...)` path: allocate the callee's frame in the result location.
        ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry);
        // force_runtime=true: a frame is inherently a runtime object.
        IrInstGen *result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
                frame_type, nullptr, true, false);
        if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
            return result_loc;
        }
        result_loc = ir_implicit_cast2(ira, source_instr, result_loc,
                get_pointer_to_type(ira->codegen, frame_type, false));
        if (type_is_invalid(result_loc->value->type))
            return ira->codegen->invalid_inst_gen;
        return &ir_build_call_gen(ira, source_instr, fn_entry, fn_ref, arg_count,
                casted_args, CallModifierAsync, casted_new_stack,
                is_async_call_builtin, result_loc, frame_type)->base;
    }
}
|
|
// Binds one argument of an inline (comptime) function call to a comptime
// constant in the callee's scope. Coerces the argument to the declared
// parameter type unless the parameter is `anytype`, requires the result to be
// comptime-resolvable, then introduces it as a const variable and advances the
// proto-parameter cursor. Returns false if analysis of the argument failed.
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
        IrInstGen *arg, Scope **exec_scope, size_t *next_proto_i)
{
    AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(*next_proto_i);
    assert(param_decl_node->type == NodeTypeParamDecl);

    // `anytype` parameters take the argument as-is; typed parameters coerce.
    IrInstGen *coerced_arg = arg;
    if (param_decl_node->data.param_decl.anytype_token == nullptr) {
        AstNode *param_type_node = param_decl_node->data.param_decl.type;
        ZigType *param_type = ir_analyze_type_expr(ira, *exec_scope, param_type_node);
        if (type_is_invalid(param_type))
            return false;

        coerced_arg = ir_implicit_cast(ira, arg, param_type);
        if (type_is_invalid(coerced_arg->value->type))
            return false;
    }

    // Inline calls require every argument to be comptime-known (undef allowed).
    ZigValue *arg_val = ir_resolve_const(ira, coerced_arg, UndefOk);
    if (arg_val == nullptr)
        return false;

    // Introduce the parameter as a const binding and continue analysis in the
    // child scope it opens.
    Buf *param_name = param_decl_node->data.param_decl.name;
    ZigVar *var = add_variable(ira->codegen, param_decl_node,
        *exec_scope, param_name, true, arg_val, nullptr, arg_val->type);
    *exec_scope = var->child_scope;
    *next_proto_i += 1;

    return true;
}
|
|
|
|
// Processes one argument while instantiating a generic function. Decides
// whether the argument is part of the generic instantiation key (var args,
// `anytype`, explicit `comptime`, or a type that requires comptime), appends it
// to `generic_id` when it is, binds the parameter in the instantiation scope,
// and records runtime parameters into `fn_type_id`/`casted_args` for the
// concrete function type being built. Returns false on any analysis error.
static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_node,
        IrInstGen *arg, IrInst *arg_src, Scope **child_scope, size_t *next_proto_i,
        GenericFnTypeId *generic_id, FnTypeId *fn_type_id, IrInstGen **casted_args,
        ZigFn *impl_fn)
{
    AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(*next_proto_i);
    assert(param_decl_node->type == NodeTypeParamDecl);
    bool is_var_args = param_decl_node->data.param_decl.is_var_args;
    bool arg_part_of_generic_id = false;
    IrInstGen *casted_arg;
    if (is_var_args) {
        // Var-args arguments always participate in the instantiation key.
        arg_part_of_generic_id = true;
        casted_arg = arg;
    } else {
        if (param_decl_node->data.param_decl.anytype_token == nullptr) {
            // Typed parameter: coerce the argument to the declared type.
            AstNode *param_type_node = param_decl_node->data.param_decl.type;
            ZigType *param_type = ir_analyze_type_expr(ira, *child_scope, param_type_node);
            if (type_is_invalid(param_type))
                return false;

            casted_arg = ir_implicit_cast2(ira, arg_src, arg, param_type);
            if (type_is_invalid(casted_arg->value->type))
                return false;
        } else {
            // `anytype`: the argument's type itself keys the instantiation.
            arg_part_of_generic_id = true;
            casted_arg = arg;
        }
    }

    bool comptime_arg = param_decl_node->data.param_decl.is_comptime;
    if (!comptime_arg) {
        // Types such as `type` or comptime_int force the parameter comptime even
        // without the `comptime` keyword.
        switch (type_requires_comptime(ira->codegen, casted_arg->value->type)) {
            case ReqCompTimeInvalid:
                return false;
            case ReqCompTimeYes:
                comptime_arg = true;
                break;
            case ReqCompTimeNo:
                break;
        }
    }

    ZigValue *arg_val;

    if (comptime_arg && !instr_is_comptime(casted_arg)) {
        ir_add_error(ira, &casted_arg->base,
            buf_sprintf("runtime value cannot be passed to comptime arg"));
        return false;
    }
    if (comptime_arg) {
        arg_part_of_generic_id = true;
        arg_val = ir_resolve_const(ira, casted_arg, UndefBad);
        if (!arg_val)
            return false;
    } else {
        // Runtime argument: the scope binding only carries the type.
        arg_val = create_const_runtime(ira->codegen, casted_arg->value->type);
    }
    if (arg_part_of_generic_id) {
        copy_const_val(ira->codegen, &generic_id->params[generic_id->param_count], arg_val);
        generic_id->param_count += 1;
    }

    Buf *param_name = param_decl_node->data.param_decl.name;
    // NOTE(review): an unnamed parameter aborts silently here (no error is
    // emitted in this function) — presumably reported elsewhere; confirm.
    if (!param_name) return false;
    if (!is_var_args) {
        // Bind the parameter in the instantiation scope so later parameter types
        // and the function body can refer to it.
        ZigVar *var = add_variable(ira->codegen, param_decl_node,
            *child_scope, param_name, true, arg_val, nullptr, arg_val->type);
        *child_scope = var->child_scope;
        var->shadowable = !comptime_arg;

        *next_proto_i += 1;
    } else if (casted_arg->value->type->id == ZigTypeIdComptimeInt ||
            casted_arg->value->type->id == ZigTypeIdComptimeFloat)
    {
        ir_add_error(ira, &casted_arg->base,
            buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/ziglang/zig/issues/557"));
        return false;
    }

    if (!comptime_arg) {
        // Runtime parameters become part of the concrete (monomorphized) fn type.
        casted_args[fn_type_id->param_count] = casted_arg;
        FnTypeParamInfo *param_info = &fn_type_id->param_info[fn_type_id->param_count];
        param_info->type = casted_arg->value->type;
        param_info->is_noalias = param_decl_node->data.param_decl.is_noalias;
        impl_fn->param_source_nodes[fn_type_id->param_count] = param_decl_node;
        fn_type_id->param_count += 1;
    }

    return true;
}
|
|
|
|
// Produces a pointer instruction for a variable reference. Follows the
// `next_var` chain to the most recent incarnation of the variable (created by
// inline-loop re-analysis), reuses the variable's existing pointer instruction
// when available, and otherwise builds a VarPtr instruction — comptime-const
// when the variable's value is comptime-known, runtime with a stack/non-stack
// hint otherwise. Returns invalid_inst_gen if the variable's type is unknown
// or invalid.
static IrInstGen *ir_get_var_ptr(IrAnalyze *ira, IrInst *source_instr, ZigVar *var) {
    // Use the newest incarnation of this declaration.
    while (var->next_var != nullptr) {
        var = var->next_var;
    }

    if (var->var_type == nullptr || type_is_invalid(var->var_type))
        return ira->codegen->invalid_inst_gen;

    bool is_volatile = false;
    ZigType *var_ptr_type = get_pointer_to_type_extra(ira->codegen, var->var_type,
            var->src_is_const, is_volatile, PtrLenSingle, var->align_bytes, 0, 0, false);

    // The variable already has a resolved pointer (its alloca); reuse it.
    if (var->ptr_instruction != nullptr) {
        return ir_implicit_cast(ira, var->ptr_instruction, var_ptr_type);
    }

    bool comptime_var_mem = ir_get_var_is_comptime(var);
    // An extern variable's value lives outside the compiler, so it can never be
    // treated as comptime-known even if its ZigValue looks comptime.
    bool linkage_makes_it_runtime = var->decl_node->data.variable_declaration.is_extern;

    IrInstGen *result = ir_build_var_ptr_gen(ira, source_instr, var);
    result->value->type = var_ptr_type;

    if (!linkage_makes_it_runtime && !var->is_thread_local && value_is_comptime(var->const_value)) {
        ZigValue *val = var->const_value;
        switch (val->special) {
            case ConstValSpecialRuntime:
                break;
            case ConstValSpecialStatic: // fallthrough
            case ConstValSpecialLazy: // fallthrough
            case ConstValSpecialUndef: {
                // Choose the pointer mutability that matches how the pointee may
                // change: comptime var, comptime const, or runtime var.
                ConstPtrMut ptr_mut;
                if (comptime_var_mem) {
                    ptr_mut = ConstPtrMutComptimeVar;
                } else if (var->gen_is_const) {
                    ptr_mut = ConstPtrMutComptimeConst;
                } else {
                    assert(!comptime_var_mem);
                    ptr_mut = ConstPtrMutRuntimeVar;
                }
                // Materialize a comptime pointer that references the variable's
                // constant value directly.
                result->value->special = ConstValSpecialStatic;
                result->value->data.x_ptr.mut = ptr_mut;
                result->value->data.x_ptr.special = ConstPtrSpecialRef;
                result->value->data.x_ptr.data.ref.pointee = val;
                return result;
            }
        }
    }

    // Runtime pointer: record whether it points into the current function's
    // stack frame, used by later escape/lifetime hints.
    bool in_fn_scope = (scope_fn_entry(var->parent_scope) != nullptr);
    result->value->data.rh_ptr = in_fn_scope ? RuntimeHintPtrStack : RuntimeHintPtrNonStack;

    return result;
}
|
|
|
|
// This function is called when a comptime value becomes accessible at runtime.
|
|
static void mark_comptime_value_escape(IrAnalyze *ira, IrInst* source_instr, ZigValue *val) {
|
|
ir_assert(value_is_comptime(val), source_instr);
|
|
if (val->special == ConstValSpecialUndef)
|
|
return;
|
|
|
|
if (val->type->id == ZigTypeIdFn && val->type->data.fn.fn_type_id.cc == CallingConventionUnspecified) {
|
|
ir_assert(val->data.x_ptr.special == ConstPtrSpecialFunction, source_instr);
|
|
if (val->data.x_ptr.data.fn.fn_entry->non_async_node == nullptr) {
|
|
val->data.x_ptr.data.fn.fn_entry->non_async_node = source_instr->source_node;
|
|
}
|
|
}
|
|
}
|
|
|
|
// Analyze a store of `uncasted_value` through `ptr`. Handles the discard
// pointer (`_ = x`), const-correctness checks, comptime stores into comptime
// memory, zero-bit types, and runtime-indexed vector element stores. Returns
// a void constant for stores fully resolved at comptime, a store/vector-store
// instruction for runtime stores, or invalid_inst_gen on error.
static IrInstGen *ir_analyze_store_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *ptr, IrInstGen *uncasted_value, bool allow_write_through_const)
{
    assert(ptr->value->type->id == ZigTypeIdPointer);

    // Store into the discard target (`_`): dropping errors silently is a
    // compile error; anything else is a no-op.
    if (ptr->value->data.x_ptr.special == ConstPtrSpecialDiscard) {
        if (uncasted_value->value->type->id == ZigTypeIdErrorUnion ||
            uncasted_value->value->type->id == ZigTypeIdErrorSet)
        {
            ir_add_error(ira, source_instr, buf_sprintf("error is discarded. consider using `try`, `catch`, or `if`"));
            return ira->codegen->invalid_inst_gen;
        }
        return ir_const_void(ira, source_instr);
    }

    // Writing through a const pointer is only allowed when the caller
    // explicitly permits it (e.g. result-location initialization).
    if (ptr->value->type->data.pointer.is_const && !allow_write_through_const) {
        ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
        return ira->codegen->invalid_inst_gen;
    }

    // Coerce the value to the pointee type.
    ZigType *child_type = ptr->value->type->data.pointer.child_type;
    IrInstGen *value = ir_implicit_cast(ira, uncasted_value, child_type);
    if (type_is_invalid(value->value->type))
        return ira->codegen->invalid_inst_gen;

    // Zero-bit pointee: nothing to store at runtime.
    switch (type_has_one_possible_value(ira->codegen, child_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            return ir_const_void(ira, source_instr);
        case OnePossibleValueNo:
            break;
    }

    // Comptime-known destination (except hard-coded addresses, which are
    // genuinely runtime memory).
    if (instr_is_comptime(ptr) && ptr->value->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
        if (!allow_write_through_const && ptr->value->data.x_ptr.mut == ConstPtrMutComptimeConst) {
            ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
            return ira->codegen->invalid_inst_gen;
        }
        // Destination memory is writable at comptime (comptime var, inferred
        // allocation, or a const the caller may write through).
        if ((allow_write_through_const && ptr->value->data.x_ptr.mut == ConstPtrMutComptimeConst) ||
            ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar ||
            ptr->value->data.x_ptr.mut == ConstPtrMutInfer)
        {
            if (instr_is_comptime(value)) {
                ZigValue *dest_val = const_ptr_pointee(ira, ira->codegen, ptr->value, source_instr->source_node);
                if (dest_val == nullptr)
                    return ira->codegen->invalid_inst_gen;
                if (dest_val->special != ConstValSpecialRuntime) {
                    // Perform the store entirely at comptime.
                    copy_const_val(ira->codegen, dest_val, value->value);

                    // Remember the first instruction that forces this basic
                    // block to be comptime (a write to a comptime var).
                    if (ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar &&
                        !ira->new_irb.current_basic_block->must_be_comptime_source_instr)
                    {
                        ira->new_irb.current_basic_block->must_be_comptime_source_instr = source_instr;
                    }
                    return ir_const_void(ira, source_instr);
                }
            }
            if (ptr->value->data.x_ptr.mut == ConstPtrMutInfer) {
                // Runtime value stored into inferred-comptime memory: demote
                // the destination to runtime and fall through to a runtime store.
                ptr->value->special = ConstValSpecialRuntime;
            } else {
                ir_add_error(ira, source_instr,
                        buf_sprintf("cannot store runtime value in compile time variable"));
                // Poison the destination so downstream loads also error out.
                ZigValue *dest_val = const_ptr_pointee_unchecked(ira->codegen, ptr->value);
                dest_val->type = ira->codegen->builtin_types.entry_invalid;

                return ira->codegen->invalid_inst_gen;
            }
        }
    }

    // For anytype fields of inferred structs, use the concrete inferred type
    // for the comptime-required check below.
    if (ptr->value->type->data.pointer.inferred_struct_field != nullptr &&
        child_type == ira->codegen->builtin_types.entry_anytype)
    {
        child_type = ptr->value->type->data.pointer.inferred_struct_field->inferred_struct_type;
    }

    // Comptime-only pointee types cannot receive runtime stores (unless the
    // whole pointer is zero-bit anyway).
    switch (type_requires_comptime(ira->codegen, child_type)) {
        case ReqCompTimeInvalid:
            return ira->codegen->invalid_inst_gen;
        case ReqCompTimeYes:
            switch (type_has_one_possible_value(ira->codegen, ptr->value->type)) {
                case OnePossibleValueInvalid:
                    return ira->codegen->invalid_inst_gen;
                case OnePossibleValueNo:
                    ir_add_error(ira, source_instr,
                        buf_sprintf("cannot store runtime value in type '%s'", buf_ptr(&child_type->name)));
                    return ira->codegen->invalid_inst_gen;
                case OnePossibleValueYes:
                    return ir_const_void(ira, source_instr);
            }
            zig_unreachable();
        case ReqCompTimeNo:
            break;
    }

    // A comptime value reaching a runtime store escapes to runtime.
    if (instr_is_comptime(value)) {
        mark_comptime_value_escape(ira, source_instr, value->value);
    }

    // If this is a store to a pointer with a runtime-known vector index,
    // we have to figure out the IrInstGen which represents the index and
    // emit a IrInstGenVectorStoreElem, or emit a compile error
    // explaining why it is impossible for this store to work. Which is that
    // the pointer address is of the vector; without the element index being known
    // we cannot properly perform the insertion.
    if (ptr->value->type->data.pointer.vector_index == VECTOR_INDEX_RUNTIME) {
        if (ptr->id == IrInstGenIdElemPtr) {
            IrInstGenElemPtr *elem_ptr = (IrInstGenElemPtr *)ptr;
            return ir_build_vector_store_elem(ira, source_instr, elem_ptr->array_ptr,
                    elem_ptr->elem_index, value);
        }
        ir_add_error(ira, &ptr->base,
            buf_sprintf("unable to determine vector element index of type '%s'",
                buf_ptr(&ptr->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Ordinary runtime store.
    return ir_build_store_ptr_gen(ira, source_instr, ptr, value);
}
|
|
|
|
// Validate and cast the `new_stack` operand of a call (@call stack option or
// @asyncCall). Returns nullptr when no new stack was provided, the casted
// stack operand on success, or invalid_inst_gen on error.
// - For @asyncCall with a known callee and an *@Frame(...) operand, casts to
//   a pointer to the callee's frame type.
// - Otherwise casts to a []align(fn alignment) u8 slice and requests the
//   frame-size prefix data needed to call on a byte-slice stack.
static IrInstGen *analyze_casted_new_stack(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *new_stack, IrInst *new_stack_src, bool is_async_call_builtin, ZigFn *fn_entry)
{
    if (new_stack == nullptr)
        return nullptr;

    if (!is_async_call_builtin &&
        arch_stack_pointer_register_name(ira->codegen->zig_target->arch) == nullptr)
    {
        ir_add_error(ira, source_instr,
            buf_sprintf("target arch '%s' does not support calling with a new stack",
                target_arch_name(ira->codegen->zig_target->arch)));
        // Bug fix: previously fell through after reporting the error and kept
        // analyzing; abort like every other error path in this file.
        return ira->codegen->invalid_inst_gen;
    }

    if (is_async_call_builtin &&
        fn_entry != nullptr && new_stack->value->type->id == ZigTypeIdPointer &&
        new_stack->value->type->data.pointer.child_type->id == ZigTypeIdFnFrame)
    {
        // @asyncCall with a frame pointer: require exactly the callee's frame type.
        ZigType *needed_frame_type = get_pointer_to_type(ira->codegen,
                get_fn_frame_type(ira->codegen, fn_entry), false);
        return ir_implicit_cast(ira, new_stack, needed_frame_type);
    } else {
        // Generic stack memory: a u8 slice aligned for function frames.
        ZigType *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
                false, false, PtrLenUnknown, target_fn_align(ira->codegen->zig_target), 0, 0, false);
        ZigType *u8_slice = get_slice_type(ira->codegen, u8_ptr);
        ira->codegen->need_frame_size_prefix_data = true;
        return ir_implicit_cast2(ira, new_stack_src, new_stack, u8_slice);
    }
}
|
|
|
|
// Analyze a function call. Handles, in order: naked-CC rejection, argument
// count validation, full comptime evaluation (with memoization), generic
// function instantiation (building a new ZigFn per unique comptime argument
// set), and finally ordinary runtime calls (argument coercion, result
// location resolution, async inference, and emission of IrInstGenCall).
// Fix applied: removed a stray empty statement (`;;`) after the
// fn_proto_node initialization; no behavior change.
static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr,
    ZigFn *fn_entry, ZigType *fn_type, IrInstGen *fn_ref,
    IrInstGen *first_arg_ptr, IrInst *first_arg_ptr_src, CallModifier modifier,
    IrInstGen *new_stack, IrInst *new_stack_src, bool is_async_call_builtin,
    IrInstGen **args_ptr, size_t args_len, IrInstGen *ret_ptr, ResultLoc *call_result_loc)
{
    Error err;
    FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
    // Method calls pass the receiver as a pointer in first_arg_ptr.
    size_t first_arg_1_or_0 = first_arg_ptr ? 1 : 0;

    // for extern functions, the var args argument is not counted.
    // for zig functions, it is.
    size_t var_args_1_or_0;
    if (fn_type_id->cc == CallingConventionC) {
        var_args_1_or_0 = 0;
    } else {
        var_args_1_or_0 = fn_type_id->is_var_args ? 1 : 0;
    }
    size_t src_param_count = fn_type_id->param_count - var_args_1_or_0;
    size_t call_param_count = args_len + first_arg_1_or_0;
    AstNode *source_node = source_instr->source_node;

    AstNode *fn_proto_node = fn_entry ? fn_entry->proto_node : nullptr;

    // Naked functions have no normal prologue/epilogue; they cannot be called.
    if (fn_type_id->cc == CallingConventionNaked) {
        ErrorMsg *msg = ir_add_error(ira, &fn_ref->base, buf_sprintf("unable to call function with naked calling convention"));
        if (fn_proto_node) {
            add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
        }
        return ira->codegen->invalid_inst_gen;
    }

    // Argument count check: var-args requires at least the fixed params,
    // otherwise the count must match exactly.
    if (fn_type_id->is_var_args) {
        if (call_param_count < src_param_count) {
            ErrorMsg *msg = ir_add_error_node(ira, source_node,
                buf_sprintf("expected at least %" ZIG_PRI_usize " argument(s), found %" ZIG_PRI_usize "",
                    src_param_count, call_param_count));
            if (fn_proto_node) {
                add_error_note(ira->codegen, msg, fn_proto_node,
                    buf_sprintf("declared here"));
            }
            return ira->codegen->invalid_inst_gen;
        }
    } else if (src_param_count != call_param_count) {
        ErrorMsg *msg = ir_add_error_node(ira, source_node,
            buf_sprintf("expected %" ZIG_PRI_usize " argument(s), found %" ZIG_PRI_usize "",
                src_param_count, call_param_count));
        if (fn_proto_node) {
            add_error_note(ira->codegen, msg, fn_proto_node,
                buf_sprintf("declared here"));
        }
        return ira->codegen->invalid_inst_gen;
    }

    if (modifier == CallModifierCompileTime) {
        // If we are evaluating an extern function in a TypeOf call, we can return an undefined value
        // of its return type.
        if (fn_entry != nullptr && get_scope_typeof(source_instr->scope) != nullptr &&
            fn_proto_node->data.fn_proto.is_extern) {

            assert(fn_entry->body_node == nullptr);
            AstNode *return_type_node = fn_proto_node->data.fn_proto.return_type;
            ZigType *return_type = ir_analyze_type_expr(ira, source_instr->scope, return_type_node);
            if (type_is_invalid(return_type))
                return ira->codegen->invalid_inst_gen;

            return ir_const_undef(ira, source_instr, return_type);
        }

        // No special handling is needed for compile time evaluation of generic functions.
        if (!fn_entry || fn_entry->body_node == nullptr) {
            ir_add_error(ira, &fn_ref->base, buf_sprintf("unable to evaluate constant expression"));
            return ira->codegen->invalid_inst_gen;
        }

        // Count against the backward-branch quota to bound comptime execution.
        if (!ir_emit_backward_branch(ira, source_instr))
            return ira->codegen->invalid_inst_gen;

        // Fork a scope of the function with known values for the parameters.
        Scope *exec_scope = &fn_entry->fndef_scope->base;

        size_t next_proto_i = 0;
        if (first_arg_ptr) {
            assert(first_arg_ptr->value->type->id == ZigTypeIdPointer);

            // Auto-deref the receiver when the first parameter is not a pointer.
            bool first_arg_known_bare = false;
            if (fn_type_id->next_param_index >= 1) {
                ZigType *param_type = fn_type_id->param_info[next_proto_i].type;
                if (type_is_invalid(param_type))
                    return ira->codegen->invalid_inst_gen;
                first_arg_known_bare = param_type->id != ZigTypeIdPointer;
            }

            IrInstGen *first_arg;
            if (!first_arg_known_bare) {
                first_arg = first_arg_ptr;
            } else {
                first_arg = ir_get_deref(ira, &first_arg_ptr->base, first_arg_ptr, nullptr);
                if (type_is_invalid(first_arg->value->type))
                    return ira->codegen->invalid_inst_gen;
            }

            if (!ir_analyze_fn_call_inline_arg(ira, fn_proto_node, first_arg, &exec_scope, &next_proto_i))
                return ira->codegen->invalid_inst_gen;
        }

        // Bind every remaining argument into the forked comptime scope.
        for (size_t call_i = 0; call_i < args_len; call_i += 1) {
            IrInstGen *old_arg = args_ptr[call_i];

            if (!ir_analyze_fn_call_inline_arg(ira, fn_proto_node, old_arg, &exec_scope, &next_proto_i))
                return ira->codegen->invalid_inst_gen;
        }

        AstNode *return_type_node = fn_proto_node->data.fn_proto.return_type;
        if (return_type_node == nullptr) {
            ir_add_error(ira, &fn_ref->base,
                buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"));
            return ira->codegen->invalid_inst_gen;
        }
        ZigType *specified_return_type = ir_analyze_type_expr(ira, exec_scope, return_type_node);
        if (type_is_invalid(specified_return_type))
            return ira->codegen->invalid_inst_gen;
        ZigType *return_type;
        ZigType *inferred_err_set_type = nullptr;
        if (fn_proto_node->data.fn_proto.auto_err_set) {
            // `!T` return: the error set is inferred from the body.
            inferred_err_set_type = get_auto_err_set_type(ira->codegen, fn_entry);
            if ((err = type_resolve(ira->codegen, specified_return_type, ResolveStatusSizeKnown)))
                return ira->codegen->invalid_inst_gen;
            return_type = get_error_union_type(ira->codegen, inferred_err_set_type, specified_return_type);
        } else {
            return_type = specified_return_type;
        }

        // Memoize comptime call results when the scope/return type allow it.
        bool cacheable = fn_eval_cacheable(exec_scope, return_type);
        ZigValue *result = nullptr;
        if (cacheable) {
            auto entry = ira->codegen->memoized_fn_eval_table.maybe_get(exec_scope);
            if (entry)
                result = entry->value;
        }

        if (result == nullptr) {
            // Analyze the fn body block like any other constant expression.
            AstNode *body_node = fn_entry->body_node;
            ZigValue *result_ptr;
            create_result_ptr(ira->codegen, return_type, &result, &result_ptr);

            if ((err = ir_eval_const_value(ira->codegen, exec_scope, body_node, result_ptr,
                ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota,
                fn_entry, nullptr, source_instr->source_node, nullptr, ira->new_irb.exec, return_type_node,
                UndefOk)))
            {
                return ira->codegen->invalid_inst_gen;
            }

            // Back-fill the inferred error set now that the body executed.
            if (inferred_err_set_type != nullptr) {
                inferred_err_set_type->data.error_set.incomplete = false;
                if (result->type->id == ZigTypeIdErrorUnion) {
                    ErrorTableEntry *err = result->data.x_err_union.error_set->data.x_err_set;
                    if (err != nullptr) {
                        inferred_err_set_type->data.error_set.err_count = 1;
                        inferred_err_set_type->data.error_set.errors = heap::c_allocator.create<ErrorTableEntry *>();
                        inferred_err_set_type->data.error_set.errors[0] = err;
                    }
                    ZigType *fn_inferred_err_set_type = result->type->data.error_union.err_set_type;
                    inferred_err_set_type->data.error_set.err_count = fn_inferred_err_set_type->data.error_set.err_count;
                    inferred_err_set_type->data.error_set.errors = fn_inferred_err_set_type->data.error_set.errors;
                } else if (result->type->id == ZigTypeIdErrorSet) {
                    inferred_err_set_type->data.error_set.err_count = result->type->data.error_set.err_count;
                    inferred_err_set_type->data.error_set.errors = result->type->data.error_set.errors;
                }
            }

            if (cacheable) {
                ira->codegen->memoized_fn_eval_table.put(exec_scope, result);
            }

            if (type_is_invalid(result->type)) {
                return ira->codegen->invalid_inst_gen;
            }
        }

        IrInstGen *new_instruction = ir_const_move(ira, source_instr, result);
        return ir_finish_anal(ira, new_instruction);
    }

    if (fn_type->data.fn.is_generic) {
        if (!fn_entry) {
            ir_add_error(ira, &fn_ref->base,
                buf_sprintf("calling a generic function requires compile-time known function value"));
            return ira->codegen->invalid_inst_gen;
        }

        size_t new_fn_arg_count = first_arg_1_or_0 + args_len;

        IrInstGen **casted_args = heap::c_allocator.allocate<IrInstGen *>(new_fn_arg_count);

        // Fork a scope of the function with known values for the parameters.
        Scope *parent_scope = fn_entry->fndef_scope->base.parent;
        ZigFn *impl_fn = create_fn(ira->codegen, fn_proto_node);
        impl_fn->param_source_nodes = heap::c_allocator.allocate<AstNode *>(new_fn_arg_count);
        buf_init_from_buf(&impl_fn->symbol_name, &fn_entry->symbol_name);
        impl_fn->fndef_scope = create_fndef_scope(ira->codegen, impl_fn->body_node, parent_scope, impl_fn);
        impl_fn->child_scope = &impl_fn->fndef_scope->base;
        FnTypeId inst_fn_type_id = {0};
        init_fn_type_id(&inst_fn_type_id, fn_proto_node, fn_type_id->cc, new_fn_arg_count);
        inst_fn_type_id.param_count = 0;
        inst_fn_type_id.is_var_args = false;

        // TODO maybe GenericFnTypeId can be replaced with using the child_scope directly
        // as the key in generic_table
        GenericFnTypeId *generic_id = heap::c_allocator.create<GenericFnTypeId>();
        generic_id->fn_entry = fn_entry;
        generic_id->param_count = 0;
        generic_id->params = ira->codegen->pass1_arena->allocate<ZigValue>(new_fn_arg_count);
        size_t next_proto_i = 0;

        if (first_arg_ptr) {
            assert(first_arg_ptr->value->type->id == ZigTypeIdPointer);

            // Auto-deref the receiver when the first parameter is not a pointer.
            bool first_arg_known_bare = false;
            if (fn_type_id->next_param_index >= 1) {
                ZigType *param_type = fn_type_id->param_info[next_proto_i].type;
                if (type_is_invalid(param_type))
                    return ira->codegen->invalid_inst_gen;
                first_arg_known_bare = param_type->id != ZigTypeIdPointer;
            }

            IrInstGen *first_arg;
            if (!first_arg_known_bare) {
                first_arg = first_arg_ptr;
            } else {
                first_arg = ir_get_deref(ira, &first_arg_ptr->base, first_arg_ptr, nullptr);
                if (type_is_invalid(first_arg->value->type))
                    return ira->codegen->invalid_inst_gen;
            }

            if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, first_arg, first_arg_ptr_src,
                &impl_fn->child_scope, &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn))
            {
                return ira->codegen->invalid_inst_gen;
            }
        }

        ZigFn *parent_fn_entry = ira->new_irb.exec->fn_entry;
        assert(parent_fn_entry);
        // Each argument either specializes the instantiation (comptime/anytype
        // params) or is casted and appended to casted_args.
        for (size_t call_i = 0; call_i < args_len; call_i += 1) {
            IrInstGen *arg = args_ptr[call_i];

            AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i);
            assert(param_decl_node->type == NodeTypeParamDecl);

            if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, arg, &arg->base, &impl_fn->child_scope,
                &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn))
            {
                return ira->codegen->invalid_inst_gen;
            }
        }

        // align(expr) may reference the instantiated parameters; evaluate it
        // in the instantiation's child scope.
        if (fn_proto_node->data.fn_proto.align_expr != nullptr) {
            ZigValue *align_result;
            ZigValue *result_ptr;
            create_result_ptr(ira->codegen, get_align_amt_type(ira->codegen), &align_result, &result_ptr);
            if ((err = ir_eval_const_value(ira->codegen, impl_fn->child_scope,
                fn_proto_node->data.fn_proto.align_expr, result_ptr,
                ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota,
                nullptr, nullptr, fn_proto_node->data.fn_proto.align_expr, nullptr, ira->new_irb.exec,
                nullptr, UndefBad)))
            {
                return ira->codegen->invalid_inst_gen;
            }
            IrInstGenConst *const_instruction = ir_create_inst_noval<IrInstGenConst>(&ira->new_irb,
                impl_fn->child_scope, fn_proto_node->data.fn_proto.align_expr);
            const_instruction->base.value = align_result;

            uint32_t align_bytes = 0;
            ir_resolve_align(ira, &const_instruction->base, nullptr, &align_bytes);
            impl_fn->align_bytes = align_bytes;
            inst_fn_type_id.alignment = align_bytes;
        }

        if (fn_proto_node->data.fn_proto.return_anytype_token == nullptr) {
            AstNode *return_type_node = fn_proto_node->data.fn_proto.return_type;
            ZigType *specified_return_type = ir_analyze_type_expr(ira, impl_fn->child_scope, return_type_node);
            if (type_is_invalid(specified_return_type))
                return ira->codegen->invalid_inst_gen;

            if(!is_valid_return_type(specified_return_type)){
                ErrorMsg *msg = ir_add_error(ira, source_instr,
                    buf_sprintf("call to generic function with %s return type '%s' not allowed", type_id_name(specified_return_type->id), buf_ptr(&specified_return_type->name)));
                add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("function declared here"));

                Tld *tld = find_decl(ira->codegen, &fn_entry->fndef_scope->base, &specified_return_type->name);
                if (tld != nullptr) {
                    add_error_note(ira->codegen, msg, tld->source_node, buf_sprintf("type declared here"));
                }
                return ira->codegen->invalid_inst_gen;
            }

            if (fn_proto_node->data.fn_proto.auto_err_set) {
                ZigType *inferred_err_set_type = get_auto_err_set_type(ira->codegen, impl_fn);
                if ((err = type_resolve(ira->codegen, specified_return_type, ResolveStatusSizeKnown)))
                    return ira->codegen->invalid_inst_gen;
                inst_fn_type_id.return_type = get_error_union_type(ira->codegen, inferred_err_set_type, specified_return_type);
            } else {
                inst_fn_type_id.return_type = specified_return_type;
            }

            switch (type_requires_comptime(ira->codegen, specified_return_type)) {
                case ReqCompTimeYes:
                    // Throw out our work and call the function as if it were comptime.
                    return ir_analyze_fn_call(ira, source_instr, fn_entry, fn_type, fn_ref, first_arg_ptr,
                        first_arg_ptr_src, CallModifierCompileTime, new_stack, new_stack_src, is_async_call_builtin,
                        args_ptr, args_len, ret_ptr, call_result_loc);
                case ReqCompTimeInvalid:
                    return ira->codegen->invalid_inst_gen;
                case ReqCompTimeNo:
                    break;
            }
        }

        auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
        if (existing_entry) {
            // throw away all our work and use the existing function
            impl_fn = existing_entry->value;
        } else {
            // finish instantiating the function
            impl_fn->type_entry = get_fn_type(ira->codegen, &inst_fn_type_id);
            if (type_is_invalid(impl_fn->type_entry))
                return ira->codegen->invalid_inst_gen;

            impl_fn->ir_executable->source_node = source_instr->source_node;
            impl_fn->ir_executable->parent_exec = ira->new_irb.exec;
            impl_fn->analyzed_executable.source_node = source_instr->source_node;
            impl_fn->analyzed_executable.parent_exec = ira->new_irb.exec;
            impl_fn->analyzed_executable.backward_branch_quota = ira->new_irb.exec->backward_branch_quota;
            impl_fn->analyzed_executable.is_generic_instantiation = true;

            ira->codegen->fn_defs.append(impl_fn);
        }

        FnTypeId *impl_fn_type_id = &impl_fn->type_entry->data.fn.fn_type_id;

        if (fn_type_can_fail(impl_fn_type_id)) {
            parent_fn_entry->calls_or_awaits_errorable_fn = true;
        }

        IrInstGen *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack,
            new_stack_src, is_async_call_builtin, impl_fn);
        if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
            return ira->codegen->invalid_inst_gen;

        size_t impl_param_count = impl_fn_type_id->param_count;
        if (modifier == CallModifierAsync) {
            IrInstGen *result = ir_analyze_async_call(ira, source_instr, impl_fn, impl_fn->type_entry,
                nullptr, casted_args, impl_param_count, casted_new_stack, is_async_call_builtin, ret_ptr,
                call_result_loc);
            return ir_finish_anal(ira, result);
        }

        // Resolve a result location when the return type is passed by pointer.
        IrInstGen *result_loc;
        if (handle_is_ptr(ira->codegen, impl_fn_type_id->return_type)) {
            result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
                impl_fn_type_id->return_type, nullptr, true, false);
            if (result_loc != nullptr) {
                if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
                    return result_loc;
                }
                if (result_loc->value->type->data.pointer.is_const) {
                    ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
                    return ira->codegen->invalid_inst_gen;
                }

                // Dummy cast to type-check the result location without
                // producing the real value yet.
                IrInstGen *dummy_value = ir_const(ira, source_instr, impl_fn_type_id->return_type);
                dummy_value->value->special = ConstValSpecialRuntime;
                IrInstGen *dummy_result = ir_implicit_cast2(ira, source_instr,
                    dummy_value, result_loc->value->type->data.pointer.child_type);
                if (type_is_invalid(dummy_result->value->type))
                    return ira->codegen->invalid_inst_gen;
                ZigType *res_child_type = result_loc->value->type->data.pointer.child_type;
                if (res_child_type == ira->codegen->builtin_types.entry_anytype) {
                    res_child_type = impl_fn_type_id->return_type;
                }
                if (!handle_is_ptr(ira->codegen, res_child_type)) {
                    ir_reset_result(call_result_loc);
                    result_loc = nullptr;
                }
            }
        } else if (is_async_call_builtin) {
            result_loc = get_async_call_result_loc(ira, source_instr, impl_fn_type_id->return_type,
                is_async_call_builtin, args_ptr, args_len, ret_ptr);
            if (result_loc != nullptr && type_is_invalid(result_loc->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            result_loc = nullptr;
        }

        // Calling an async function infers the caller to be async too
        // (unless the call is nosuspend).
        if (impl_fn_type_id->cc == CallingConventionAsync &&
            parent_fn_entry->inferred_async_node == nullptr &&
            modifier != CallModifierNoSuspend)
        {
            parent_fn_entry->inferred_async_node = fn_ref->base.source_node;
            parent_fn_entry->inferred_async_fn = impl_fn;
        }

        IrInstGenCall *new_call_instruction = ir_build_call_gen(ira, source_instr,
            impl_fn, nullptr, impl_param_count, casted_args, modifier, casted_new_stack,
            is_async_call_builtin, result_loc, impl_fn_type_id->return_type);

        if (get_scope_typeof(source_instr->scope) == nullptr) {
            parent_fn_entry->call_list.append(new_call_instruction);
        }

        return ir_finish_anal(ira, &new_call_instruction->base);
    }

    // Non-generic runtime call.
    ZigFn *parent_fn_entry = ira->new_irb.exec->fn_entry;
    assert(fn_type_id->return_type != nullptr);
    assert(parent_fn_entry != nullptr);
    if (fn_type_can_fail(fn_type_id)) {
        parent_fn_entry->calls_or_awaits_errorable_fn = true;
    }

    IrInstGen **casted_args = heap::c_allocator.allocate<IrInstGen *>(call_param_count);
    size_t next_arg_index = 0;
    if (first_arg_ptr) {
        assert(first_arg_ptr->value->type->id == ZigTypeIdPointer);

        ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
        if (type_is_invalid(param_type))
            return ira->codegen->invalid_inst_gen;

        // Auto-deref the receiver when the first parameter is not a pointer.
        IrInstGen *first_arg;
        if (param_type->id == ZigTypeIdPointer) {
            first_arg = first_arg_ptr;
        } else {
            first_arg = ir_get_deref(ira, &first_arg_ptr->base, first_arg_ptr, nullptr);
            if (type_is_invalid(first_arg->value->type))
                return ira->codegen->invalid_inst_gen;
        }

        IrInstGen *casted_arg = ir_implicit_cast2(ira, first_arg_ptr_src, first_arg, param_type);
        if (type_is_invalid(casted_arg->value->type))
            return ira->codegen->invalid_inst_gen;

        casted_args[next_arg_index] = casted_arg;
        next_arg_index += 1;
    }
    // Coerce each fixed argument to its parameter type; var-args pass through.
    for (size_t call_i = 0; call_i < args_len; call_i += 1) {
        IrInstGen *old_arg = args_ptr[call_i];
        if (type_is_invalid(old_arg->value->type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *casted_arg;
        if (next_arg_index < src_param_count) {
            ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
            if (type_is_invalid(param_type))
                return ira->codegen->invalid_inst_gen;
            casted_arg = ir_implicit_cast(ira, old_arg, param_type);
            if (type_is_invalid(casted_arg->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            casted_arg = old_arg;
        }

        casted_args[next_arg_index] = casted_arg;
        next_arg_index += 1;
    }

    assert(next_arg_index == call_param_count);

    ZigType *return_type = fn_type_id->return_type;
    if (type_is_invalid(return_type))
        return ira->codegen->invalid_inst_gen;

    if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && modifier == CallModifierNeverInline) {
        ir_add_error(ira, source_instr,
            buf_sprintf("no-inline call of inline function"));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack, new_stack_src,
        is_async_call_builtin, fn_entry);
    if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
        return ira->codegen->invalid_inst_gen;

    if (modifier == CallModifierAsync) {
        IrInstGen *result = ir_analyze_async_call(ira, source_instr, fn_entry, fn_type, fn_ref,
            casted_args, call_param_count, casted_new_stack, is_async_call_builtin, ret_ptr, call_result_loc);
        return ir_finish_anal(ira, result);
    }

    // Calling an async function infers the caller to be async too
    // (unless the call is nosuspend).
    if (fn_type_id->cc == CallingConventionAsync &&
        parent_fn_entry->inferred_async_node == nullptr &&
        modifier != CallModifierNoSuspend)
    {
        parent_fn_entry->inferred_async_node = fn_ref->base.source_node;
        parent_fn_entry->inferred_async_fn = fn_entry;
    }

    // Resolve a result location when the return type is passed by pointer.
    IrInstGen *result_loc;
    if (handle_is_ptr(ira->codegen, return_type)) {
        result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
            return_type, nullptr, true, false);
        if (result_loc != nullptr) {
            if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
                return result_loc;
            }
            if (result_loc->value->type->data.pointer.is_const) {
                ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
                return ira->codegen->invalid_inst_gen;
            }

            ZigType *expected_return_type = result_loc->value->type->data.pointer.child_type;

            // Dummy cast to type-check the result location without producing
            // the real value yet; add targeted notes for error-type mismatches.
            IrInstGen *dummy_value = ir_const(ira, source_instr, return_type);
            dummy_value->value->special = ConstValSpecialRuntime;
            IrInstGen *dummy_result = ir_implicit_cast2(ira, source_instr,
                dummy_value, expected_return_type);
            if (type_is_invalid(dummy_result->value->type)) {
                if ((return_type->id == ZigTypeIdErrorUnion || return_type->id == ZigTypeIdErrorSet) &&
                    expected_return_type->id != ZigTypeIdErrorUnion && expected_return_type->id != ZigTypeIdErrorSet)
                {
                    if (call_result_loc->id == ResultLocIdReturn) {
                        add_error_note(ira->codegen, ira->new_irb.exec->first_err_trace_msg,
                            ira->explicit_return_type_source_node, buf_sprintf("function cannot return an error"));
                    } else {
                        add_error_note(ira->codegen, ira->new_irb.exec->first_err_trace_msg, result_loc->base.source_node,
                            buf_sprintf("cannot store an error in type '%s'", buf_ptr(&expected_return_type->name)));
                    }
                }
                return ira->codegen->invalid_inst_gen;
            }
            if (expected_return_type == ira->codegen->builtin_types.entry_anytype) {
                expected_return_type = return_type;
            }
            if (!handle_is_ptr(ira->codegen, expected_return_type)) {
                ir_reset_result(call_result_loc);
                result_loc = nullptr;
            }
        }
    } else if (is_async_call_builtin) {
        result_loc = get_async_call_result_loc(ira, source_instr, return_type, is_async_call_builtin,
            args_ptr, args_len, ret_ptr);
        if (result_loc != nullptr && type_is_invalid(result_loc->value->type))
            return ira->codegen->invalid_inst_gen;
    } else {
        result_loc = nullptr;
    }

    IrInstGenCall *new_call_instruction = ir_build_call_gen(ira, source_instr, fn_entry, fn_ref,
        call_param_count, casted_args, modifier, casted_new_stack,
        is_async_call_builtin, result_loc, return_type);
    if (get_scope_typeof(source_instr->scope) == nullptr) {
        parent_fn_entry->call_list.append(new_call_instruction);
    }
    return ir_finish_anal(ira, &new_call_instruction->base);
}
|
|
|
|
// Adapter from the pass1 call instruction to ir_analyze_fn_call: gathers the
// already-analyzed new_stack, argument, and ret_ptr operands, delegates, and
// releases the temporary argument array.
// Fix applied: the temporary args_ptr buffer was deallocated only on the
// success path; the early error returns after allocation leaked it. It is
// now freed on every exit path that follows the allocation.
static IrInstGen *ir_analyze_fn_call_src(IrAnalyze *ira, IrInstSrcCall *call_instruction,
    ZigFn *fn_entry, ZigType *fn_type, IrInstGen *fn_ref,
    IrInstGen *first_arg_ptr, IrInst *first_arg_ptr_src, CallModifier modifier)
{
    IrInstGen *new_stack = nullptr;
    IrInst *new_stack_src = nullptr;
    if (call_instruction->new_stack) {
        new_stack = call_instruction->new_stack->child;
        if (type_is_invalid(new_stack->value->type))
            return ira->codegen->invalid_inst_gen;
        new_stack_src = &call_instruction->new_stack->base;
    }
    IrInstGen **args_ptr = heap::c_allocator.allocate<IrInstGen *>(call_instruction->arg_count);
    for (size_t i = 0; i < call_instruction->arg_count; i += 1) {
        args_ptr[i] = call_instruction->args[i]->child;
        if (type_is_invalid(args_ptr[i]->value->type)) {
            heap::c_allocator.deallocate(args_ptr, call_instruction->arg_count);
            return ira->codegen->invalid_inst_gen;
        }
    }
    IrInstGen *ret_ptr = nullptr;
    if (call_instruction->ret_ptr != nullptr) {
        ret_ptr = call_instruction->ret_ptr->child;
        if (type_is_invalid(ret_ptr->value->type)) {
            heap::c_allocator.deallocate(args_ptr, call_instruction->arg_count);
            return ira->codegen->invalid_inst_gen;
        }
    }
    IrInstGen *result = ir_analyze_fn_call(ira, &call_instruction->base.base, fn_entry, fn_type, fn_ref,
        first_arg_ptr, first_arg_ptr_src, modifier, new_stack, new_stack_src,
        call_instruction->is_async_call_builtin, args_ptr, call_instruction->arg_count, ret_ptr,
        call_instruction->result_loc);
    heap::c_allocator.deallocate(args_ptr, call_instruction->arg_count);
    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_call_extra(IrAnalyze *ira, IrInst* source_instr,
        IrInstSrc *pass1_options, IrInstSrc *pass1_fn_ref, IrInstGen **args_ptr, size_t args_len,
        ResultLoc *result_loc)
{
    // Analyzes a `@call(options, fn, args)`-style call: reads the comptime-known
    // `options` struct (its `modifier` and `stack` fields), resolves the callee,
    // and forwards everything to ir_analyze_fn_call.
    IrInstGen *options = pass1_options->child;
    if (type_is_invalid(options->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *fn_ref = pass1_fn_ref->child;
    if (type_is_invalid(fn_ref->value->type))
        return ira->codegen->invalid_inst_gen;

    // The call modifier must be comptime-known; it is stored as an enum tag in
    // the options struct's `modifier` field.
    TypeStructField *modifier_field = find_struct_type_field(options->value->type, buf_create_from_str("modifier"));
    ir_assert(modifier_field != nullptr, source_instr);
    IrInstGen *modifier_inst = ir_analyze_struct_value_field_value(ira, source_instr, options, modifier_field);
    ZigValue *modifier_val = ir_resolve_const(ira, modifier_inst, UndefBad);
    if (modifier_val == nullptr)
        return ira->codegen->invalid_inst_gen;
    CallModifier modifier = (CallModifier)bigint_as_u32(&modifier_val->data.x_enum_tag);

    // When the surrounding scope forces inlining, remap every modifier that is
    // legal at comptime to CallModifierCompileTime, and reject the rest.
    if (ir_should_inline(ira->old_irb.exec, source_instr->scope)) {
        switch (modifier) {
            case CallModifierBuiltin:
                zig_unreachable();
            case CallModifierAsync:
                ir_add_error(ira, source_instr, buf_sprintf("TODO: comptime @call with async modifier"));
                return ira->codegen->invalid_inst_gen;
            case CallModifierCompileTime:
            case CallModifierNone:
            case CallModifierAlwaysInline:
            case CallModifierAlwaysTail:
            case CallModifierNoSuspend:
                modifier = CallModifierCompileTime;
                break;
            case CallModifierNeverInline:
                ir_add_error(ira, source_instr,
                    buf_sprintf("unable to perform 'never_inline' call at compile-time"));
                return ira->codegen->invalid_inst_gen;
            case CallModifierNeverTail:
                ir_add_error(ira, source_instr,
                    buf_sprintf("unable to perform 'never_tail' call at compile-time"));
                return ira->codegen->invalid_inst_gen;
        }
    }

    // A bound-fn callee carries an implicit first argument (the bound object
    // pointer); otherwise try to resolve the callee to a concrete ZigFn.
    IrInstGen *first_arg_ptr = nullptr;
    IrInst *first_arg_ptr_src = nullptr;
    ZigFn *fn = nullptr;
    if (instr_is_comptime(fn_ref)) {
        if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
            assert(fn_ref->value->special == ConstValSpecialStatic);
            fn = fn_ref->value->data.x_bound_fn.fn;
            first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
            first_arg_ptr_src = fn_ref->value->data.x_bound_fn.first_arg_src;
            if (type_is_invalid(first_arg_ptr->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            fn = ir_resolve_fn(ira, fn_ref);
        }
    }

    // Some modifiers require the callee to be comptime-known
    switch (modifier) {
        case CallModifierCompileTime:
        case CallModifierAlwaysInline:
        case CallModifierAsync:
            if (fn == nullptr) {
                ir_add_error(ira, &modifier_inst->base,
                    buf_sprintf("the specified modifier requires a comptime-known function"));
                return ira->codegen->invalid_inst_gen;
            }
            ZIG_FALLTHROUGH;
        default:
            break;
    }

    ZigType *fn_type = (fn != nullptr) ? fn->type_entry : fn_ref->value->type;

    // The `stack` option is a comptime-known optional; when non-null its
    // payload becomes the new-stack operand of the call.
    TypeStructField *stack_field = find_struct_type_field(options->value->type, buf_create_from_str("stack"));
    ir_assert(stack_field != nullptr, source_instr);
    IrInstGen *opt_stack = ir_analyze_struct_value_field_value(ira, source_instr, options, stack_field);
    if (type_is_invalid(opt_stack->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *stack_is_non_null_inst = ir_analyze_test_non_null(ira, source_instr, opt_stack);
    bool stack_is_non_null;
    if (!ir_resolve_bool(ira, stack_is_non_null_inst, &stack_is_non_null))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *stack = nullptr;
    IrInst *stack_src = nullptr;
    if (stack_is_non_null) {
        stack = ir_analyze_optional_value_payload_value(ira, source_instr, opt_stack, false);
        if (type_is_invalid(stack->value->type))
            return ira->codegen->invalid_inst_gen;
        stack_src = &stack->base;
    }

    return ir_analyze_fn_call(ira, source_instr, fn, fn_type, fn_ref, first_arg_ptr, first_arg_ptr_src,
            modifier, stack, stack_src, false, args_ptr, args_len, nullptr, result_loc);
}
|
|
|
|
static IrInstGen *ir_analyze_async_call_extra(IrAnalyze *ira, IrInst* source_instr, CallModifier modifier,
        IrInstSrc *pass1_fn_ref, IrInstSrc *ret_ptr, IrInstSrc *new_stack, IrInstGen **args_ptr, size_t args_len, ResultLoc *result_loc)
{
    // Analyzes an `@asyncCall(stack, ret_ptr, fn, args)`-style call. Unlike
    // ir_analyze_call_extra there is no options struct: the modifier, return
    // pointer and frame/stack operand are passed in directly.
    IrInstGen *fn_ref = pass1_fn_ref->child;
    if (type_is_invalid(fn_ref->value->type))
        return ira->codegen->invalid_inst_gen;

    // @asyncCall is not supported at comptime.
    if (ir_should_inline(ira->old_irb.exec, source_instr->scope)) {
        ir_add_error(ira, source_instr, buf_sprintf("TODO: comptime @asyncCall"));
        return ira->codegen->invalid_inst_gen;
    }

    // A bound-fn callee carries an implicit first argument (the bound object
    // pointer); otherwise try to resolve the callee to a concrete ZigFn.
    IrInstGen *first_arg_ptr = nullptr;
    IrInst *first_arg_ptr_src = nullptr;
    ZigFn *fn = nullptr;
    if (instr_is_comptime(fn_ref)) {
        if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
            assert(fn_ref->value->special == ConstValSpecialStatic);
            fn = fn_ref->value->data.x_bound_fn.fn;
            first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
            first_arg_ptr_src = fn_ref->value->data.x_bound_fn.first_arg_src;
            if (type_is_invalid(first_arg_ptr->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            fn = ir_resolve_fn(ira, fn_ref);
        }
    }

    // Optional explicit result pointer for the async return value.
    IrInstGen *ret_ptr_uncasted = nullptr;
    if (ret_ptr != nullptr) {
        ret_ptr_uncasted = ret_ptr->child;
        if (type_is_invalid(ret_ptr_uncasted->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    ZigType *fn_type = (fn != nullptr) ? fn->type_entry : fn_ref->value->type;
    // NOTE: `new_stack` is assumed non-null here (the builtin always supplies
    // one); analyze_casted_new_stack coerces it for an async call (`true`).
    IrInstGen *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack->child,
            &new_stack->base, true, fn);
    if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_fn_call(ira, source_instr, fn, fn_type, fn_ref, first_arg_ptr, first_arg_ptr_src,
            modifier, casted_new_stack, &new_stack->base, true, args_ptr, args_len, ret_ptr_uncasted, result_loc);
}
|
|
|
|
static bool ir_extract_tuple_call_args(IrAnalyze *ira, IrInst *source_instr, IrInstGen *args, IrInstGen ***args_ptr, size_t *args_len) {
    // Expands a comptime tuple value into a heap-allocated array of argument
    // instructions, one per tuple field. On success, *args_ptr/*args_len are
    // set and the caller owns the buffer. On failure an error has been added
    // and false is returned; the out-params are left null/zero.
    ZigType *args_type = args->value->type;
    if (type_is_invalid(args_type))
        return false;

    if (args_type->id != ZigTypeIdStruct) {
        ir_add_error(ira, &args->base,
            buf_sprintf("expected tuple or struct, found '%s'", buf_ptr(&args_type->name)));
        return false;
    }

    if (is_tuple(args_type)) {
        *args_len = args_type->data.structure.src_field_count;
        *args_ptr = heap::c_allocator.allocate<IrInstGen *>(*args_len);
        for (size_t i = 0; i < *args_len; i += 1) {
            TypeStructField *arg_field = args_type->data.structure.fields[i];
            (*args_ptr)[i] = ir_analyze_struct_value_field_value(ira, source_instr, args, arg_field);
            if (type_is_invalid((*args_ptr)[i]->value->type)) {
                // Fix: free the partially-filled buffer instead of leaking it,
                // and do not hand a dangling pointer back to the caller
                // (callers return early on false without deallocating).
                heap::c_allocator.deallocate(*args_ptr, *args_len);
                *args_ptr = nullptr;
                *args_len = 0;
                return false;
            }
        }
    } else {
        // Non-tuple structs as an argument bundle are not implemented yet.
        ir_add_error(ira, &args->base, buf_sprintf("TODO: struct args"));
        return false;
    }
    return true;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_call_extra(IrAnalyze *ira, IrInstSrcCallExtra *instruction) {
    // `@call(options, fn, args_tuple)`: expand the tuple into an argument
    // array, then delegate to ir_analyze_call_extra.
    IrInstGen *args = instruction->args->child;
    IrInstGen **args_ptr = nullptr;
    size_t args_len = 0;
    if (!ir_extract_tuple_call_args(ira, &instruction->base.base, args, &args_ptr, &args_len)) {
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *result = ir_analyze_call_extra(ira, &instruction->base.base, instruction->options,
            instruction->fn_ref, args_ptr, args_len, instruction->result_loc);
    // The argument array is only borrowed by the analysis; free it here.
    heap::c_allocator.deallocate(args_ptr, args_len);
    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_async_call_extra(IrAnalyze *ira, IrInstSrcAsyncCallExtra *instruction) {
    // `@asyncCall(stack, ret_ptr, fn, args_tuple)`: expand the tuple into an
    // argument array, then delegate to ir_analyze_async_call_extra.
    IrInstGen *args = instruction->args->child;
    IrInstGen **args_ptr = nullptr;
    size_t args_len = 0;
    if (!ir_extract_tuple_call_args(ira, &instruction->base.base, args, &args_ptr, &args_len)) {
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *result = ir_analyze_async_call_extra(ira, &instruction->base.base, instruction->modifier,
            instruction->fn_ref, instruction->ret_ptr, instruction->new_stack, args_ptr, args_len, instruction->result_loc);
    // The argument array is only borrowed by the analysis; free it here.
    heap::c_allocator.deallocate(args_ptr, args_len);
    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_call_args(IrAnalyze *ira, IrInstSrcCallArgs *instruction) {
    // Like ir_analyze_instruction_call_extra but the arguments are already an
    // explicit list of instructions rather than a tuple value.
    IrInstGen **args_ptr = heap::c_allocator.allocate<IrInstGen *>(instruction->args_len);
    for (size_t i = 0; i < instruction->args_len; i += 1) {
        args_ptr[i] = instruction->args_ptr[i]->child;
        if (type_is_invalid(args_ptr[i]->value->type)) {
            // Fix: free the argument buffer on the error path instead of leaking it.
            heap::c_allocator.deallocate(args_ptr, instruction->args_len);
            return ira->codegen->invalid_inst_gen;
        }
    }

    IrInstGen *result = ir_analyze_call_extra(ira, &instruction->base.base, instruction->options,
            instruction->fn_ref, args_ptr, instruction->args_len, instruction->result_loc);
    heap::c_allocator.deallocate(args_ptr, instruction->args_len);
    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_call(IrAnalyze *ira, IrInstSrcCall *call_instruction) {
    // Analyzes a plain source-level call `f(a, b, ...)`. Dispatches on the type
    // of the callee: fn, bound fn (method call), or error for types/non-callables.
    IrInstGen *fn_ref = call_instruction->fn_ref->child;
    if (type_is_invalid(fn_ref->value->type))
        return ira->codegen->invalid_inst_gen;

    // A call is comptime either by explicit modifier or because the enclosing
    // scope is being inlined (comptime execution).
    bool is_comptime = (call_instruction->modifier == CallModifierCompileTime) ||
        ir_should_inline(ira->old_irb.exec, call_instruction->base.base.scope);
    // Fix: compute the effective modifier once here; the original re-declared
    // identical shadowing `modifier` locals in two branches below.
    CallModifier modifier = is_comptime ? CallModifierCompileTime : call_instruction->modifier;

    if (is_comptime || instr_is_comptime(fn_ref)) {
        if (fn_ref->value->type->id == ZigTypeIdMetaType) {
            // Calling a type, e.g. `u32(x)` — removed in favor of @as.
            ZigType *ty = ir_resolve_type(ira, fn_ref);
            if (ty == nullptr)
                return ira->codegen->invalid_inst_gen;
            ErrorMsg *msg = ir_add_error(ira, &fn_ref->base,
                buf_sprintf("type '%s' not a function", buf_ptr(&ty->name)));
            add_error_note(ira->codegen, msg, call_instruction->base.base.source_node,
                buf_sprintf("use @as builtin for type coercion"));
            return ira->codegen->invalid_inst_gen;
        } else if (fn_ref->value->type->id == ZigTypeIdFn) {
            ZigFn *fn_table_entry = ir_resolve_fn(ira, fn_ref);
            ZigType *fn_type = fn_table_entry ? fn_table_entry->type_entry : fn_ref->value->type;
            return ir_analyze_fn_call_src(ira, call_instruction, fn_table_entry, fn_type,
                fn_ref, nullptr, nullptr, modifier);
        } else if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
            // Method call syntax: the bound fn supplies the implicit first arg.
            assert(fn_ref->value->special == ConstValSpecialStatic);
            ZigFn *fn_table_entry = fn_ref->value->data.x_bound_fn.fn;
            IrInstGen *first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
            IrInst *first_arg_ptr_src = fn_ref->value->data.x_bound_fn.first_arg_src;
            return ir_analyze_fn_call_src(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
                fn_ref, first_arg_ptr, first_arg_ptr_src, modifier);
        } else {
            ir_add_error(ira, &fn_ref->base,
                buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value->type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    }

    // Runtime-known callee: only a function pointer/value is callable.
    if (fn_ref->value->type->id == ZigTypeIdFn) {
        return ir_analyze_fn_call_src(ira, call_instruction, nullptr, fn_ref->value->type,
            fn_ref, nullptr, nullptr, modifier);
    } else {
        ir_add_error(ira, &fn_ref->base,
            buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }
}
|
|
|
|
// out_val->type must be the type to read the pointer as
// if the type is different than the actual type then it does a comptime byte reinterpretation
static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node,
        ZigValue *out_val, ZigValue *ptr_val)
{
    Error err;
    assert(out_val->type != nullptr);

    ZigValue *pointee = const_ptr_pointee_unchecked(codegen, ptr_val);
    src_assert(pointee->type != nullptr, source_node);

    // Both types must have a known size before we can compare byte counts.
    if ((err = type_resolve(codegen, pointee->type, ResolveStatusSizeKnown)))
        return ErrorSemanticAnalyzeFail;
    if ((err = type_resolve(codegen, out_val->type, ResolveStatusSizeKnown)))
        return ErrorSemanticAnalyzeFail;

    size_t src_size = type_size(codegen, pointee->type);
    size_t dst_size = type_size(codegen, out_val->type);

    if (dst_size <= src_size) {
        // The pointee alone is big enough. Same size + same comptime repr is a
        // plain copy; otherwise round-trip through a byte buffer to
        // reinterpret.
        if (src_size == dst_size && types_have_same_zig_comptime_repr(codegen, out_val->type, pointee->type)) {
            copy_const_val(codegen, out_val, pointee);
            return ErrorNone;
        }
        Buf buf = BUF_INIT;
        buf_resize(&buf, src_size);
        buf_write_value_bytes(codegen, (uint8_t*)buf_ptr(&buf), pointee);
        if ((err = buf_read_value_bytes(ira, codegen, source_node, (uint8_t*)buf_ptr(&buf), out_val)))
            return err;
        buf_deinit(&buf);
        return ErrorNone;
    }

    // dst_size > src_size: the read spans beyond the single pointee. Whether
    // that is possible depends on what the const pointer points into.
    switch (ptr_val->data.x_ptr.special) {
        case ConstPtrSpecialInvalid:
            zig_unreachable();
        case ConstPtrSpecialNull:
            // Reading zero bytes from a null pointer is vacuously fine.
            if (dst_size == 0)
                return ErrorNone;
            opt_ir_add_error_node(ira, codegen, source_node,
                buf_sprintf("attempt to read %" ZIG_PRI_usize " bytes from null pointer",
                dst_size));
            return ErrorSemanticAnalyzeFail;
        case ConstPtrSpecialRef: {
            // A ref points at exactly one value; reading past it is an error.
            opt_ir_add_error_node(ira, codegen, source_node,
                buf_sprintf("attempt to read %" ZIG_PRI_usize " bytes from pointer to %s which is %" ZIG_PRI_usize " bytes",
                dst_size, buf_ptr(&pointee->type->name), src_size));
            return ErrorSemanticAnalyzeFail;
        }
        case ConstPtrSpecialSubArray: {
            ZigValue *array_val = ptr_val->data.x_ptr.data.base_array.array_val;
            assert(array_val->type->id == ZigTypeIdArray);
            if (array_val->data.x_array.special != ConstArraySpecialNone)
                zig_panic("TODO");
            // NOTE(review): src_size is not recomputed here (unlike the
            // BaseArray case below), so this branch always fires when the
            // switch is reached — the buffer-filling code after it looks
            // unreachable. Confirm intended semantics before changing.
            if (dst_size > src_size) {
                size_t elem_index = ptr_val->data.x_ptr.data.base_array.elem_index;
                opt_ir_add_error_node(ira, codegen, source_node,
                    buf_sprintf("attempt to read %" ZIG_PRI_usize " bytes from %s at index %" ZIG_PRI_usize " which is %" ZIG_PRI_usize " bytes",
                    dst_size, buf_ptr(&array_val->type->name), elem_index, src_size));
                return ErrorSemanticAnalyzeFail;
            }
            // Serialize enough elements to cover dst_size (rounded up), then
            // reinterpret the bytes as out_val->type.
            size_t elem_size = src_size;
            size_t elem_count = (dst_size % elem_size == 0) ? (dst_size / elem_size) : (dst_size / elem_size + 1);
            Buf buf = BUF_INIT;
            buf_resize(&buf, elem_count * elem_size);
            for (size_t i = 0; i < elem_count; i += 1) {
                ZigValue *elem_val = &array_val->data.x_array.data.s_none.elements[i];
                buf_write_value_bytes(codegen, (uint8_t*)buf_ptr(&buf) + (i * elem_size), elem_val);
            }
            if ((err = buf_read_value_bytes(ira, codegen, source_node, (uint8_t*)buf_ptr(&buf), out_val)))
                return err;
            buf_deinit(&buf);
            return ErrorNone;
        }
        case ConstPtrSpecialBaseArray: {
            ZigValue *array_val = ptr_val->data.x_ptr.data.base_array.array_val;
            assert(array_val->type->id == ZigTypeIdArray);
            if (array_val->data.x_array.special != ConstArraySpecialNone)
                zig_panic("TODO");
            size_t elem_size = src_size;
            size_t elem_index = ptr_val->data.x_ptr.data.base_array.elem_index;
            // The readable span is from elem_index to the end of the array.
            src_size = elem_size * (array_val->type->data.array.len - elem_index);
            if (dst_size > src_size) {
                opt_ir_add_error_node(ira, codegen, source_node,
                    buf_sprintf("attempt to read %" ZIG_PRI_usize " bytes from %s at index %" ZIG_PRI_usize " which is %" ZIG_PRI_usize " bytes",
                    dst_size, buf_ptr(&array_val->type->name), elem_index, src_size));
                return ErrorSemanticAnalyzeFail;
            }
            // Serialize enough elements (starting at elem_index) to cover
            // dst_size, then reinterpret the bytes as out_val->type.
            size_t elem_count = (dst_size % elem_size == 0) ? (dst_size / elem_size) : (dst_size / elem_size + 1);
            Buf buf = BUF_INIT;
            buf_resize(&buf, elem_count * elem_size);
            for (size_t i = 0; i < elem_count; i += 1) {
                ZigValue *elem_val = &array_val->data.x_array.data.s_none.elements[elem_index + i];
                buf_write_value_bytes(codegen, (uint8_t*)buf_ptr(&buf) + (i * elem_size), elem_val);
            }
            if ((err = buf_read_value_bytes(ira, codegen, source_node, (uint8_t*)buf_ptr(&buf), out_val)))
                return err;
            buf_deinit(&buf);
            return ErrorNone;
        }
        case ConstPtrSpecialBaseStruct:
        case ConstPtrSpecialBaseErrorUnionCode:
        case ConstPtrSpecialBaseErrorUnionPayload:
        case ConstPtrSpecialBaseOptionalPayload:
        case ConstPtrSpecialDiscard:
        case ConstPtrSpecialHardCodedAddr:
        case ConstPtrSpecialFunction:
            // Oversized reads through these pointer kinds are not implemented.
            zig_panic("TODO");
    }
    zig_unreachable();
}
|
|
|
|
static IrInstGen *ir_analyze_optional_type(IrAnalyze *ira, IrInstSrcUnOp *instruction) {
    // Analyzes `?T`: produce a lazily-evaluated optional-type value so the
    // payload type itself is allowed to still be lazy at this point.
    IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_type);
    result->value->special = ConstValSpecialLazy;

    LazyValueOptType *lazy = heap::c_allocator.create<LazyValueOptType>();
    lazy->base.id = LazyValueIdOptType;
    lazy->ira = ira;
    ira_ref(ira);
    lazy->payload_type = instruction->value->child;
    result->value->data.x_lazy = &lazy->base;

    // Validate up front that the operand will resolve to a type at all.
    if (ir_resolve_type_lazy(ira, lazy->payload_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    return result;
}
|
|
|
|
static ErrorMsg *ir_eval_negation_scalar(IrAnalyze *ira, IrInst* source_instr, ZigType *scalar_type,
        ZigValue *operand_val, ZigValue *scalar_out_val, bool is_wrap_op)
{
    // Comptime-evaluates scalar negation (`-x` or `-%x`) into scalar_out_val.
    // Returns nullptr on success, or the emitted error message on failure.
    const bool is_float = scalar_type->id == ZigTypeIdFloat || scalar_type->id == ZigTypeIdComptimeFloat;
    const bool is_signed_int = scalar_type->id == ZigTypeIdInt && scalar_type->data.integral.is_signed;
    const bool is_comptime_int = scalar_type->id == ZigTypeIdComptimeInt;

    // Negation is defined for signed ints, comptime ints, and (non-wrapping
    // only) floats.
    if (!(is_signed_int || is_comptime_int || (is_float && !is_wrap_op))) {
        const char *fmt = is_wrap_op ? "invalid wrapping negation type: '%s'" : "invalid negation type: '%s'";
        return ir_add_error(ira, source_instr, buf_sprintf(fmt, buf_ptr(&scalar_type->name)));
    }

    if (is_float) {
        float_negate(scalar_out_val, operand_val);
    } else if (is_wrap_op) {
        bigint_negate_wrap(&scalar_out_val->data.x_bigint, &operand_val->data.x_bigint,
                scalar_type->data.integral.bit_count);
    } else {
        bigint_negate(&scalar_out_val->data.x_bigint, &operand_val->data.x_bigint);
    }

    scalar_out_val->type = scalar_type;
    scalar_out_val->special = ConstValSpecialStatic;

    // Overflow only applies to non-wrapping negation of fixed-width integers.
    if (is_wrap_op || is_float || is_comptime_int)
        return nullptr;

    if (!bigint_fits_in_bits(&scalar_out_val->data.x_bigint, scalar_type->data.integral.bit_count, true))
        return ir_add_error(ira, source_instr, buf_sprintf("negation caused overflow"));

    return nullptr;
}
|
|
|
|
static IrInstGen *ir_analyze_negation(IrAnalyze *ira, IrInstSrcUnOp *instruction) {
    // Analyzes `-x` / `-%x` for scalars and vectors. Comptime operands are
    // folded via ir_eval_negation_scalar; runtime operands lower to a gen
    // negation instruction.
    IrInstGen *value = instruction->value->child;
    ZigType *expr_type = value->value->type;
    if (type_is_invalid(expr_type))
        return ira->codegen->invalid_inst_gen;

    bool is_wrap_op = (instruction->op_id == IrUnOpNegationWrap);

    // For vectors, type checks apply to the element type.
    ZigType *scalar_type = (expr_type->id == ZigTypeIdVector) ?
        expr_type->data.vector.elem_type : expr_type;

    switch (scalar_type->id) {
        case ZigTypeIdComptimeInt:
        case ZigTypeIdFloat:
        case ZigTypeIdComptimeFloat:
            break;
        case ZigTypeIdInt:
            // Unsigned fixed-width ints only permit wrapping negation.
            if (is_wrap_op || scalar_type->data.integral.is_signed)
                break;
            ZIG_FALLTHROUGH;
        default:
            ir_add_error(ira, &instruction->base.base,
                buf_sprintf("negation of type '%s'", buf_ptr(&scalar_type->name)));
            return ira->codegen->invalid_inst_gen;
    }

    if (instr_is_comptime(value)) {
        ZigValue *operand_val = ir_resolve_const(ira, value, UndefBad);
        if (!operand_val)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result_instruction = ir_const(ira, &instruction->base.base, expr_type);
        ZigValue *out_val = result_instruction->value;
        if (expr_type->id == ZigTypeIdVector) {
            // Materialize both operand and result as element arrays, then
            // negate element-wise.
            expand_undef_array(ira->codegen, operand_val);
            out_val->special = ConstValSpecialUndef;
            expand_undef_array(ira->codegen, out_val);
            size_t len = expr_type->data.vector.len;
            for (size_t i = 0; i < len; i += 1) {
                ZigValue *scalar_operand_val = &operand_val->data.x_array.data.s_none.elements[i];
                ZigValue *scalar_out_val = &out_val->data.x_array.data.s_none.elements[i];
                assert(scalar_operand_val->type == scalar_type);
                assert(scalar_out_val->type == scalar_type);
                ErrorMsg *msg = ir_eval_negation_scalar(ira, &instruction->base.base, scalar_type,
                        scalar_operand_val, scalar_out_val, is_wrap_op);
                if (msg != nullptr) {
                    // Attach which lane failed (e.g. overflow) to the error.
                    add_error_note(ira->codegen, msg, instruction->base.base.source_node,
                        buf_sprintf("when computing vector element at index %" ZIG_PRI_usize, i));
                    return ira->codegen->invalid_inst_gen;
                }
            }
            out_val->type = expr_type;
            out_val->special = ConstValSpecialStatic;
        } else {
            if (ir_eval_negation_scalar(ira, &instruction->base.base, scalar_type, operand_val, out_val,
                    is_wrap_op) != nullptr)
            {
                return ira->codegen->invalid_inst_gen;
            }
        }
        return result_instruction;
    }

    return ir_build_negation(ira, &instruction->base.base, value, expr_type, is_wrap_op);
}
|
|
|
|
static IrInstGen *ir_analyze_bin_not(IrAnalyze *ira, IrInstSrcUnOp *instruction) {
    // Analyzes bitwise not (`~x`) for integers and integer vectors.
    IrInstGen *operand = instruction->value->child;
    ZigType *operand_type = operand->value->type;
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_inst_gen;

    const bool is_vector = operand_type->id == ZigTypeIdVector;
    ZigType *scalar_type = is_vector ? operand_type->data.vector.elem_type : operand_type;

    // Only fixed-width integers (or vectors thereof) support `~`.
    if (scalar_type->id != ZigTypeIdInt) {
        ir_add_error(ira, &instruction->base.base,
            buf_sprintf("unable to perform binary not operation on type '%s'", buf_ptr(&operand_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Runtime operand: emit a gen instruction.
    if (!instr_is_comptime(operand))
        return ir_build_binary_not(ira, &instruction->base.base, operand, operand_type);

    // Comptime operand: fold the result now.
    ZigValue *operand_val = ir_resolve_const(ira, operand, UndefBad);
    if (operand_val == nullptr)
        return ira->codegen->invalid_inst_gen;

    IrInstGen *result = ir_const(ira, &instruction->base.base, operand_type);

    if (is_vector) {
        // Materialize both sides as element arrays and invert lane by lane.
        expand_undef_array(ira->codegen, operand_val);
        result->value->special = ConstValSpecialUndef;
        expand_undef_array(ira->codegen, result->value);

        const size_t lane_count = operand_type->data.vector.len;
        for (size_t lane = 0; lane < lane_count; lane += 1) {
            ZigValue *src_lane = &operand_val->data.x_array.data.s_none.elements[lane];
            ZigValue *dst_lane = &result->value->data.x_array.data.s_none.elements[lane];

            dst_lane->type = scalar_type;
            dst_lane->special = ConstValSpecialStatic;
            bigint_not(&dst_lane->data.x_bigint, &src_lane->data.x_bigint,
                    scalar_type->data.integral.bit_count, scalar_type->data.integral.is_signed);
        }
    } else {
        bigint_not(&result->value->data.x_bigint, &operand_val->data.x_bigint,
                scalar_type->data.integral.bit_count, scalar_type->data.integral.is_signed);
    }

    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstSrcUnOp *instruction) {
    // Dispatches the unary operators: `~`, `-`, `-%`, `.*` (dereference), `?`.
    IrUnOp op_id = instruction->op_id;
    switch (op_id) {
        case IrUnOpInvalid:
            zig_unreachable();
        case IrUnOpBinNot:
            return ir_analyze_bin_not(ira, instruction);
        case IrUnOpNegation:
        case IrUnOpNegationWrap:
            return ir_analyze_negation(ira, instruction);
        case IrUnOpDereference: {
            IrInstGen *ptr = instruction->value->child;
            if (type_is_invalid(ptr->value->type))
                return ira->codegen->invalid_inst_gen;
            ZigType *ptr_type = ptr->value->type;
            // `[*]T` has no single element to dereference; require indexing.
            if (ptr_type->id == ZigTypeIdPointer && ptr_type->data.pointer.ptr_len == PtrLenUnknown) {
                ir_add_error_node(ira, instruction->base.base.source_node,
                    buf_sprintf("index syntax required for unknown-length pointer type '%s'",
                        buf_ptr(&ptr_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            IrInstGen *result = ir_get_deref(ira, &instruction->base.base, ptr, instruction->result_loc);
            if (type_is_invalid(result->value->type))
                return ira->codegen->invalid_inst_gen;

            // If the result needs to be an lvalue, type check it
            if (instruction->lval != LValNone && result->value->type->id != ZigTypeIdPointer) {
                ir_add_error(ira, &instruction->base.base,
                    buf_sprintf("attempt to dereference non-pointer type '%s'", buf_ptr(&result->value->type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            return result;
        }
        case IrUnOpOptional:
            return ir_analyze_optional_type(ira, instruction);
    }
    zig_unreachable();
}
|
|
|
|
static void ir_push_resume(IrAnalyze *ira, IrSuspendPosition pos) {
    // Queue `pos` for later resumption, unless its source basic block is
    // already on the resume stack (each block is pushed at most once).
    IrBasicBlockSrc *bb = ira->old_irb.exec->basic_block_list.at(pos.basic_block_index);
    if (!bb->in_resume_stack) {
        ira->resume_stack.append(pos);
        bb->in_resume_stack = true;
    }
}
|
|
|
|
static void ir_push_resume_block(IrAnalyze *ira, IrBasicBlockSrc *old_bb) {
    // Queue the start of `old_bb` for resumption, but only while a resume
    // traversal is already in progress (non-empty resume stack).
    if (ira->resume_stack.length == 0)
        return;
    ir_push_resume(ira, {old_bb->index, 0});
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_br(IrAnalyze *ira, IrInstSrcBr *br_instruction) {
    // Analyzes an unconditional branch. At comptime (or when the destination
    // has a single predecessor and no suspend), the destination block is
    // inlined into the current analysis; otherwise a runtime branch is built.
    IrBasicBlockSrc *old_dest_block = br_instruction->dest_block;

    bool is_comptime;
    if (!ir_resolve_comptime(ira, br_instruction->is_comptime->child, &is_comptime))
        return ir_unreach_error(ira);

    if (is_comptime || (old_dest_block->ref_count == 1 && old_dest_block->suspend_instruction_ref == nullptr))
        return ir_inline_bb(ira, &br_instruction->base.base, old_dest_block);

    IrBasicBlockGen *new_bb = ir_get_new_bb_runtime(ira, old_dest_block, &br_instruction->base.base);
    if (new_bb == nullptr)
        return ir_unreach_error(ira);

    // Make sure the destination gets analyzed later if a resume traversal is active.
    ir_push_resume_block(ira, old_dest_block);

    IrInstGen *result = ir_build_br_gen(ira, &br_instruction->base.base, new_bb);
    return ir_finish_anal(ira, result);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_cond_br(IrAnalyze *ira, IrInstSrcCondBr *cond_br_instruction) {
    // Analyzes a conditional branch. A comptime-known condition collapses to
    // an unconditional branch (possibly inlining the destination); otherwise
    // both destinations are materialized and a runtime cond-br is built.
    IrInstGen *condition = cond_br_instruction->condition->child;
    if (type_is_invalid(condition->value->type))
        return ir_unreach_error(ira);

    bool is_comptime;
    if (!ir_resolve_comptime(ira, cond_br_instruction->is_comptime->child, &is_comptime))
        return ir_unreach_error(ira);

    // The condition must coerce to bool.
    ZigType *bool_type = ira->codegen->builtin_types.entry_bool;
    IrInstGen *casted_condition = ir_implicit_cast(ira, condition, bool_type);
    if (type_is_invalid(casted_condition->value->type))
        return ir_unreach_error(ira);

    if (is_comptime || instr_is_comptime(casted_condition)) {
        bool cond_is_true;
        if (!ir_resolve_bool(ira, casted_condition, &cond_is_true))
            return ir_unreach_error(ira);

        // Only the taken branch is analyzed; the other side is dead.
        IrBasicBlockSrc *old_dest_block = cond_is_true ?
            cond_br_instruction->then_block : cond_br_instruction->else_block;

        if (is_comptime || (old_dest_block->ref_count == 1 && old_dest_block->suspend_instruction_ref == nullptr))
            return ir_inline_bb(ira, &cond_br_instruction->base.base, old_dest_block);

        IrBasicBlockGen *new_dest_block = ir_get_new_bb_runtime(ira, old_dest_block, &cond_br_instruction->base.base);
        if (new_dest_block == nullptr)
            return ir_unreach_error(ira);

        ir_push_resume_block(ira, old_dest_block);

        IrInstGen *result = ir_build_br_gen(ira, &cond_br_instruction->base.base, new_dest_block);
        return ir_finish_anal(ira, result);
    }

    assert(cond_br_instruction->then_block != cond_br_instruction->else_block);
    IrBasicBlockGen *new_then_block = ir_get_new_bb_runtime(ira, cond_br_instruction->then_block, &cond_br_instruction->base.base);
    if (new_then_block == nullptr)
        return ir_unreach_error(ira);

    IrBasicBlockGen *new_else_block = ir_get_new_bb_runtime(ira, cond_br_instruction->else_block, &cond_br_instruction->base.base);
    if (new_else_block == nullptr)
        return ir_unreach_error(ira);

    // Push else before then so the then-block is resumed first (LIFO stack).
    ir_push_resume_block(ira, cond_br_instruction->else_block);
    ir_push_resume_block(ira, cond_br_instruction->then_block);

    IrInstGen *result = ir_build_cond_br_gen(ira, &cond_br_instruction->base.base,
            casted_condition, new_then_block, new_else_block);
    return ir_finish_anal(ira, result);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_unreachable(IrAnalyze *ira,
        IrInstSrcUnreachable *unreachable_instruction)
{
    // Reaching `unreachable` while the scope is being comptime-inlined is a
    // compile error; at runtime it lowers to a gen unreachable instruction.
    const bool comptime_executed =
        ir_should_inline(ira->old_irb.exec, unreachable_instruction->base.base.scope);
    if (comptime_executed) {
        ir_add_error(ira, &unreachable_instruction->base.base, buf_sprintf("reached unreachable code"));
        return ir_unreach_error(ira);
    }

    return ir_finish_anal(ira, ir_build_unreachable_gen(ira, &unreachable_instruction->base.base));
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstSrcPhi *phi_instruction) {
|
|
Error err;
|
|
|
|
if (ira->const_predecessor_bb) {
|
|
for (size_t i = 0; i < phi_instruction->incoming_count; i += 1) {
|
|
IrBasicBlockSrc *predecessor = phi_instruction->incoming_blocks[i];
|
|
if (predecessor != ira->const_predecessor_bb)
|
|
continue;
|
|
IrInstGen *value = phi_instruction->incoming_values[i]->child;
|
|
assert(value->value->type);
|
|
if (type_is_invalid(value->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (value->value->special != ConstValSpecialRuntime) {
|
|
IrInstGen *result = ir_const(ira, &phi_instruction->base.base, nullptr);
|
|
copy_const_val(ira->codegen, result->value, value->value);
|
|
return result;
|
|
} else {
|
|
return value;
|
|
}
|
|
}
|
|
zig_unreachable();
|
|
}
|
|
|
|
ResultLocPeerParent *peer_parent = phi_instruction->peer_parent;
|
|
if (peer_parent != nullptr && !peer_parent->skipped && !peer_parent->done_resuming &&
|
|
peer_parent->peers.length >= 2)
|
|
{
|
|
if (peer_parent->resolved_type == nullptr) {
|
|
IrInstGen **instructions = heap::c_allocator.allocate<IrInstGen *>(peer_parent->peers.length);
|
|
for (size_t i = 0; i < peer_parent->peers.length; i += 1) {
|
|
ResultLocPeer *this_peer = peer_parent->peers.at(i);
|
|
|
|
IrInstGen *gen_instruction = this_peer->base.gen_instruction;
|
|
if (gen_instruction == nullptr) {
|
|
// unreachable instructions will cause implicit_elem_type to be null
|
|
if (this_peer->base.implicit_elem_type == nullptr) {
|
|
instructions[i] = ir_const_unreachable(ira, &this_peer->base.source_instruction->base);
|
|
} else {
|
|
instructions[i] = ir_const(ira, &this_peer->base.source_instruction->base,
|
|
this_peer->base.implicit_elem_type);
|
|
instructions[i]->value->special = ConstValSpecialRuntime;
|
|
}
|
|
} else {
|
|
instructions[i] = gen_instruction;
|
|
}
|
|
|
|
}
|
|
ZigType *expected_type = ir_result_loc_expected_type(ira, &phi_instruction->base.base, peer_parent->parent);
|
|
peer_parent->resolved_type = ir_resolve_peer_types(ira,
|
|
peer_parent->base.source_instruction->base.source_node, expected_type, instructions,
|
|
peer_parent->peers.length);
|
|
if (type_is_invalid(peer_parent->resolved_type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
// the logic below assumes there are no instructions in the new current basic block yet
|
|
ir_assert(ira->new_irb.current_basic_block->instruction_list.length == 0, &phi_instruction->base.base);
|
|
|
|
// In case resolving the parent activates a suspend, do it now
|
|
IrInstGen *parent_result_loc = ir_resolve_result(ira, &phi_instruction->base.base, peer_parent->parent,
|
|
peer_parent->resolved_type, nullptr, false, true);
|
|
if (parent_result_loc != nullptr &&
|
|
(type_is_invalid(parent_result_loc->value->type) || parent_result_loc->value->type->id == ZigTypeIdUnreachable))
|
|
{
|
|
return parent_result_loc;
|
|
}
|
|
// If the above code generated any instructions in the current basic block, we need
|
|
// to move them to the peer parent predecessor.
|
|
ZigList<IrInstGen *> instrs_to_move = {};
|
|
while (ira->new_irb.current_basic_block->instruction_list.length != 0) {
|
|
instrs_to_move.append(ira->new_irb.current_basic_block->instruction_list.pop());
|
|
}
|
|
if (instrs_to_move.length != 0) {
|
|
IrBasicBlockGen *predecessor = peer_parent->base.source_instruction->child->owner_bb;
|
|
IrInstGen *branch_instruction = predecessor->instruction_list.pop();
|
|
ir_assert(branch_instruction->value->type->id == ZigTypeIdUnreachable, &phi_instruction->base.base);
|
|
while (instrs_to_move.length != 0) {
|
|
predecessor->instruction_list.append(instrs_to_move.pop());
|
|
}
|
|
predecessor->instruction_list.append(branch_instruction);
|
|
instrs_to_move.deinit();
|
|
}
|
|
}
|
|
|
|
IrSuspendPosition suspend_pos;
|
|
ira_suspend(ira, &phi_instruction->base.base, nullptr, &suspend_pos);
|
|
ir_push_resume(ira, suspend_pos);
|
|
|
|
for (size_t i = 0; i < peer_parent->peers.length; i += 1) {
|
|
ResultLocPeer *opposite_peer = peer_parent->peers.at(peer_parent->peers.length - i - 1);
|
|
if (opposite_peer->base.implicit_elem_type != nullptr &&
|
|
opposite_peer->base.implicit_elem_type->id != ZigTypeIdUnreachable)
|
|
{
|
|
ir_push_resume(ira, opposite_peer->suspend_pos);
|
|
}
|
|
}
|
|
|
|
peer_parent->done_resuming = true;
|
|
return ira_resume(ira);
|
|
}
|
|
|
|
ZigList<IrBasicBlockGen*> new_incoming_blocks = {0};
|
|
ZigList<IrInstGen*> new_incoming_values = {0};
|
|
|
|
for (size_t i = 0; i < phi_instruction->incoming_count; i += 1) {
|
|
IrBasicBlockSrc *predecessor = phi_instruction->incoming_blocks[i];
|
|
if (predecessor->ref_count == 0)
|
|
continue;
|
|
|
|
|
|
IrInstSrc *old_value = phi_instruction->incoming_values[i];
|
|
assert(old_value);
|
|
IrInstGen *new_value = old_value->child;
|
|
if (!new_value || new_value->value->type->id == ZigTypeIdUnreachable || predecessor->child == nullptr)
|
|
continue;
|
|
|
|
if (type_is_invalid(new_value->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
|
|
assert(predecessor->child);
|
|
new_incoming_blocks.append(predecessor->child);
|
|
new_incoming_values.append(new_value);
|
|
}
|
|
|
|
if (new_incoming_blocks.length == 0) {
|
|
IrInstGen *result = ir_build_unreachable_gen(ira, &phi_instruction->base.base);
|
|
return ir_finish_anal(ira, result);
|
|
}
|
|
|
|
if (new_incoming_blocks.length == 1) {
|
|
IrInstGen *incoming_value = new_incoming_values.at(0);
|
|
new_incoming_blocks.deinit();
|
|
new_incoming_values.deinit();
|
|
return incoming_value;
|
|
}
|
|
|
|
ZigType *resolved_type = nullptr;
|
|
if (peer_parent != nullptr) {
|
|
bool peer_parent_has_type;
|
|
if ((err = ir_result_has_type(ira, peer_parent->parent, &peer_parent_has_type)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
if (peer_parent_has_type) {
|
|
if (peer_parent->parent->id == ResultLocIdReturn) {
|
|
resolved_type = ira->explicit_return_type;
|
|
} else if (peer_parent->parent->id == ResultLocIdCast) {
|
|
resolved_type = ir_resolve_type(ira, peer_parent->parent->source_instruction->child);
|
|
} else if (peer_parent->parent->resolved_loc) {
|
|
ZigType *resolved_loc_ptr_type = peer_parent->parent->resolved_loc->value->type;
|
|
ir_assert(resolved_loc_ptr_type->id == ZigTypeIdPointer, &phi_instruction->base.base);
|
|
resolved_type = resolved_loc_ptr_type->data.pointer.child_type;
|
|
}
|
|
|
|
if (resolved_type != nullptr && type_is_invalid(resolved_type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
|
|
if (resolved_type == nullptr) {
|
|
resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.base.source_node, nullptr,
|
|
new_incoming_values.items, new_incoming_values.length);
|
|
if (type_is_invalid(resolved_type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
switch (type_has_one_possible_value(ira->codegen, resolved_type)) {
|
|
case OnePossibleValueInvalid:
|
|
return ira->codegen->invalid_inst_gen;
|
|
case OnePossibleValueYes:
|
|
return ir_const_move(ira, &phi_instruction->base.base,
|
|
get_the_one_possible_value(ira->codegen, resolved_type));
|
|
case OnePossibleValueNo:
|
|
break;
|
|
}
|
|
|
|
switch (type_requires_comptime(ira->codegen, resolved_type)) {
|
|
case ReqCompTimeInvalid:
|
|
return ira->codegen->invalid_inst_gen;
|
|
case ReqCompTimeYes:
|
|
ir_add_error(ira, &phi_instruction->base.base,
|
|
buf_sprintf("values of type '%s' must be comptime known", buf_ptr(&resolved_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
case ReqCompTimeNo:
|
|
break;
|
|
}
|
|
|
|
bool all_stack_ptrs = (resolved_type->id == ZigTypeIdPointer);
|
|
|
|
// cast all values to the resolved type. however we can't put cast instructions in front of the phi instruction.
|
|
// so we go back and insert the casts as the last instruction in the corresponding predecessor blocks, and
|
|
// then make sure the branch instruction is preserved.
|
|
IrBasicBlockGen *cur_bb = ira->new_irb.current_basic_block;
|
|
for (size_t i = 0; i < new_incoming_values.length; i += 1) {
|
|
IrInstGen *new_value = new_incoming_values.at(i);
|
|
IrBasicBlockGen *predecessor = new_incoming_blocks.at(i);
|
|
ir_assert(predecessor->instruction_list.length != 0, &phi_instruction->base.base);
|
|
IrInstGen *branch_instruction = predecessor->instruction_list.pop();
|
|
ir_set_cursor_at_end_gen(&ira->new_irb, predecessor);
|
|
IrInstGen *casted_value = ir_implicit_cast(ira, new_value, resolved_type);
|
|
if (type_is_invalid(casted_value->value->type)) {
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
new_incoming_values.items[i] = casted_value;
|
|
predecessor->instruction_list.append(branch_instruction);
|
|
|
|
if (all_stack_ptrs && (casted_value->value->special != ConstValSpecialRuntime ||
|
|
casted_value->value->data.rh_ptr != RuntimeHintPtrStack))
|
|
{
|
|
all_stack_ptrs = false;
|
|
}
|
|
}
|
|
ir_set_cursor_at_end_gen(&ira->new_irb, cur_bb);
|
|
|
|
IrInstGen *result = ir_build_phi_gen(ira, &phi_instruction->base.base,
|
|
new_incoming_blocks.length, new_incoming_blocks.items, new_incoming_values.items, resolved_type);
|
|
|
|
if (all_stack_ptrs) {
|
|
assert(result->value->special == ConstValSpecialRuntime);
|
|
result->value->data.rh_ptr = RuntimeHintPtrStack;
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
// Resolve a source-level variable reference into a pointer to the variable's
// storage. If the reference crosses a nested function definition boundary and
// the value is not comptime-known, that is a capture error: stage1 has no
// closures, so runtime variables cannot be accessed from inner functions.
static IrInstGen *ir_analyze_instruction_var_ptr(IrAnalyze *ira, IrInstSrcVarPtr *instruction) {
    ZigVar *the_var = instruction->var;
    IrInstGen *var_ptr = ir_get_var_ptr(ira, &instruction->base.base, the_var);

    // Comptime-known values may freely cross function definition scopes;
    // only a runtime value crossing such a scope is rejected.
    bool crosses_fndef = (instruction->crossed_fndef_scope != nullptr);
    if (!crosses_fndef || instr_is_comptime(var_ptr))
        return var_ptr;

    ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
        buf_sprintf("'%s' not accessible from inner function", the_var->name));
    add_error_note(ira->codegen, msg, instruction->crossed_fndef_scope->base.source_node,
        buf_sprintf("crossed function definition here"));
    add_error_note(ira->codegen, msg, the_var->decl_node,
        buf_sprintf("declared here"));
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
// Return a pointer type identical to `ptr_type` except that its alignment is
// replaced with `new_align`. All other pointer attributes are carried over.
static ZigType *adjust_ptr_align(CodeGen *g, ZigType *ptr_type, uint32_t new_align) {
    assert(ptr_type->id == ZigTypeIdPointer);
    const auto &pd = ptr_type->data.pointer;
    return get_pointer_to_type_extra2(g,
            pd.child_type,
            pd.is_const, pd.is_volatile,
            pd.ptr_len,
            new_align,
            pd.bit_offset_in_host, pd.host_int_bytes,
            pd.allow_zero,
            pd.vector_index,
            pd.inferred_struct_field,
            pd.sentinel);
}
|
|
|
|
// Return a pointer type identical to `ptr_type` except that its sentinel
// value is replaced with `new_sentinel`. All other attributes are preserved.
static ZigType *adjust_ptr_sentinel(CodeGen *g, ZigType *ptr_type, ZigValue *new_sentinel) {
    assert(ptr_type->id == ZigTypeIdPointer);
    const auto &pd = ptr_type->data.pointer;
    return get_pointer_to_type_extra2(g,
            pd.child_type,
            pd.is_const, pd.is_volatile,
            pd.ptr_len,
            pd.explicit_alignment,
            pd.bit_offset_in_host, pd.host_int_bytes,
            pd.allow_zero,
            pd.vector_index,
            pd.inferred_struct_field,
            new_sentinel);
}
|
|
|
|
// Return a slice type identical to `slice_type` except that its element
// pointer field has alignment `new_align`.
static ZigType *adjust_slice_align(CodeGen *g, ZigType *slice_type, uint32_t new_align) {
    assert(is_slice(slice_type));
    ZigType *elem_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
    return get_slice_type(g, adjust_ptr_align(g, elem_ptr_type, new_align));
}
|
|
|
|
// Return a pointer type identical to `ptr_type` except with pointer length
// `ptr_len` (single-item, many-item, etc.). A sentinel is only meaningful on
// unknown-length pointers, so it is dropped for any other length kind.
static ZigType *adjust_ptr_len(CodeGen *g, ZigType *ptr_type, PtrLen ptr_len) {
    assert(ptr_type->id == ZigTypeIdPointer);
    const auto &pd = ptr_type->data.pointer;
    ZigValue *sentinel = (ptr_len == PtrLenUnknown) ? pd.sentinel : nullptr;
    return get_pointer_to_type_extra2(g,
            pd.child_type,
            pd.is_const, pd.is_volatile,
            ptr_len,
            pd.explicit_alignment,
            pd.bit_offset_in_host, pd.host_int_bytes,
            pd.allow_zero,
            pd.vector_index,
            pd.inferred_struct_field,
            sentinel);
}
|
|
|
|
// Return a pointer type identical to `ptr_type` except with the allow-zero
// attribute set to `allow_zero`. All other attributes are preserved.
static ZigType *adjust_ptr_allow_zero(CodeGen *g, ZigType *ptr_type, bool allow_zero) {
    assert(ptr_type->id == ZigTypeIdPointer);
    const auto &pd = ptr_type->data.pointer;
    return get_pointer_to_type_extra2(g,
            pd.child_type,
            pd.is_const, pd.is_volatile,
            pd.ptr_len,
            pd.explicit_alignment,
            pd.bit_offset_in_host, pd.host_int_bytes,
            allow_zero,
            pd.vector_index,
            pd.inferred_struct_field,
            pd.sentinel);
}
|
|
|
|
// Return a pointer type identical to `ptr_type` except with constness set to
// `is_const`. All other attributes are preserved.
static ZigType *adjust_ptr_const(CodeGen *g, ZigType *ptr_type, bool is_const) {
    assert(ptr_type->id == ZigTypeIdPointer);
    const auto &pd = ptr_type->data.pointer;
    return get_pointer_to_type_extra2(g,
            pd.child_type,
            is_const, pd.is_volatile,
            pd.ptr_len,
            pd.explicit_alignment,
            pd.bit_offset_in_host, pd.host_int_bytes,
            pd.allow_zero,
            pd.vector_index,
            pd.inferred_struct_field,
            pd.sentinel);
}
|
|
|
|
// Compute the best alignment that can be guaranteed for element `elem_index`
// of an array of `elem_type`, given that the base pointer is known to be
// aligned to `base_ptr_align`. Writes the chosen alignment into `*result`.
//
// A base_ptr_align of 0 means "ABI/default alignment" in this codebase's
// pointer representation, and is passed through unchanged.
// Returns an Error if the element type's size cannot be resolved.
static Error compute_elem_align(IrAnalyze *ira, ZigType *elem_type, uint32_t base_ptr_align,
        uint64_t elem_index, uint32_t *result)
{
    Error err;

    // 0 encodes "no explicit alignment"; nothing to compute.
    if (base_ptr_align == 0) {
        *result = 0;
        return ErrorNone;
    }

    // figure out the largest alignment possible
    // The element type's size must be known before type_size/get_abi_alignment
    // can be queried.
    if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusSizeKnown)))
        return err;

    uint64_t elem_size = type_size(ira->codegen, elem_type);
    uint64_t abi_align = get_abi_alignment(ira->codegen, elem_type);
    uint64_t ptr_align = base_ptr_align;

    // ABI alignment is always a safe lower bound for the answer.
    uint64_t chosen_align = abi_align;
    if (ptr_align >= abi_align) {
        // The base pointer is over-aligned. Starting from the full base
        // alignment and halving, find the largest power of two that the
        // element's byte offset (elem_index * elem_size) is a multiple of;
        // that alignment is then guaranteed for this element too.
        while (ptr_align > abi_align) {
            if ((elem_index * elem_size) % ptr_align == 0) {
                chosen_align = ptr_align;
                break;
            }
            ptr_align >>= 1;
        }
    } else if (elem_size >= ptr_align && elem_size % ptr_align == 0) {
        // Under-aligned base pointer: every element stride preserves the
        // base alignment, so that is the best we can promise.
        chosen_align = ptr_align;
    } else {
        // can't get here because guaranteed elem_size >= abi_align
        zig_unreachable();
    }

    *result = chosen_align;
    return ErrorNone;
}
|
|
|
|
// Analyze an element-pointer instruction (`&array[index]` and friends).
// Computes the result pointer type from the indexed container (array,
// many-item pointer, slice, vector, tuple, or inferred struct literal),
// performs comptime bounds checking and constant folding when both the
// container pointer and the index are comptime-known, and otherwise emits a
// runtime elem_ptr instruction with the appropriate safety check flag.
static IrInstGen *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstSrcElemPtr *elem_ptr_instruction) {
    Error err;
    IrInstGen *array_ptr = elem_ptr_instruction->array_ptr->child;
    if (type_is_invalid(array_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *elem_index = elem_ptr_instruction->elem_index->child;
    if (type_is_invalid(elem_index->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigValue *orig_array_ptr_val = array_ptr->value;

    ZigType *ptr_type = orig_array_ptr_val->type;
    assert(ptr_type->id == ZigTypeIdPointer);

    ZigType *array_type = ptr_type->data.pointer.child_type;

    // At first return_type will be the pointer type we want to return, except with an optimistic alignment.
    // We will adjust return_type's alignment before returning it.
    ZigType *return_type;

    if (type_is_invalid(array_type))
        return ira->codegen->invalid_inst_gen;

    // Auto-dereference one level: indexing through a single-item pointer to
    // an array (*[N]T) indexes the pointed-to array.
    if (array_type->id == ZigTypeIdPointer &&
        array_type->data.pointer.ptr_len == PtrLenSingle &&
        array_type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        IrInstGen *ptr_value = ir_get_deref(ira, &elem_ptr_instruction->base.base,
            array_ptr, nullptr);
        if (type_is_invalid(ptr_value->value->type))
            return ira->codegen->invalid_inst_gen;

        array_type = array_type->data.pointer.child_type;
        ptr_type = ptr_type->data.pointer.child_type;

        orig_array_ptr_val = ptr_value->value;
    }

    // Dispatch on the kind of container being indexed to compute return_type
    // (or to return directly for tuple / inferred-struct access).
    if (array_type->id == ZigTypeIdArray) {
        if(array_type->data.array.len == 0 && array_type->data.array.sentinel == nullptr){
            ir_add_error(ira, &elem_ptr_instruction->base.base, buf_sprintf("accessing a zero length array is not allowed"));
            return ira->codegen->invalid_inst_gen;
        }

        ZigType *child_type = array_type->data.array.child_type;
        if (ptr_type->data.pointer.host_int_bytes == 0) {
            // Byte-aligned element: ordinary pointer to the child type.
            return_type = get_pointer_to_type_extra(ira->codegen, child_type,
                ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
                elem_ptr_instruction->ptr_len,
                ptr_type->data.pointer.explicit_alignment, 0, 0, false);
        } else {
            // Packed (bit-level) storage: the bit offset within the host
            // integer depends on the index, so the index must be comptime.
            uint64_t elem_val_scalar;
            if (!ir_resolve_usize(ira, elem_index, &elem_val_scalar))
                return ira->codegen->invalid_inst_gen;

            size_t bit_width = type_size_bits(ira->codegen, child_type);
            size_t bit_offset = bit_width * elem_val_scalar;

            return_type = get_pointer_to_type_extra(ira->codegen, child_type,
                ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
                elem_ptr_instruction->ptr_len,
                1, (uint32_t)bit_offset, ptr_type->data.pointer.host_int_bytes, false);
        }
    } else if (array_type->id == ZigTypeIdPointer) {
        if (array_type->data.pointer.ptr_len == PtrLenSingle) {
            ir_add_error_node(ira, elem_ptr_instruction->base.base.source_node,
                    buf_sprintf("index of single-item pointer"));
            return ira->codegen->invalid_inst_gen;
        }
        return_type = adjust_ptr_len(ira->codegen, array_type, elem_ptr_instruction->ptr_len);
    } else if (is_slice(array_type)) {
        // Indexing a slice yields a pointer derived from the slice's ptr field.
        return_type = adjust_ptr_len(ira->codegen, array_type->data.structure.fields[slice_ptr_index]->type_entry,
                elem_ptr_instruction->ptr_len);
    } else if (array_type->id == ZigTypeIdVector) {
        // This depends on whether the element index is comptime, so it is computed later.
        return_type = nullptr;
    } else if (elem_ptr_instruction->init_array_type_source_node != nullptr &&
        array_type->id == ZigTypeIdStruct &&
        array_type->data.structure.resolve_status == ResolveStatusBeingInferred)
    {
        // `.{ ... }[i]` on a struct whose type is still being inferred:
        // treat the numeric index as an inferred field named after the digits.
        ZigType *usize = ira->codegen->builtin_types.entry_usize;
        IrInstGen *casted_elem_index = ir_implicit_cast(ira, elem_index, usize);
        if (type_is_invalid(casted_elem_index->value->type))
            return ira->codegen->invalid_inst_gen;
        ir_assert(instr_is_comptime(casted_elem_index), &elem_ptr_instruction->base.base);
        Buf *field_name = buf_alloc();
        bigint_append_buf(field_name, &casted_elem_index->value->data.x_bigint, 10);
        return ir_analyze_inferred_field_ptr(ira, field_name, &elem_ptr_instruction->base.base,
                array_ptr, array_type);
    } else if (is_tuple(array_type)) {
        // Tuples are structs; a comptime index selects a struct field.
        uint64_t elem_index_scalar;
        if (!ir_resolve_usize(ira, elem_index, &elem_index_scalar))
            return ira->codegen->invalid_inst_gen;
        if (elem_index_scalar >= array_type->data.structure.src_field_count) {
            ir_add_error(ira, &elem_ptr_instruction->base.base, buf_sprintf(
                "field index %" ZIG_PRI_u64 " outside tuple '%s' which has %" PRIu32 " fields",
                elem_index_scalar, buf_ptr(&array_type->name),
                array_type->data.structure.src_field_count));
            return ira->codegen->invalid_inst_gen;
        }
        TypeStructField *field = array_type->data.structure.fields[elem_index_scalar];
        return ir_analyze_struct_field_ptr(ira, &elem_ptr_instruction->base.base, field, array_ptr,
                array_type, false);
    } else {
        ir_add_error_node(ira, elem_ptr_instruction->base.base.source_node,
                buf_sprintf("array access of non-array type '%s'", buf_ptr(&array_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *usize = ira->codegen->builtin_types.entry_usize;
    IrInstGen *casted_elem_index = ir_implicit_cast(ira, elem_index, usize);
    if (type_is_invalid(casted_elem_index->value->type))
        return ira->codegen->invalid_inst_gen;

    bool safety_check_on = elem_ptr_instruction->safety_check_on;
    if (instr_is_comptime(casted_elem_index)) {
        // Comptime-known index: bounds-check now and, when possible, fold the
        // whole element pointer to a constant.
        ZigValue *index_val = ir_resolve_const(ira, casted_elem_index, UndefBad);
        if (index_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        uint64_t index = bigint_as_u64(&index_val->data.x_bigint);

        if (array_type->id == ZigTypeIdArray) {
            // A sentinel-terminated array allows indexing the sentinel slot.
            uint64_t array_len = array_type->data.array.len +
                (array_type->data.array.sentinel != nullptr);
            if (index >= array_len) {
                ir_add_error_node(ira, elem_ptr_instruction->base.base.source_node,
                    buf_sprintf("index %" ZIG_PRI_u64 " outside array of size %" ZIG_PRI_u64,
                            index, array_len));
                return ira->codegen->invalid_inst_gen;
            }
            // Bounds proven at comptime; no runtime check needed.
            safety_check_on = false;
        } else if (array_type->id == ZigTypeIdVector) {
            uint64_t vector_len = array_type->data.vector.len;
            if (index >= vector_len) {
                ir_add_error_node(ira, elem_ptr_instruction->base.base.source_node,
                    buf_sprintf("index %" ZIG_PRI_u64 " outside vector of size %" ZIG_PRI_u64,
                            index, vector_len));
                return ira->codegen->invalid_inst_gen;
            }
            safety_check_on = false;
        }

        if (array_type->id == ZigTypeIdVector) {
            // Vector element with comptime index: encode the index in the
            // pointer type itself (vector_index), host size = vector length.
            ZigType *elem_type = array_type->data.vector.elem_type;
            uint32_t host_vec_len = array_type->data.vector.len;
            return_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
                ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
                elem_ptr_instruction->ptr_len,
                get_ptr_align(ira->codegen, ptr_type), 0, host_vec_len, false, (uint32_t)index,
                nullptr, nullptr);
        } else if (return_type->data.pointer.explicit_alignment != 0) {
            // With a known index we can compute the exact element alignment
            // instead of the optimistic one chosen above.
            uint32_t chosen_align;
            if ((err = compute_elem_align(ira, return_type->data.pointer.child_type,
                return_type->data.pointer.explicit_alignment, index, &chosen_align)))
            {
                return ira->codegen->invalid_inst_gen;
            }
            return_type = adjust_ptr_align(ira->codegen, return_type, chosen_align);
        }

        // TODO The `array_type->id == ZigTypeIdArray` exception here should not be an exception;
        // the `orig_array_ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar` clause should be omitted completely.
        // However there are bugs to fix before this improvement can be made.
        if (orig_array_ptr_val->special != ConstValSpecialRuntime &&
            orig_array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr &&
            (orig_array_ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == ZigTypeIdArray))
        {
            // The container pointer is also comptime-known: attempt full
            // constant folding of the element pointer.
            if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
                elem_ptr_instruction->base.base.source_node, orig_array_ptr_val, UndefBad)))
            {
                return ira->codegen->invalid_inst_gen;
            }

            ZigValue *array_ptr_val = const_ptr_pointee(ira, ira->codegen, orig_array_ptr_val,
                    elem_ptr_instruction->base.base.source_node);
            if (array_ptr_val == nullptr)
                return ira->codegen->invalid_inst_gen;

            // Element access into an undef value that is part of an
            // array/slice literal initialization: materialize the aggregate
            // with undef elements so individual elements can be written.
            if (array_ptr_val->special == ConstValSpecialUndef &&
                elem_ptr_instruction->init_array_type_source_node != nullptr)
            {
                if (array_type->id == ZigTypeIdArray || array_type->id == ZigTypeIdVector) {
                    array_ptr_val->data.x_array.special = ConstArraySpecialNone;
                    array_ptr_val->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(array_type->data.array.len);
                    array_ptr_val->special = ConstValSpecialStatic;
                    for (size_t i = 0; i < array_type->data.array.len; i += 1) {
                        ZigValue *elem_val = &array_ptr_val->data.x_array.data.s_none.elements[i];
                        elem_val->special = ConstValSpecialUndef;
                        elem_val->type = array_type->data.array.child_type;
                        elem_val->parent.id = ConstParentIdArray;
                        elem_val->parent.data.p_array.array_val = array_ptr_val;
                        elem_val->parent.data.p_array.elem_index = i;
                    }
                } else if (is_slice(array_type)) {
                    // The slice is being inferred from an array literal:
                    // build a backing const array and point the slice at it.
                    ir_assert(array_ptr->value->type->id == ZigTypeIdPointer, &elem_ptr_instruction->base.base);
                    ZigType *actual_array_type = array_ptr->value->type->data.pointer.child_type;

                    if (type_is_invalid(actual_array_type))
                        return ira->codegen->invalid_inst_gen;
                    if (actual_array_type->id != ZigTypeIdArray) {
                        ir_add_error_node(ira, elem_ptr_instruction->init_array_type_source_node,
                            buf_sprintf("array literal requires address-of operator to coerce to slice type '%s'",
                                buf_ptr(&actual_array_type->name)));
                        return ira->codegen->invalid_inst_gen;
                    }

                    ZigValue *array_init_val = ira->codegen->pass1_arena->create<ZigValue>();
                    array_init_val->special = ConstValSpecialStatic;
                    array_init_val->type = actual_array_type;
                    array_init_val->data.x_array.special = ConstArraySpecialNone;
                    array_init_val->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(actual_array_type->data.array.len);
                    array_init_val->special = ConstValSpecialStatic;
                    for (size_t i = 0; i < actual_array_type->data.array.len; i += 1) {
                        ZigValue *elem_val = &array_init_val->data.x_array.data.s_none.elements[i];
                        elem_val->special = ConstValSpecialUndef;
                        elem_val->type = actual_array_type->data.array.child_type;
                        elem_val->parent.id = ConstParentIdArray;
                        elem_val->parent.data.p_array.array_val = array_init_val;
                        elem_val->parent.data.p_array.elem_index = i;
                    }

                    init_const_slice(ira->codegen, array_ptr_val, array_init_val, 0, actual_array_type->data.array.len,
                            false);
                    // Mark the ptr field as "infer": later stores decide
                    // whether this stays comptime or becomes runtime.
                    array_ptr_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutInfer;
                } else {
                    ir_add_error_node(ira, elem_ptr_instruction->init_array_type_source_node,
                        buf_sprintf("expected array type or [_], found '%s'",
                            buf_ptr(&array_type->name)));
                    return ira->codegen->invalid_inst_gen;
                }
            }

            if (array_ptr_val->special != ConstValSpecialRuntime &&
                (array_type->id != ZigTypeIdPointer ||
                    array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr))
            {
                if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
                    elem_ptr_instruction->base.base.source_node, array_ptr_val, UndefOk)))
                {
                    return ira->codegen->invalid_inst_gen;
                }
                if (array_type->id == ZigTypeIdPointer) {
                    // Comptime pointer arithmetic: offset the const pointer
                    // representation by `index`, with bounds checking against
                    // the underlying allocation.
                    IrInstGen *result = ir_const(ira, &elem_ptr_instruction->base.base, return_type);
                    ZigValue *out_val = result->value;
                    out_val->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
                    size_t new_index;
                    size_t mem_size;
                    size_t old_size;
                    switch (array_ptr_val->data.x_ptr.special) {
                        case ConstPtrSpecialInvalid:
                        case ConstPtrSpecialDiscard:
                            zig_unreachable();
                        case ConstPtrSpecialRef:
                            if (array_ptr_val->data.x_ptr.data.ref.pointee->type->id == ZigTypeIdArray) {
                                // A ref to an array behaves like a base-array
                                // pointer at element `index`.
                                ZigValue *array_val = array_ptr_val->data.x_ptr.data.ref.pointee;
                                new_index = index;
                                ZigType *array_type = array_val->type;
                                mem_size = array_type->data.array.len;
                                if (array_type->data.array.sentinel != nullptr) {
                                    mem_size += 1;
                                }
                                old_size = mem_size;

                                out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
                                out_val->data.x_ptr.data.base_array.array_val = array_val;
                                out_val->data.x_ptr.data.base_array.elem_index = new_index;
                            } else {
                                // Single pointee: only index 0 is in bounds.
                                mem_size = 1;
                                old_size = 1;
                                new_index = index;

                                out_val->data.x_ptr.special = ConstPtrSpecialRef;
                                out_val->data.x_ptr.data.ref.pointee = array_ptr_val->data.x_ptr.data.ref.pointee;
                            }
                            break;
                        case ConstPtrSpecialBaseArray:
                        case ConstPtrSpecialSubArray:
                            {
                                // Pointer already inside an array: add the
                                // offsets together.
                                size_t offset = array_ptr_val->data.x_ptr.data.base_array.elem_index;
                                new_index = offset + index;
                                ZigType *array_type = array_ptr_val->data.x_ptr.data.base_array.array_val->type;
                                mem_size = array_type->data.array.len;
                                if (array_type->data.array.sentinel != nullptr) {
                                    mem_size += 1;
                                }
                                old_size = mem_size - offset;

                                assert(array_ptr_val->data.x_ptr.data.base_array.array_val);

                                out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
                                out_val->data.x_ptr.data.base_array.array_val =
                                    array_ptr_val->data.x_ptr.data.base_array.array_val;
                                out_val->data.x_ptr.data.base_array.elem_index = new_index;

                                break;
                            }
                        case ConstPtrSpecialBaseStruct:
                            zig_panic("TODO elem ptr on a const inner struct");
                        case ConstPtrSpecialBaseErrorUnionCode:
                            zig_panic("TODO elem ptr on a const inner error union code");
                        case ConstPtrSpecialBaseErrorUnionPayload:
                            zig_panic("TODO elem ptr on a const inner error union payload");
                        case ConstPtrSpecialBaseOptionalPayload:
                            zig_panic("TODO elem ptr on a const inner optional payload");
                        case ConstPtrSpecialHardCodedAddr:
                            zig_unreachable();
                        case ConstPtrSpecialFunction:
                            zig_panic("TODO element ptr of a function casted to a ptr");
                        case ConstPtrSpecialNull:
                            zig_panic("TODO elem ptr on a null pointer");
                    }
                    if (new_index >= mem_size) {
                        ir_add_error_node(ira, elem_ptr_instruction->base.base.source_node,
                            buf_sprintf("index %" ZIG_PRI_u64 " outside pointer of size %" ZIG_PRI_usize "", index, old_size));
                        return ira->codegen->invalid_inst_gen;
                    }
                    return result;
                } else if (is_slice(array_type)) {
                    // Comptime slice indexing: bounds-check against the len
                    // field, then derive a const pointer from the ptr field.
                    expand_undef_struct(ira->codegen, array_ptr_val);

                    ZigValue *ptr_field = array_ptr_val->data.x_struct.fields[slice_ptr_index];
                    ir_assert(ptr_field != nullptr, &elem_ptr_instruction->base.base);
                    if (ptr_field->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
                        // Hard-coded address cannot be folded; emit runtime
                        // access with the safety check off (comptime index).
                        return ir_build_elem_ptr_gen(ira, elem_ptr_instruction->base.base.scope,
                            elem_ptr_instruction->base.base.source_node, array_ptr, casted_elem_index, false,
                            return_type);
                    }
                    ZigValue *len_field = array_ptr_val->data.x_struct.fields[slice_len_index];
                    IrInstGen *result = ir_const(ira, &elem_ptr_instruction->base.base, return_type);
                    ZigValue *out_val = result->value;
                    ZigType *slice_ptr_type = array_type->data.structure.fields[slice_ptr_index]->type_entry;
                    uint64_t slice_len = bigint_as_u64(&len_field->data.x_bigint);
                    // A sentinel-terminated slice allows indexing one past len.
                    uint64_t full_slice_len = slice_len +
                        ((slice_ptr_type->data.pointer.sentinel != nullptr) ? 1 : 0);
                    if (index >= full_slice_len) {
                        ir_add_error_node(ira, elem_ptr_instruction->base.base.source_node,
                            buf_sprintf("index %" ZIG_PRI_u64 " outside slice of size %" ZIG_PRI_u64,
                                index, slice_len));
                        return ira->codegen->invalid_inst_gen;
                    }
                    out_val->data.x_ptr.mut = ptr_field->data.x_ptr.mut;
                    switch (ptr_field->data.x_ptr.special) {
                        case ConstPtrSpecialInvalid:
                        case ConstPtrSpecialDiscard:
                            zig_unreachable();
                        case ConstPtrSpecialRef:
                            out_val->data.x_ptr.special = ConstPtrSpecialRef;
                            out_val->data.x_ptr.data.ref.pointee = ptr_field->data.x_ptr.data.ref.pointee;
                            break;
                        case ConstPtrSpecialSubArray:
                        case ConstPtrSpecialBaseArray:
                            {
                                size_t offset = ptr_field->data.x_ptr.data.base_array.elem_index;
                                uint64_t new_index = offset + index;
                                // Buf-backed arrays store string bytes; their
                                // type len does not bound elem_index the same
                                // way, so the assert is skipped for them.
                                if (ptr_field->data.x_ptr.data.base_array.array_val->data.x_array.special !=
                                    ConstArraySpecialBuf)
                                {
                                    ir_assert(new_index <
                                        ptr_field->data.x_ptr.data.base_array.array_val->type->data.array.len,
                                        &elem_ptr_instruction->base.base);
                                }
                                out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
                                out_val->data.x_ptr.data.base_array.array_val =
                                    ptr_field->data.x_ptr.data.base_array.array_val;
                                out_val->data.x_ptr.data.base_array.elem_index = new_index;
                                break;
                            }
                        case ConstPtrSpecialBaseStruct:
                            zig_panic("TODO elem ptr on a slice backed by const inner struct");
                        case ConstPtrSpecialBaseErrorUnionCode:
                            zig_panic("TODO elem ptr on a slice backed by const inner error union code");
                        case ConstPtrSpecialBaseErrorUnionPayload:
                            zig_panic("TODO elem ptr on a slice backed by const inner error union payload");
                        case ConstPtrSpecialBaseOptionalPayload:
                            zig_panic("TODO elem ptr on a slice backed by const optional payload");
                        case ConstPtrSpecialHardCodedAddr:
                            zig_unreachable();
                        case ConstPtrSpecialFunction:
                            zig_panic("TODO elem ptr on a slice that was ptrcast from a function");
                        case ConstPtrSpecialNull:
                            zig_panic("TODO elem ptr on a slice has a null pointer");
                    }
                    return result;
                } else if (array_type->id == ZigTypeIdArray || array_type->id == ZigTypeIdVector) {
                    expand_undef_array(ira->codegen, array_ptr_val);

                    IrInstGen *result;
                    if (orig_array_ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
                        // Inference in progress: keep a runtime instruction
                        // around but tag its value as static for now.
                        result = ir_build_elem_ptr_gen(ira, elem_ptr_instruction->base.base.scope,
                            elem_ptr_instruction->base.base.source_node, array_ptr, casted_elem_index,
                            false, return_type);
                        result->value->special = ConstValSpecialStatic;
                    } else {
                        result = ir_const(ira, &elem_ptr_instruction->base.base, return_type);
                    }
                    ZigValue *out_val = result->value;
                    out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
                    out_val->data.x_ptr.mut = orig_array_ptr_val->data.x_ptr.mut;
                    out_val->data.x_ptr.data.base_array.array_val = array_ptr_val;
                    out_val->data.x_ptr.data.base_array.elem_index = index;
                    return result;
                } else {
                    zig_unreachable();
                }
            }
        }
    } else if (array_type->id == ZigTypeIdVector) {
        // runtime known element index
        // Vector element with runtime index: mark the pointer with the
        // special VECTOR_INDEX_RUNTIME marker instead of a concrete index.
        ZigType *elem_type = array_type->data.vector.elem_type;
        uint32_t host_vec_len = array_type->data.vector.len;
        return_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
            ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
            elem_ptr_instruction->ptr_len,
            get_ptr_align(ira->codegen, ptr_type), 0, host_vec_len, false, VECTOR_INDEX_RUNTIME,
            nullptr, nullptr);
    } else {
        // runtime known element index
        switch (type_requires_comptime(ira->codegen, return_type)) {
            case ReqCompTimeYes:
                ir_add_error(ira, &elem_index->base,
                    buf_sprintf("values of type '%s' must be comptime known, but index value is runtime known",
                        buf_ptr(&return_type->data.pointer.child_type->name)));
                return ira->codegen->invalid_inst_gen;
            case ReqCompTimeInvalid:
                return ira->codegen->invalid_inst_gen;
            case ReqCompTimeNo:
                break;
        }

        // With a runtime index only a conservative alignment can be chosen:
        // the ABI alignment, or the (smaller) pointer alignment when the
        // element stride preserves it.
        if (return_type->data.pointer.explicit_alignment != 0) {
            if ((err = type_resolve(ira->codegen, return_type->data.pointer.child_type, ResolveStatusSizeKnown)))
                return ira->codegen->invalid_inst_gen;

            uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
            uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type);
            uint64_t ptr_align = get_ptr_align(ira->codegen, return_type);
            if (ptr_align < abi_align) {
                if (elem_size >= ptr_align && elem_size % ptr_align == 0) {
                    return_type = adjust_ptr_align(ira->codegen, return_type, ptr_align);
                } else {
                    // can't get here because guaranteed elem_size >= abi_align
                    zig_unreachable();
                }
            } else {
                return_type = adjust_ptr_align(ira->codegen, return_type, abi_align);
            }
        }
    }

    // Fall-through: emit the runtime element-pointer instruction.
    return ir_build_elem_ptr_gen(ira, elem_ptr_instruction->base.base.scope,
            elem_ptr_instruction->base.base.source_node, array_ptr, casted_elem_index, safety_check_on, return_type);
}
|
|
|
|
// Fallback path taken when `field_name` did not match an actual field of the
// container: look for a namespaced declaration (a function, or a variable of
// function type) with that name and produce a "bound function" reference so
// `value.method(...)` syntax works. If nothing matches, emit the
// "no member named ..." compile error.
//
// bare_struct_type  - the container type with pointer/reference stripped
// field_name        - the member name being accessed
// source_instr      - instruction used for error locations and scope lookups
// container_ptr     - pointer to the container value (becomes the bound `self`)
// container_ptr_src - source info for container_ptr, threaded into the bound fn
// container_type    - the original (possibly reference) container type
static IrInstGen *ir_analyze_container_member_access_inner(IrAnalyze *ira,
    ZigType *bare_struct_type, Buf *field_name, IrInst* source_instr,
    IrInstGen *container_ptr, IrInst *container_ptr_src, ZigType *container_type)
{
    // Slices have no declaration scope, so only non-slices get the decl lookup.
    if (!is_slice(bare_struct_type)) {
        ScopeDecls *container_scope = get_container_scope(bare_struct_type);
        assert(container_scope != nullptr);
        auto tld = find_container_decl(ira->codegen, container_scope, field_name);
        if (tld) {
            if (tld->id == TldIdFn) {
                resolve_top_level_decl(ira->codegen, tld, source_instr->source_node, false);
                if (tld->resolution == TldResolutionInvalid)
                    return ira->codegen->invalid_inst_gen;
                if (tld->resolution == TldResolutionResolving)
                    return ir_error_dependency_loop(ira, source_instr);

                // Private decls are only reachable from the file that declares them.
                if (tld->visib_mod == VisibModPrivate &&
                    tld->import != get_scope_import(source_instr->scope))
                {
                    ErrorMsg *msg = ir_add_error(ira, source_instr,
                        buf_sprintf("'%s' is private", buf_ptr(field_name)));
                    add_error_note(ira->codegen, msg, tld->source_node, buf_sprintf("declared here"));
                    return ira->codegen->invalid_inst_gen;
                }

                TldFn *tld_fn = (TldFn *)tld;
                ZigFn *fn_entry = tld_fn->fn_entry;
                assert(fn_entry != nullptr);

                if (type_is_invalid(fn_entry->type_entry))
                    return ira->codegen->invalid_inst_gen;

                // Package the function together with `container_ptr` as the
                // implicit first argument, and hand back a pointer to it.
                IrInstGen *bound_fn_value = ir_const_bound_fn(ira, source_instr, fn_entry, container_ptr,
                        container_ptr_src);
                return ir_get_ref(ira, source_instr, bound_fn_value, true, false);
            } else if (tld->id == TldIdVar) {
                resolve_top_level_decl(ira->codegen, tld, source_instr->source_node, false);
                if (tld->resolution == TldResolutionInvalid)
                    return ira->codegen->invalid_inst_gen;
                if (tld->resolution == TldResolutionResolving)
                    return ir_error_dependency_loop(ira, source_instr);

                TldVar *tld_var = (TldVar *)tld;
                ZigVar *var = tld_var->var;
                assert(var != nullptr);

                if (type_is_invalid(var->var_type))
                    return ira->codegen->invalid_inst_gen;

                // A const variable holding a function also works as a method;
                // anything else falls through to the "no member" error below.
                if (var->const_value->type->id == ZigTypeIdFn) {
                    ir_assert(var->const_value->data.x_ptr.special == ConstPtrSpecialFunction, source_instr);
                    ZigFn *fn = var->const_value->data.x_ptr.data.fn.fn_entry;
                    IrInstGen *bound_fn_value = ir_const_bound_fn(ira, source_instr, fn, container_ptr,
                            container_ptr_src);
                    return ir_get_ref(ira, source_instr, bound_fn_value, true, false);
                }
            }
        }
    }
    // No field and no usable declaration: report an error, prefixing the type
    // name with its container kind for readability.
    const char *prefix_name;
    if (is_slice(bare_struct_type)) {
        prefix_name = "";
    } else if (bare_struct_type->id == ZigTypeIdStruct) {
        prefix_name = "struct ";
    } else if (bare_struct_type->id == ZigTypeIdEnum) {
        prefix_name = "enum ";
    } else if (bare_struct_type->id == ZigTypeIdUnion) {
        prefix_name = "union ";
    } else if (bare_struct_type->id == ZigTypeIdOpaque) {
        prefix_name = "opaque type ";
    } else {
        prefix_name = "";
    }
    ir_add_error_node(ira, source_instr->source_node,
        buf_sprintf("no member named '%s' in %s'%s'", buf_ptr(field_name), prefix_name, buf_ptr(&bare_struct_type->name)));
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
// Lazily evaluate and cache a struct field's default-value expression into
// field->init_val. No-op when already computed or when the field's
// declaration carries no default initializer.
static void memoize_field_init_val(CodeGen *codegen, ZigType *container_type, TypeStructField *field) {
    // Already memoized, or there is no declaration node to read from.
    if (field->init_val != nullptr || field->decl_node == nullptr)
        return;
    if (field->decl_node->type != NodeTypeStructField)
        return;

    AstNode *default_expr = field->decl_node->data.struct_field.value;
    if (default_expr == nullptr)
        return;

    // Evaluate in the scope of the struct type *declaration*, not the scope
    // of whatever initialization triggered this call.
    Scope *decl_scope = &get_container_scope(container_type)->base;
    field->init_val = analyze_const_value(codegen, decl_scope, default_expr,
            field->type_entry, nullptr, UndefOk);
}
|
|
|
|
// Produce a pointer to `field` of the struct pointed to by `struct_ptr`.
// Handles four situations:
//   1. comptime fields   - materialize the memoized default value as a const ref
//   2. one-possible-value field types - materialize the single value directly
//   3. comptime-known struct pointers - build a ConstPtrSpecialBaseStruct
//      const pointer into the struct's ZigValue
//   4. runtime pointers  - emit a StructFieldPtr gen instruction
// `initializing` is true when this pointer is the destination of a field
// initialization (allows writing into an undef struct value).
static IrInstGen *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInst* source_instr,
    TypeStructField *field, IrInstGen *struct_ptr, ZigType *struct_type, bool initializing)
{
    Error err;
    ZigType *field_type = resolve_struct_field_type(ira->codegen, field);
    if (field_type == nullptr)
        return ira->codegen->invalid_inst_gen;
    if (field->is_comptime) {
        // Comptime fields have no runtime storage; the "pointer" is a const
        // ref to the field's (memoized) default value.
        IrInstGen *elem = ir_const(ira, source_instr, field_type);
        memoize_field_init_val(ira->codegen, struct_type, field);
        if(field->init_val != nullptr && type_is_invalid(field->init_val->type)){
            return ira->codegen->invalid_inst_gen;
        }
        // NOTE(review): if memoize_field_init_val left init_val == nullptr
        // (comptime field without a default expression), this passes nullptr
        // to copy_const_val — confirm upstream guarantees a value here.
        copy_const_val(ira->codegen, elem->value, field->init_val);
        return ir_get_ref2(ira, source_instr, elem, field_type, true, false);
    }
    switch (type_has_one_possible_value(ira->codegen, field_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes: {
            // Zero-bit field: no load needed, synthesize the only value.
            IrInstGen *elem = ir_const_move(ira, source_instr,
                 get_the_one_possible_value(ira->codegen, field_type));
            return ir_get_ref(ira, source_instr, elem,
                    struct_ptr->value->type->data.pointer.is_const,
                    struct_ptr->value->type->data.pointer.is_volatile);
        }
        case OnePossibleValueNo:
            break;
    }
    // Construct the result pointer type, propagating const/volatile from the
    // struct pointer and, for packed structs, the bit-offset/host-int layout.
    bool is_const = struct_ptr->value->type->data.pointer.is_const;
    bool is_volatile = struct_ptr->value->type->data.pointer.is_volatile;
    ZigType *ptr_type;
    if (is_anon_container(struct_type)) {
        ptr_type = get_pointer_to_type_extra(ira->codegen, field_type,
                is_const, is_volatile, PtrLenSingle, 0, 0, 0, false);
    } else {
        // Auto-layout structs only need zero-bit info; explicit layouts need
        // the full size to compute field offsets.
        ResolveStatus needed_resolve_status =
            (struct_type->data.structure.layout == ContainerLayoutAuto) ?
                ResolveStatusZeroBitsKnown : ResolveStatusSizeKnown;
        if ((err = type_resolve(ira->codegen, struct_type, needed_resolve_status)))
            return ira->codegen->invalid_inst_gen;
        assert(struct_ptr->value->type->id == ZigTypeIdPointer);
        uint32_t ptr_bit_offset = struct_ptr->value->type->data.pointer.bit_offset_in_host;
        uint32_t ptr_host_int_bytes = struct_ptr->value->type->data.pointer.host_int_bytes;
        uint32_t host_int_bytes_for_result_type = (ptr_host_int_bytes == 0) ?
            get_host_int_bytes(ira->codegen, struct_type, field) : ptr_host_int_bytes;
        ptr_type = get_pointer_to_type_extra(ira->codegen, field_type,
                is_const, is_volatile, PtrLenSingle, field->align,
                (uint32_t)(ptr_bit_offset + field->bit_offset_in_host),
                (uint32_t)host_int_bytes_for_result_type, false);
    }
    if (instr_is_comptime(struct_ptr)) {
        ZigValue *ptr_val = ir_resolve_const(ira, struct_ptr, UndefBad);
        if (!ptr_val)
            return ira->codegen->invalid_inst_gen;

        // Hard-coded addresses cannot be dereferenced at comptime; fall
        // through to the runtime instruction below.
        if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
            ZigValue *struct_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node);
            if (struct_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (type_is_invalid(struct_val->type))
                return ira->codegen->invalid_inst_gen;

            // This to allow lazy values to be resolved.
            if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
                source_instr->source_node, struct_val, UndefOk)))
            {
                return ira->codegen->invalid_inst_gen;
            }
            // First write into an undef struct: give every field its own
            // undef ZigValue wired back to the parent struct value.
            if (initializing && struct_val->special == ConstValSpecialUndef) {
                struct_val->data.x_struct.fields = alloc_const_vals_ptrs(ira->codegen, struct_type->data.structure.src_field_count);
                struct_val->special = ConstValSpecialStatic;
                for (size_t i = 0; i < struct_type->data.structure.src_field_count; i += 1) {
                    ZigValue *field_val = struct_val->data.x_struct.fields[i];
                    field_val->special = ConstValSpecialUndef;
                    field_val->type = resolve_struct_field_type(ira->codegen,
                            struct_type->data.structure.fields[i]);
                    field_val->parent.id = ConstParentIdStruct;
                    field_val->parent.data.p_struct.struct_val = struct_val;
                    field_val->parent.data.p_struct.field_index = i;
                }
            }
            IrInstGen *result;
            if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
                // Inferred-comptime pointer: keep a real gen instruction so
                // analysis can later demote it to runtime if needed.
                result = ir_build_struct_field_ptr(ira, source_instr, struct_ptr, field, ptr_type);
                result->value->special = ConstValSpecialStatic;
            } else {
                result = ir_const(ira, source_instr, ptr_type);
            }
            // Encode "pointer to field N of this struct value".
            ZigValue *const_val = result->value;
            const_val->data.x_ptr.special = ConstPtrSpecialBaseStruct;
            const_val->data.x_ptr.mut = ptr_val->data.x_ptr.mut;
            const_val->data.x_ptr.data.base_struct.struct_val = struct_val;
            const_val->data.x_ptr.data.base_struct.field_index = field->src_index;
            return result;
        }
    }
    // Runtime path: plain GEP-style instruction.
    return ir_build_struct_field_ptr(ira, source_instr, struct_ptr, field, ptr_type);
}
|
|
|
|
// Build a field pointer into a struct whose layout is still being inferred
// (anonymous struct literals). Returns a pointer whose *type* carries the
// inferred-struct + field-name information instead of a concrete field type.
static IrInstGen *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_name,
    IrInst* source_instr, IrInstGen *container_ptr, ZigType *container_type)
{
    // The type of the field is not available until a store using this pointer happens.
    // So, here we create a special pointer type which has the inferred struct type and
    // field name encoded in the type. Later, when there is a store via this pointer,
    // the field type will then be available, and the field will be added to the inferred
    // struct.

    ZigType *container_ptr_type = container_ptr->value->type;
    ir_assert(container_ptr_type->id == ZigTypeIdPointer, source_instr);

    // Side-channel record consumed by the later store analysis.
    InferredStructField *inferred_struct_field = heap::c_allocator.create<InferredStructField>();
    inferred_struct_field->inferred_struct_type = container_type;
    inferred_struct_field->field_name = field_name;

    // Element type is the placeholder `anytype`; the real type is decided at
    // store time.
    ZigType *elem_type = ira->codegen->builtin_types.entry_anytype;
    ZigType *field_ptr_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
        container_ptr_type->data.pointer.is_const, container_ptr_type->data.pointer.is_volatile,
        PtrLenSingle, 0, 0, 0, false, VECTOR_INDEX_NONE, inferred_struct_field, nullptr);

    if (instr_is_comptime(container_ptr)) {
        ZigValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result;
        if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
            // Keep a gen instruction so the value can still fall back to
            // runtime; note the cast keeps the *container* pointer type here.
            result = ir_build_cast(ira, source_instr, container_ptr_type, container_ptr, CastOpNoop);
        } else {
            result = ir_const(ira, source_instr, field_ptr_type);
        }
        // Reuse the container pointer's const value, retyped to the special
        // inferred-field pointer type.
        copy_const_val(ira->codegen, result->value, ptr_val);
        result->value->type = field_ptr_type;
        return result;
    }

    // Runtime container pointer: a no-op cast to the special pointer type.
    return ir_build_cast(ira, source_instr, field_ptr_type, container_ptr, CastOpNoop);
}
|
|
|
|
// Resolve `container.field` to a field pointer, dispatching on the bare
// container kind:
//   - struct being inferred   -> ir_analyze_inferred_field_ptr
//   - struct                  -> ir_analyze_struct_field_ptr (or decl lookup)
//   - enum / opaque           -> decl lookup only
//   - union                   -> union field pointer, with comptime tag checks
// `initializing` marks pointers that are the destination of an initialization.
static IrInstGen *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
    IrInst* source_instr, IrInstGen *container_ptr, IrInst *container_ptr_src,
    ZigType *container_type, bool initializing)
{
    Error err;

    ZigType *bare_type = container_ref_type(container_type);

    // Struct literal whose type is still being inferred: record the field
    // name in the pointer type instead of resolving it now.
    if (initializing && bare_type->id == ZigTypeIdStruct &&
        bare_type->data.structure.resolve_status == ResolveStatusBeingInferred)
    {
        return ir_analyze_inferred_field_ptr(ira, field_name, source_instr, container_ptr, bare_type);
    }

    // Tracks whether we should return an undefined value of the correct type.
    // We do this if the container pointer is undefined and we are in a TypeOf call.
    bool return_undef = container_ptr->value->special == ConstValSpecialUndef && \
        get_scope_typeof(source_instr->scope) != nullptr;

    if ((err = type_resolve(ira->codegen, bare_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    assert(container_ptr->value->type->id == ZigTypeIdPointer);
    if (bare_type->id == ZigTypeIdStruct) {
        TypeStructField *field = find_struct_type_field(bare_type, field_name);
        if (field != nullptr) {
            if (return_undef) {
                // Inside @TypeOf only the type matters; skip the real access.
                ZigType *field_ptr_type = get_pointer_to_type(ira->codegen, resolve_struct_field_type(ira->codegen, field),
                        container_ptr->value->type->data.pointer.is_const);
                return ir_const_undef(ira, source_instr, field_ptr_type);
            }

            return ir_analyze_struct_field_ptr(ira, source_instr, field, container_ptr, bare_type, initializing);
        } else {
            // Not a field; maybe a declaration (method etc.).
            return ir_analyze_container_member_access_inner(ira, bare_type, field_name,
                source_instr, container_ptr, container_ptr_src, container_type);
        }
    }

    // Enums and opaque types have no value fields; only decls can match.
    if (bare_type->id == ZigTypeIdEnum || bare_type->id == ZigTypeIdOpaque) {
        return ir_analyze_container_member_access_inner(ira, bare_type, field_name,
            source_instr, container_ptr, container_ptr_src, container_type);
    }

    if (bare_type->id == ZigTypeIdUnion) {
        bool is_const = container_ptr->value->type->data.pointer.is_const;
        bool is_volatile = container_ptr->value->type->data.pointer.is_volatile;

        TypeUnionField *field = find_union_type_field(bare_type, field_name);
        if (field == nullptr) {
            return ir_analyze_container_member_access_inner(ira, bare_type, field_name,
                source_instr, container_ptr, container_ptr_src, container_type);
        }

        ZigType *field_type = resolve_union_field_type(ira->codegen, field);
        if (field_type == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, field_type,
                is_const, is_volatile, PtrLenSingle, 0, 0, 0, false);
        if (instr_is_comptime(container_ptr)) {
            ZigValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad);
            if (!ptr_val)
                return ira->codegen->invalid_inst_gen;

            // Only fully comptime-known unions can be checked here; runtime
            // vars and hard-coded addresses fall through to the runtime path.
            if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar &&
                ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
                ZigValue *union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node);
                if (union_val == nullptr)
                    return ira->codegen->invalid_inst_gen;
                if (type_is_invalid(union_val->type))
                    return ira->codegen->invalid_inst_gen;

                // Reject undefined values unless we're initializing the union:
                // an undefined union means also the tag is undefined, accessing
                // its payload slot is UB.
                const UndefAllowed allow_undef = initializing ? UndefOk : UndefBad;
                if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
                    source_instr->source_node, union_val, allow_undef)))
                {
                    return ira->codegen->invalid_inst_gen;
                }

                if (initializing) {
                    // Activate the requested field: set the tag and give it a
                    // fresh undef payload value.
                    ZigValue *payload_val = ira->codegen->pass1_arena->create<ZigValue>();
                    payload_val->special = ConstValSpecialUndef;
                    payload_val->type = field_type;
                    payload_val->parent.id = ConstParentIdUnion;
                    payload_val->parent.data.p_union.union_val = union_val;

                    union_val->special = ConstValSpecialStatic;
                    bigint_init_bigint(&union_val->data.x_union.tag, &field->enum_field->value);
                    union_val->data.x_union.payload = payload_val;
                } else if (bare_type->data.unionation.layout != ContainerLayoutExtern) {
                    // Reading a non-extern union: the requested field must be
                    // the currently active one (extern unions allow any field).
                    TypeUnionField *actual_field = find_union_field_by_tag(bare_type, &union_val->data.x_union.tag);
                    if (actual_field == nullptr)
                        zig_unreachable();

                    if (field != actual_field) {
                        ir_add_error_node(ira, source_instr->source_node,
                            buf_sprintf("accessing union field '%s' while field '%s' is set", buf_ptr(field_name),
                                buf_ptr(actual_field->name)));
                        return ira->codegen->invalid_inst_gen;
                    }
                }

                ZigValue *payload_val = union_val->data.x_union.payload;
                assert(payload_val);

                IrInstGen *result;
                if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
                    // Keep a gen instruction so the value may later be
                    // demoted to runtime.
                    result = ir_build_union_field_ptr(ira, source_instr, container_ptr, field, true,
                            initializing, ptr_type);
                    result->value->special = ConstValSpecialStatic;
                } else {
                    result = ir_const(ira, source_instr, ptr_type);
                }
                // Const pointer directly at the payload value.
                ZigValue *const_val = result->value;
                const_val->data.x_ptr.special = ConstPtrSpecialRef;
                const_val->data.x_ptr.mut = container_ptr->value->data.x_ptr.mut;
                const_val->data.x_ptr.data.ref.pointee = payload_val;
                return result;
            }
        }

        // Runtime union access.
        return ir_build_union_field_ptr(ira, source_instr, container_ptr, field, true, initializing, ptr_type);
    }

    // Callers guarantee one of the container kinds above.
    zig_unreachable();
}
|
|
|
|
// Register with stage2 that `symbol_name` is expected to come from library
// `lib_name`; any configuration error is reported as a compile error at
// `source_node` and remembered on the codegen.
static void add_link_lib_symbol(IrAnalyze *ira, Buf *lib_name, Buf *symbol_name, AstNode *source_node) {
    CodeGen *g = ira->codegen;
    const char *link_err = stage2_add_link_lib(&g->stage1,
            buf_ptr(lib_name), buf_len(lib_name),
            buf_ptr(symbol_name), buf_len(symbol_name));
    if (link_err == nullptr)
        return;
    ir_add_error_node(ira, source_node, buf_create_from_str(link_err));
    g->reported_bad_link_libc_error = true;
}
|
|
|
|
// Emit the standard "dependency loop detected" diagnostic at `source_instr`
// and yield the sentinel invalid instruction.
static IrInstGen *ir_error_dependency_loop(IrAnalyze *ira, IrInst* source_instr) {
    Buf *diag = buf_sprintf("dependency loop detected");
    ir_add_error(ira, source_instr, diag);
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
// Turn a reference to a resolved top-level declaration into an instruction:
// a variable becomes a pointer to the variable, a function becomes a const
// pointer to the function value. Extern decls additionally register their
// library/symbol pair with the linker.
static IrInstGen *ir_analyze_decl_ref(IrAnalyze *ira, IrInst* source_instruction, Tld *tld) {
    resolve_top_level_decl(ira->codegen, tld, source_instruction->source_node, true);
    if (tld->resolution == TldResolutionInvalid) {
        return ira->codegen->invalid_inst_gen;
    }
    if (tld->resolution == TldResolutionResolving)
        return ir_error_dependency_loop(ira, source_instruction);

    switch (tld->id) {
        // These decl kinds are resolved elsewhere and never reach this path.
        case TldIdContainer:
        case TldIdCompTime:
        case TldIdUsingNamespace:
            zig_unreachable();
        case TldIdVar: {
            TldVar *tld_var = (TldVar *)tld;
            ZigVar *var = tld_var->var;
            assert(var != nullptr);

            // extern var from a named library: tell the linker about it.
            if (tld_var->extern_lib_name != nullptr) {
                add_link_lib_symbol(ira, tld_var->extern_lib_name, buf_create_from_str(var->name),
                        source_instruction->source_node);
            }

            return ir_get_var_ptr(ira, source_instruction, var);
        }
        case TldIdFn: {
            TldFn *tld_fn = (TldFn *)tld;
            ZigFn *fn_entry = tld_fn->fn_entry;
            assert(fn_entry->type_entry != nullptr);

            if (type_is_invalid(fn_entry->type_entry))
                return ira->codegen->invalid_inst_gen;

            // extern fn from a named library: tell the linker about it.
            if (tld_fn->extern_lib_name != nullptr) {
                add_link_lib_symbol(ira, tld_fn->extern_lib_name, &fn_entry->symbol_name, source_instruction->source_node);
            }

            // Function decls are immutable; return a const ref to the fn value.
            IrInstGen *fn_inst = ir_const_fn(ira, source_instruction, fn_entry);
            return ir_get_ref(ira, source_instruction, fn_inst, true, false);
        }
    }
    zig_unreachable();
}
|
|
|
|
// Linear scan of an error set's members for one whose name equals
// `field_name`; returns the entry, or nullptr when the set has no such error.
static ErrorTableEntry *find_err_table_entry(ZigType *err_set_type, Buf *field_name) {
    assert(err_set_type->id == ZigTypeIdErrorSet);
    ErrorTableEntry **entries = err_set_type->data.error_set.errors;
    const uint32_t entry_count = err_set_type->data.error_set.err_count;
    for (uint32_t idx = 0; idx < entry_count; idx += 1) {
        ErrorTableEntry *candidate = entries[idx];
        if (buf_eql_buf(&candidate->name, field_name))
            return candidate;
    }
    return nullptr;
}
|
|
|
|
// Analyze `a.b` (field access producing a pointer). Dispatches on the type of
// the accessed value:
//   - tuple `.len`               -> comptime constant
//   - slices / container (refs)  -> ir_analyze_container_field_ptr
//   - array (refs) `.len`        -> comptime constant
//   - `type` values              -> enum fields, namespaced decls, tagged-union
//                                   tag values, or error-set members
//   - anything else              -> "does not support field access" error
static IrInstGen *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstSrcFieldPtr *field_ptr_instruction) {
    Error err;
    IrInstGen *container_ptr = field_ptr_instruction->container_ptr->child;
    if (type_is_invalid(container_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *container_type = container_ptr->value->type->data.pointer.child_type;

    // `@field(x, name)` carries the name as an expression; plain `x.name`
    // carries it pre-interned in field_name_buffer.
    Buf *field_name = field_ptr_instruction->field_name_buffer;
    if (!field_name) {
        IrInstGen *field_name_expr = field_ptr_instruction->field_name_expr->child;
        field_name = ir_resolve_str(ira, field_name_expr);
        if (!field_name)
            return ira->codegen->invalid_inst_gen;
    }

    AstNode *source_node = field_ptr_instruction->base.base.source_node;

    if (type_is_invalid(container_type)) {
        return ira->codegen->invalid_inst_gen;
    } else if (is_tuple(container_type) && !field_ptr_instruction->initializing && buf_eql_str(field_name, "len")) {
        // Tuple length is always comptime known.
        IrInstGen *len_inst = ir_const_unsigned(ira, &field_ptr_instruction->base.base,
                container_type->data.structure.src_field_count);
        return ir_get_ref(ira, &field_ptr_instruction->base.base, len_inst, true, false);
    } else if (is_slice(container_type) || is_container_ref(container_type)) {
        assert(container_ptr->value->type->id == ZigTypeIdPointer);
        if (container_type->id == ZigTypeIdPointer) {
            // Pointer to container: auto-dereference one level first.
            ZigType *bare_type = container_ref_type(container_type);
            IrInstGen *container_child = ir_get_deref(ira, &field_ptr_instruction->base.base, container_ptr, nullptr);
            IrInstGen *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base.base,
                    container_child, &field_ptr_instruction->container_ptr->base, bare_type,
                    field_ptr_instruction->initializing);
            return result;
        } else {
            IrInstGen *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base.base,
                    container_ptr, &field_ptr_instruction->container_ptr->base, container_type,
                    field_ptr_instruction->initializing);
            return result;
        }
    } else if (is_array_ref(container_type) && !field_ptr_instruction->initializing) {
        // Arrays (and pointers to arrays) expose only `len`.
        if (buf_eql_str(field_name, "len")) {
            ZigValue *len_val = ira->codegen->pass1_arena->create<ZigValue>();
            if (container_type->id == ZigTypeIdPointer) {
                init_const_usize(ira->codegen, len_val, container_type->data.pointer.child_type->data.array.len);
            } else {
                init_const_usize(ira->codegen, len_val, container_type->data.array.len);
            }

            ZigType *usize = ira->codegen->builtin_types.entry_usize;
            bool ptr_is_const = true;
            bool ptr_is_volatile = false;
            return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, len_val,
                    usize, ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
        } else {
            ir_add_error_node(ira, source_node,
                buf_sprintf("no field named '%s' in '%s'", buf_ptr(field_name),
                    buf_ptr(&container_type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (container_type->id == ZigTypeIdMetaType) {
        // Accessing a member of a *type* (e.g. `SomeStruct.decl`): resolve
        // the type value first.
        ZigValue *container_ptr_val = ir_resolve_const(ira, container_ptr, UndefBad);
        if (!container_ptr_val)
            return ira->codegen->invalid_inst_gen;

        assert(container_ptr->value->type->id == ZigTypeIdPointer);
        ZigValue *child_val = const_ptr_pointee(ira, ira->codegen, container_ptr_val, source_node);
        if (child_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
            field_ptr_instruction->base.base.source_node, child_val, UndefBad)))
        {
            return ira->codegen->invalid_inst_gen;
        }
        ZigType *child_type = child_val->data.x_type;

        if (type_is_invalid(child_type)) {
            return ira->codegen->invalid_inst_gen;
        } else if (is_container(child_type)) {
            if (child_type->id == ZigTypeIdEnum) {
                if ((err = type_resolve(ira->codegen, child_type, ResolveStatusSizeKnown)))
                    return ira->codegen->invalid_inst_gen;

                // `EnumType.field` yields the enum value itself.
                TypeEnumField *field = find_enum_type_field(child_type, field_name);
                if (field) {
                    bool ptr_is_const = true;
                    bool ptr_is_volatile = false;
                    return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
                            create_const_enum(ira->codegen, child_type, &field->value), child_type,
                            ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
                }
            }
            // Otherwise look for a namespaced declaration.
            ScopeDecls *container_scope = get_container_scope(child_type);
            Tld *tld = find_container_decl(ira->codegen, container_scope, field_name);
            if (tld) {
                if (tld->visib_mod == VisibModPrivate &&
                    tld->import != get_scope_import(field_ptr_instruction->base.base.scope))
                {
                    ErrorMsg *msg = ir_add_error(ira, &field_ptr_instruction->base.base,
                        buf_sprintf("'%s' is private", buf_ptr(field_name)));
                    add_error_note(ira->codegen, msg, tld->source_node, buf_sprintf("declared here"));
                    return ira->codegen->invalid_inst_gen;
                }
                return ir_analyze_decl_ref(ira, &field_ptr_instruction->base.base, tld);
            }
            // Tagged unions: `UnionType.field` yields the corresponding tag
            // enum value.
            if (child_type->id == ZigTypeIdUnion &&
                (child_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr ||
                child_type->data.unionation.decl_node->data.container_decl.auto_enum))
            {
                if ((err = type_resolve(ira->codegen, child_type, ResolveStatusSizeKnown)))
                    return ira->codegen->invalid_inst_gen;
                TypeUnionField *field = find_union_type_field(child_type, field_name);
                if (field) {
                    ZigType *enum_type = child_type->data.unionation.tag_type;
                    bool ptr_is_const = true;
                    bool ptr_is_volatile = false;
                    return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
                            create_const_enum(ira->codegen, enum_type, &field->enum_field->value), enum_type,
                            ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
                }
            }
            const char *container_name = (child_type == ira->codegen->root_import) ?
                "root source file" : buf_ptr(buf_sprintf("container '%s'", buf_ptr(&child_type->name)));
            ir_add_error(ira, &field_ptr_instruction->base.base,
                buf_sprintf("%s has no member called '%s'",
                    container_name, buf_ptr(field_name)));
            return ira->codegen->invalid_inst_gen;
        } else if (child_type->id == ZigTypeIdErrorSet) {
            ErrorTableEntry *err_entry;
            ZigType *err_set_type;
            if (type_is_global_error_set(child_type)) {
                // `anyerror.Foo`: creates the error value on first use and
                // memoizes it in the global error table.
                auto existing_entry = ira->codegen->error_table.maybe_get(field_name);
                if (existing_entry) {
                    err_entry = existing_entry->value;
                } else {
                    err_entry = heap::c_allocator.create<ErrorTableEntry>();
                    err_entry->decl_node = field_ptr_instruction->base.base.source_node;
                    buf_init_from_buf(&err_entry->name, field_name);
                    size_t error_value_count = ira->codegen->errors_by_index.length;
                    // Must still fit in the global error tag integer type.
                    assert((uint32_t)error_value_count < (((uint32_t)1) << (uint32_t)ira->codegen->err_tag_type->data.integral.bit_count));
                    err_entry->value = error_value_count;
                    ira->codegen->errors_by_index.append(err_entry);
                    ira->codegen->error_table.put(field_name, err_entry);
                }
                if (err_entry->set_with_only_this_in_it == nullptr) {
                    err_entry->set_with_only_this_in_it = make_err_set_with_one_item(ira->codegen,
                            field_ptr_instruction->base.base.scope, field_ptr_instruction->base.base.source_node,
                            err_entry);
                }
                // Result type is the singleton error set `error{Foo}`.
                err_set_type = err_entry->set_with_only_this_in_it;
            } else {
                if (!resolve_inferred_error_set(ira->codegen, child_type, field_ptr_instruction->base.base.source_node)) {
                    return ira->codegen->invalid_inst_gen;
                }
                err_entry = find_err_table_entry(child_type, field_name);
                if (err_entry == nullptr) {
                    ir_add_error(ira, &field_ptr_instruction->base.base,
                        buf_sprintf("no error named '%s' in '%s'", buf_ptr(field_name), buf_ptr(&child_type->name)));
                    return ira->codegen->invalid_inst_gen;
                }
                err_set_type = child_type;
            }
            ZigValue *const_val = ira->codegen->pass1_arena->create<ZigValue>();
            const_val->special = ConstValSpecialStatic;
            const_val->type = err_set_type;
            const_val->data.x_err_set = err_entry;

            bool ptr_is_const = true;
            bool ptr_is_volatile = false;
            return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, const_val,
                    err_set_type, ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
        } else {
            // NOTE(review): this prints container_type (always 'type' here)
            // rather than the inner child_type — confirm whether that is the
            // intended wording of this diagnostic.
            ir_add_error(ira, &field_ptr_instruction->base.base,
                buf_sprintf("type '%s' does not support field access", buf_ptr(&container_type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (field_ptr_instruction->initializing) {
        ir_add_error(ira, &field_ptr_instruction->base.base,
            buf_sprintf("type '%s' does not support struct initialization syntax", buf_ptr(&container_type->name)));
        return ira->codegen->invalid_inst_gen;
    } else {
        ir_add_error_node(ira, field_ptr_instruction->base.base.source_node,
            buf_sprintf("type '%s' does not support field access", buf_ptr(&container_type->name)));
        return ira->codegen->invalid_inst_gen;
    }
}
|
|
|
|
// Analyze a source-level pointer store. Both operands must already have
// valid types; the real work (coercion, comptime stores, const checks)
// happens in ir_analyze_store_ptr.
static IrInstGen *ir_analyze_instruction_store_ptr(IrAnalyze *ira, IrInstSrcStorePtr *instruction) {
    IrInstGen *dest_ptr = instruction->ptr->child;
    if (type_is_invalid(dest_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *stored_value = instruction->value->child;
    if (type_is_invalid(stored_value->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_store_ptr(ira, &instruction->base.base, dest_ptr, stored_value,
            instruction->allow_write_through_const);
}
|
|
|
|
// Analyze a source-level pointer load by delegating to ir_get_deref, which
// handles both comptime-known and runtime pointers.
static IrInstGen *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstSrcLoadPtr *instruction) {
    IrInstGen *loaded_ptr = instruction->ptr->child;
    if (type_is_invalid(loaded_ptr->value->type))
        return ira->codegen->invalid_inst_gen;
    return ir_get_deref(ira, &instruction->base.base, loaded_ptr, nullptr);
}
|
|
|
|
// Analyze @TypeOf: with one argument, the result is simply that argument's
// type; with several, the result is the peer-resolved type of all arguments.
// Returns a comptime `type` constant, or invalid_inst_gen on error.
static IrInstGen *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstSrcTypeOf *typeof_instruction) {
    ZigType *type_entry;

    const size_t value_count = typeof_instruction->value_count;

    // Fast path for the common case of TypeOf with a single argument
    if (value_count < 2) {
        type_entry = typeof_instruction->value.scalar->child->value->type;
    } else {
        IrInstGen **args = heap::c_allocator.allocate<IrInstGen*>(value_count);
        for (size_t i = 0; i < value_count; i += 1) {
            IrInstGen *value = typeof_instruction->value.list[i]->child;
            if (type_is_invalid(value->value->type)) {
                // Fix: previously this early return leaked `args`.
                heap::c_allocator.deallocate(args, value_count);
                return ira->codegen->invalid_inst_gen;
            }
            args[i] = value;
        }

        type_entry = ir_resolve_peer_types(ira, typeof_instruction->base.base.source_node,
                nullptr, args, value_count);

        heap::c_allocator.deallocate(args, value_count);
    }

    if (type_is_invalid(type_entry))
        return ira->codegen->invalid_inst_gen;

    return ir_const_type(ira, &typeof_instruction->base.base, type_entry);
}
|
|
|
|
// Analyze @setCold: records the cold flag on the enclosing function.
// Errors when used outside a function or more than once per function.
static IrInstGen *ir_analyze_instruction_set_cold(IrAnalyze *ira, IrInstSrcSetCold *instruction) {
    // ignore setCold when running functions at compile time
    if (ira->new_irb.exec->is_inline)
        return ir_const_void(ira, &instruction->base.base);

    // The argument must be a comptime-known bool.
    bool cold_requested;
    if (!ir_resolve_bool(ira, instruction->is_cold->child, &cold_requested))
        return ira->codegen->invalid_inst_gen;

    ZigFn *owner_fn = scope_fn_entry(instruction->base.base.scope);
    if (owner_fn == nullptr) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("@setCold outside function"));
        return ira->codegen->invalid_inst_gen;
    }

    // Only one @setCold per function; point at the first one on conflict.
    if (owner_fn->set_cold_node != nullptr) {
        ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
                buf_sprintf("cold set twice in same function"));
        add_error_note(ira->codegen, msg, owner_fn->set_cold_node, buf_sprintf("first set here"));
        return ira->codegen->invalid_inst_gen;
    }

    owner_fn->set_cold_node = instruction->base.base.source_node;
    owner_fn->is_cold = cold_requested;

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Analyze @setRuntimeSafety: walks outward from the instruction's scope to
// the nearest block, function-definition, or decls scope and records the
// safety setting there. Errors when the same scope sets safety twice.
static IrInstGen *ir_analyze_instruction_set_runtime_safety(IrAnalyze *ira,
    IrInstSrcSetRuntimeSafety *set_runtime_safety_instruction)
{
    if (ira->new_irb.exec->is_inline) {
        // ignore setRuntimeSafety when running functions at compile time
        return ir_const_void(ira, &set_runtime_safety_instruction->base.base);
    }

    // Out-pointers into whichever scope object ends up owning the setting.
    // They are only assigned when the loop below breaks; the assert after the
    // loop guards the (expected-impossible) case of reaching a null scope
    // with these still uninitialized.
    bool *safety_off_ptr;
    AstNode **safety_set_node_ptr;

    Scope *scope = set_runtime_safety_instruction->base.base.scope;
    while (scope != nullptr) {
        if (scope->id == ScopeIdBlock) {
            ScopeBlock *block_scope = (ScopeBlock *)scope;
            safety_off_ptr = &block_scope->safety_off;
            safety_set_node_ptr = &block_scope->safety_set_node;
            break;
        } else if (scope->id == ScopeIdFnDef) {
            ScopeFnDef *def_scope = (ScopeFnDef *)scope;
            ZigFn *target_fn = def_scope->fn_entry;
            assert(target_fn->def_scope != nullptr);
            safety_off_ptr = &target_fn->def_scope->safety_off;
            safety_set_node_ptr = &target_fn->def_scope->safety_set_node;
            break;
        } else if (scope->id == ScopeIdDecls) {
            ScopeDecls *decls_scope = (ScopeDecls *)scope;
            safety_off_ptr = &decls_scope->safety_off;
            safety_set_node_ptr = &decls_scope->safety_set_node;
            break;
        } else {
            // Not a scope kind that carries safety settings; keep walking out.
            scope = scope->parent;
            continue;
        }
    }
    assert(scope != nullptr);

    // The argument must be a comptime-known bool.
    IrInstGen *safety_on_value = set_runtime_safety_instruction->safety_on->child;
    bool want_runtime_safety;
    if (!ir_resolve_bool(ira, safety_on_value, &want_runtime_safety))
        return ira->codegen->invalid_inst_gen;

    // Only one @setRuntimeSafety per scope; point at the first on conflict.
    AstNode *source_node = set_runtime_safety_instruction->base.base.source_node;
    if (*safety_set_node_ptr) {
        ErrorMsg *msg = ir_add_error_node(ira, source_node,
                buf_sprintf("runtime safety set twice for same scope"));
        add_error_note(ira->codegen, msg, *safety_set_node_ptr, buf_sprintf("first set here"));
        return ira->codegen->invalid_inst_gen;
    }
    *safety_set_node_ptr = source_node;
    // The scope stores the inverted flag ("safety off").
    *safety_off_ptr = !want_runtime_safety;

    return ir_const_void(ira, &set_runtime_safety_instruction->base.base);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
        IrInstSrcSetFloatMode *instruction)
{
    // Implements @setFloatMode(mode): toggles fast-math semantics for the
    // nearest enclosing block / function / container scope.
    if (ira->new_irb.exec->is_inline) {
        // ignore setFloatMode when running functions at compile time
        return ir_const_void(ira, &instruction->base.base);
    }

    // Initialized to nullptr so that, if the scope walk below ever exhausts
    // the chain (the assert after the loop is compiled out in release
    // builds), we fault on a clean null dereference instead of reading
    // uninitialized pointers (undefined behavior).
    bool *fast_math_on_ptr = nullptr;
    AstNode **fast_math_set_node_ptr = nullptr;

    // Walk up the scope chain to the nearest scope kind that carries a
    // float-mode setting: a block, a function definition, or a container.
    Scope *scope = instruction->base.base.scope;
    while (scope != nullptr) {
        if (scope->id == ScopeIdBlock) {
            ScopeBlock *block_scope = (ScopeBlock *)scope;
            fast_math_on_ptr = &block_scope->fast_math_on;
            fast_math_set_node_ptr = &block_scope->fast_math_set_node;
            break;
        } else if (scope->id == ScopeIdFnDef) {
            ScopeFnDef *def_scope = (ScopeFnDef *)scope;
            ZigFn *target_fn = def_scope->fn_entry;
            assert(target_fn->def_scope != nullptr);
            fast_math_on_ptr = &target_fn->def_scope->fast_math_on;
            fast_math_set_node_ptr = &target_fn->def_scope->fast_math_set_node;
            break;
        } else if (scope->id == ScopeIdDecls) {
            ScopeDecls *decls_scope = (ScopeDecls *)scope;
            fast_math_on_ptr = &decls_scope->fast_math_on;
            fast_math_set_node_ptr = &decls_scope->fast_math_set_node;
            break;
        } else {
            scope = scope->parent;
            continue;
        }
    }
    // Every instruction lives inside at least a ScopeDecls, so the walk must
    // have stopped on one of the cases above.
    assert(scope != nullptr);

    IrInstGen *float_mode_value = instruction->mode_value->child;
    FloatMode float_mode_scalar;
    if (!ir_resolve_float_mode(ira, float_mode_value, &float_mode_scalar))
        return ira->codegen->invalid_inst_gen;

    // Only one @setFloatMode per scope; point at the first call on conflict.
    AstNode *source_node = instruction->base.base.source_node;
    if (*fast_math_set_node_ptr) {
        ErrorMsg *msg = ir_add_error_node(ira, source_node,
                buf_sprintf("float mode set twice for same scope"));
        add_error_note(ira->codegen, msg, *fast_math_set_node_ptr, buf_sprintf("first set here"));
        return ira->codegen->invalid_inst_gen;
    }
    *fast_math_set_node_ptr = source_node;
    *fast_math_on_ptr = (float_mode_scalar == FloatModeOptimized);

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_any_frame_type(IrAnalyze *ira, IrInstSrcAnyFrameType *instruction) {
    // Analyze `anyframe` / `anyframe->T`. A missing payload expression yields
    // the generic anyframe type (null payload).
    ZigType *resolved_payload = nullptr;
    if (instruction->payload_type != nullptr) {
        resolved_payload = ir_resolve_type(ira, instruction->payload_type->child);
        if (type_is_invalid(resolved_payload))
            return ira->codegen->invalid_inst_gen;
    }
    return ir_const_type(ira, &instruction->base.base,
            get_any_frame_type(ira->codegen, resolved_payload));
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_slice_type(IrAnalyze *ira, IrInstSrcSliceType *slice_type_instruction) {
    // Produce a lazily-evaluated slice type; the concrete type is computed
    // later, when the lazy value is first forced.
    IrInstGen *result = ir_const(ira, &slice_type_instruction->base.base, ira->codegen->builtin_types.entry_type);
    result->value->special = ConstValSpecialLazy;

    LazyValueSliceType *lazy = heap::c_allocator.create<LazyValueSliceType>();
    lazy->ira = ira; ira_ref(ira);
    result->value->data.x_lazy = &lazy->base;
    lazy->base.id = LazyValueIdSliceType;

    // Alignment is optional; when present it must be a comptime-known
    // (possibly still lazy) constant.
    if (slice_type_instruction->align_value != nullptr) {
        lazy->align_inst = slice_type_instruction->align_value->child;
        if (ir_resolve_const(ira, lazy->align_inst, LazyOk) == nullptr)
            return ira->codegen->invalid_inst_gen;
    }

    // Sentinel is optional too, with the same comptime requirement.
    if (slice_type_instruction->sentinel != nullptr) {
        lazy->sentinel = slice_type_instruction->sentinel->child;
        if (ir_resolve_const(ira, lazy->sentinel, LazyOk) == nullptr)
            return ira->codegen->invalid_inst_gen;
    }

    // Element type may itself be lazy; just require it to resolve as a type.
    lazy->elem_type = slice_type_instruction->child_type->child;
    if (ir_resolve_type_lazy(ira, lazy->elem_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Qualifiers come straight off the source instruction.
    lazy->is_const = slice_type_instruction->is_const;
    lazy->is_volatile = slice_type_instruction->is_volatile;
    lazy->is_allowzero = slice_type_instruction->is_allow_zero;

    return result;
}
|
|
|
|
// Analyze an inline assembly expression. Global asm is appended verbatim to
// the module's global asm buffer; function-level asm is validated (template
// placeholders must match declared inputs/outputs, inputs must be sized
// types) and lowered to a gen instruction.
static IrInstGen *ir_analyze_instruction_asm(IrAnalyze *ira, IrInstSrcAsm *asm_instruction) {
    Error err;

    assert(asm_instruction->base.base.source_node->type == NodeTypeAsmExpr);

    AstNode *node = asm_instruction->base.base.source_node;
    AstNodeAsmExpr *asm_expr = &asm_instruction->base.base.source_node->data.asm_expr;

    // The template must be a comptime-known string.
    Buf *template_buf = ir_resolve_str(ira, asm_instruction->asm_template->child);
    if (template_buf == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Module-level asm: no operands, no codegen instruction — just append the
    // text to the module's global asm and produce void.
    if (asm_instruction->is_global) {
        buf_append_char(&ira->codegen->global_asm, '\n');
        buf_append_buf(&ira->codegen->global_asm, template_buf);

        return ir_const_void(ira, &asm_instruction->base.base);
    }

    // Function-level asm has observable side effects at runtime.
    if (!ir_emit_global_runtime_side_effect(ira, &asm_instruction->base.base))
        return ira->codegen->invalid_inst_gen;

    ZigList<AsmToken> tok_list = {};
    if ((err = parse_asm_template(ira, node, template_buf, &tok_list))) {
        return ira->codegen->invalid_inst_gen;
    }

    // Every %[name] placeholder in the template must name a declared input
    // or output operand.
    for (size_t token_i = 0; token_i < tok_list.length; token_i += 1) {
        AsmToken asm_token = tok_list.at(token_i);
        if (asm_token.id == AsmTokenIdVar) {
            size_t index = find_asm_index(ira->codegen, node, &asm_token, template_buf);
            if (index == SIZE_MAX) {
                // Skip the leading "%[" of the token when reporting the name.
                const char *ptr = buf_ptr(template_buf) + asm_token.start + 2;
                uint32_t len = asm_token.end - asm_token.start - 2;

                add_node_error(ira->codegen, node,
                    buf_sprintf("could not find '%.*s' in the inputs or outputs",
                        len, ptr));
                return ira->codegen->invalid_inst_gen;
            }
        }
    }

    // TODO validate the output types and variable types

    IrInstGen **input_list = heap::c_allocator.allocate<IrInstGen *>(asm_expr->input_list.length);
    IrInstGen **output_types = heap::c_allocator.allocate<IrInstGen *>(asm_expr->output_list.length);

    // The asm expression's result type is the type of the output operand
    // that uses the "-> T" return syntax (void when there is none).
    ZigType *return_type = ira->codegen->builtin_types.entry_void;
    for (size_t i = 0; i < asm_expr->output_list.length; i += 1) {
        AsmOutput *asm_output = asm_expr->output_list.at(i);
        if (asm_output->return_type) {
            output_types[i] = asm_instruction->output_types[i]->child;
            return_type = ir_resolve_type(ira, output_types[i]);
            if (type_is_invalid(return_type))
                return ira->codegen->invalid_inst_gen;
        }
    }

    for (size_t i = 0; i < asm_expr->input_list.length; i += 1) {
        IrInstGen *const input_value = asm_instruction->input_list[i]->child;
        if (type_is_invalid(input_value->value->type))
            return ira->codegen->invalid_inst_gen;

        // comptime_int / comptime_float have no machine representation, so
        // they cannot be materialized as asm inputs.
        if (instr_is_comptime(input_value) &&
            (input_value->value->type->id == ZigTypeIdComptimeInt ||
            input_value->value->type->id == ZigTypeIdComptimeFloat)) {
            ir_add_error(ira, &input_value->base,
                buf_sprintf("expected sized integer or sized float, found %s", buf_ptr(&input_value->value->type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        input_list[i] = input_value;
    }

    return ir_build_asm_gen(ira, &asm_instruction->base.base,
        template_buf, tok_list.items, tok_list.length,
        input_list, output_types, asm_instruction->output_vars, asm_instruction->return_count,
        asm_instruction->has_side_effects, return_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_array_type(IrAnalyze *ira, IrInstSrcArrayType *array_type_instruction) {
    // Produce a lazily-evaluated array type [len:sentinel]T; the concrete
    // type is computed when the lazy value is first forced.
    IrInstGen *result = ir_const(ira, &array_type_instruction->base.base, ira->codegen->builtin_types.entry_type);
    result->value->special = ConstValSpecialLazy;

    LazyValueArrayType *lazy = heap::c_allocator.create<LazyValueArrayType>();
    lazy->ira = ira; ira_ref(ira);
    result->value->data.x_lazy = &lazy->base;
    lazy->base.id = LazyValueIdArrayType;

    // Element type may itself be lazy; the length must resolve to a usize now.
    lazy->elem_type = array_type_instruction->child_type->child;
    if (ir_resolve_type_lazy(ira, lazy->elem_type) == nullptr)
        return ira->codegen->invalid_inst_gen;
    if (!ir_resolve_usize(ira, array_type_instruction->size->child, &lazy->length))
        return ira->codegen->invalid_inst_gen;

    // Optional sentinel value; must be a comptime-known (possibly lazy) constant.
    if (array_type_instruction->sentinel != nullptr) {
        lazy->sentinel = array_type_instruction->sentinel->child;
        if (ir_resolve_const(ira, lazy->sentinel, LazyOk) == nullptr)
            return ira->codegen->invalid_inst_gen;
    }

    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_size_of(IrAnalyze *ira, IrInstSrcSizeOf *instruction) {
    // @sizeOf / @bitSizeOf: the answer is deferred (lazy) until the target
    // type's layout is known.
    IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_num_lit_int);
    result->value->special = ConstValSpecialLazy;

    LazyValueSizeOf *lazy = heap::c_allocator.create<LazyValueSizeOf>();
    lazy->ira = ira; ira_ref(ira);
    result->value->data.x_lazy = &lazy->base;
    lazy->base.id = LazyValueIdSizeOf;
    // Selects bit-size (@bitSizeOf) versus byte-size (@sizeOf) semantics.
    lazy->bit_size = instruction->bit_size;

    lazy->target_type = instruction->type_value->child;
    if (ir_resolve_type_lazy(ira, lazy->target_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    return result;
}
|
|
|
|
// Analyze `value != null` for any operand type. Allowzero pointers and
// optionals fold at comptime when possible; other types fold trivially
// (only the `null` literal itself is null).
static IrInstGen *ir_analyze_test_non_null(IrAnalyze *ira, IrInst *source_inst, IrInstGen *value) {
    ZigType *operand_type = value->value->type;

    if (operand_type->id == ZigTypeIdPointer && operand_type->data.pointer.allow_zero) {
        // Allowzero (e.g. C) pointer: non-null means "not the zero address".
        if (instr_is_comptime(value)) {
            ZigValue *ptr_val = ir_resolve_const(ira, value, UndefOk);
            if (ptr_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (ptr_val->special == ConstValSpecialUndef)
                return ir_const_undef(ira, source_inst, ira->codegen->builtin_types.entry_bool);
            bool ptr_is_null = ptr_val->data.x_ptr.special == ConstPtrSpecialNull ||
                (ptr_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
                    ptr_val->data.x_ptr.data.hard_coded_addr.addr == 0);
            return ir_const_bool(ira, source_inst, !ptr_is_null);
        }
        return ir_build_test_non_null_gen(ira, source_inst, value);
    }

    if (operand_type->id == ZigTypeIdOptional) {
        if (instr_is_comptime(value)) {
            ZigValue *opt_val = ir_resolve_const(ira, value, UndefOk);
            if (opt_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (opt_val->special == ConstValSpecialUndef)
                return ir_const_undef(ira, source_inst, ira->codegen->builtin_types.entry_bool);
            return ir_const_bool(ira, source_inst, !optional_value_is_null(opt_val));
        }
        return ir_build_test_non_null_gen(ira, source_inst, value);
    }

    // The `null` literal is always null; every other type is never null.
    return ir_const_bool(ira, source_inst, operand_type->id != ZigTypeIdNull);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrInstSrcTestNonNull *instruction) {
    // Thin wrapper: validate the operand, then defer to the shared analysis.
    IrInstGen *operand = instruction->value->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;
    return ir_analyze_test_non_null(ira, &instruction->base.base, operand);
}
|
|
|
|
// Given a pointer to an optional, produce a pointer to its payload.
// `safety_check_on` inserts a runtime null check; `initializing` means the
// payload is about to be written, so a null optional is given a payload slot
// rather than rejected.
static IrInstGen *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *base_ptr, bool safety_check_on, bool initializing)
{
    Error err;

    ZigType *type_entry = get_ptr_elem_type(ira->codegen, base_ptr);
    if (type_is_invalid(type_entry))
        return ira->codegen->invalid_inst_gen;

    // C pointers are optional-like: unwrapping is the identity operation,
    // plus a null check (comptime or runtime).
    if (type_entry->id == ZigTypeIdPointer && type_entry->data.pointer.ptr_len == PtrLenC) {
        if (instr_is_comptime(base_ptr)) {
            ZigValue *val = ir_resolve_const(ira, base_ptr, UndefBad);
            if (!val)
                return ira->codegen->invalid_inst_gen;
            if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
                ZigValue *c_ptr_val = const_ptr_pointee(ira, ira->codegen, val, source_instr->source_node);
                if (c_ptr_val == nullptr)
                    return ira->codegen->invalid_inst_gen;
                // Null is either the explicit null pointer or hard-coded address 0.
                bool is_null = c_ptr_val->data.x_ptr.special == ConstPtrSpecialNull ||
                    (c_ptr_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
                    c_ptr_val->data.x_ptr.data.hard_coded_addr.addr == 0);
                if (is_null) {
                    ir_add_error(ira, source_instr, buf_sprintf("unable to unwrap null"));
                    return ira->codegen->invalid_inst_gen;
                }
                return base_ptr;
            }
        }
        if (!safety_check_on)
            return base_ptr;
        // Runtime safety: load the C pointer and assert it is non-null.
        IrInstGen *c_ptr_val = ir_get_deref(ira, source_instr, base_ptr, nullptr);
        ir_build_assert_non_null(ira, source_instr, c_ptr_val);
        return base_ptr;
    }

    if (type_entry->id != ZigTypeIdOptional) {
        ir_add_error(ira, &base_ptr->base,
            buf_sprintf("expected optional type, found '%s'", buf_ptr(&type_entry->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Result: single-item pointer to the payload, inheriting const/volatile
    // from the pointer to the optional.
    ZigType *child_type = type_entry->data.maybe.child_type;
    ZigType *result_type = get_pointer_to_type_extra(ira->codegen, child_type,
            base_ptr->value->type->data.pointer.is_const, base_ptr->value->type->data.pointer.is_volatile,
            PtrLenSingle, 0, 0, 0, false);

    // True when ?T and T share the same comptime representation (e.g. ?*T),
    // in which case the optional value can be aliased directly as the payload.
    bool same_comptime_repr = types_have_same_zig_comptime_repr(ira->codegen, child_type, type_entry);

    if (instr_is_comptime(base_ptr)) {
        ZigValue *ptr_val = ir_resolve_const(ira, base_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
            ZigValue *optional_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node);
            if (optional_val == nullptr)
                return ira->codegen->invalid_inst_gen;

            if (initializing) {
                // Initializing: make the optional non-null, creating an undef
                // payload slot for the caller to store through.
                switch (type_has_one_possible_value(ira->codegen, child_type)) {
                    case OnePossibleValueInvalid:
                        return ira->codegen->invalid_inst_gen;
                    case OnePossibleValueNo:
                        if (!same_comptime_repr) {
                            ZigValue *payload_val = ira->codegen->pass1_arena->create<ZigValue>();
                            payload_val->type = child_type;
                            payload_val->special = ConstValSpecialUndef;
                            // Link the payload back to its containing optional.
                            payload_val->parent.id = ConstParentIdOptionalPayload;
                            payload_val->parent.data.p_optional_payload.optional_val = optional_val;

                            optional_val->data.x_optional = payload_val;
                            optional_val->special = ConstValSpecialStatic;
                        }
                        break;
                    case OnePossibleValueYes: {
                        optional_val->special = ConstValSpecialStatic;
                        optional_val->data.x_optional = get_the_one_possible_value(ira->codegen, child_type);
                        break;
                    }
                }
            } else {
                // Reading: the optional must be comptime-known and non-null.
                if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
                        source_instr->source_node, optional_val, UndefBad)))
                    return ira->codegen->invalid_inst_gen;
                if (optional_value_is_null(optional_val)) {
                    ir_add_error(ira, source_instr, buf_sprintf("unable to unwrap null"));
                    return ira->codegen->invalid_inst_gen;
                }
            }

            IrInstGen *result;
            if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
                // Comptime-inferred pointer: keep a runtime instruction (in
                // case inference later fails) but give it a static value.
                result = ir_build_optional_unwrap_ptr_gen(ira, source_instr, base_ptr, false,
                        initializing, result_type);
                result->value->special = ConstValSpecialStatic;
            } else {
                result = ir_const(ira, source_instr, result_type);
            }
            // Fill in the comptime pointer-to-payload value.
            ZigValue *result_val = result->value;
            result_val->data.x_ptr.special = ConstPtrSpecialRef;
            result_val->data.x_ptr.mut = ptr_val->data.x_ptr.mut;
            switch (type_has_one_possible_value(ira->codegen, child_type)) {
                case OnePossibleValueInvalid:
                    return ira->codegen->invalid_inst_gen;
                case OnePossibleValueNo:
                    if (same_comptime_repr) {
                        // Optional and payload share representation: point at
                        // the optional value itself.
                        result_val->data.x_ptr.data.ref.pointee = optional_val;
                    } else {
                        assert(optional_val->data.x_optional != nullptr);
                        result_val->data.x_ptr.data.ref.pointee = optional_val->data.x_optional;
                    }
                    break;
                case OnePossibleValueYes:
                    assert(optional_val->data.x_optional != nullptr);
                    result_val->data.x_ptr.data.ref.pointee = optional_val->data.x_optional;
                    break;
            }
            return result;
        }
    }

    // Runtime optional: emit the unwrap instruction (with optional safety check).
    return ir_build_optional_unwrap_ptr_gen(ira, source_instr, base_ptr, safety_check_on,
            initializing, result_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_optional_unwrap_ptr(IrAnalyze *ira,
        IrInstSrcOptionalUnwrapPtr *instruction)
{
    // Thin wrapper over ir_analyze_unwrap_optional_payload in its
    // non-initializing (reading) form.
    IrInstGen *opt_ptr = instruction->base_ptr->child;
    if (type_is_invalid(opt_ptr->value->type))
        return ira->codegen->invalid_inst_gen;
    return ir_analyze_unwrap_optional_payload(ira, &instruction->base.base, opt_ptr,
            instruction->safety_check_on, false);
}
|
|
|
|
// Analyze @ctz(T, operand): count of trailing zero bits of an integer.
static IrInstGen *ir_analyze_instruction_ctz(IrAnalyze *ira, IrInstSrcCtz *instruction) {
    ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
    if (type_is_invalid(int_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op = ir_implicit_cast(ira, instruction->op->child, int_type);
    if (type_is_invalid(op->value->type))
        return ira->codegen->invalid_inst_gen;

    // A zero-bit integer has no bits at all, so the count is always 0.
    if (int_type->data.integral.bit_count == 0)
        return ir_const_unsigned(ira, &instruction->base.base, 0);

    if (instr_is_comptime(op)) {
        ZigValue *val = ir_resolve_const(ira, op, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, &instruction->base.base, ira->codegen->builtin_types.entry_num_lit_int);
        // Use the resolved constant `val` (rather than re-reading op->value),
        // consistent with ir_analyze_instruction_pop_count; ir_resolve_const
        // has forced any lazy value by this point.
        size_t result_usize = bigint_ctz(&val->data.x_bigint, int_type->data.integral.bit_count);
        return ir_const_unsigned(ira, &instruction->base.base, result_usize);
    }

    // Runtime result fits in the smallest unsigned int able to hold bit_count.
    ZigType *return_type = get_smallest_unsigned_int_type(ira->codegen, int_type->data.integral.bit_count);
    return ir_build_ctz_gen(ira, &instruction->base.base, return_type, op);
}
|
|
|
|
// Analyze @clz(T, operand): count of leading zero bits of an integer.
static IrInstGen *ir_analyze_instruction_clz(IrAnalyze *ira, IrInstSrcClz *instruction) {
    ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
    if (type_is_invalid(int_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op = ir_implicit_cast(ira, instruction->op->child, int_type);
    if (type_is_invalid(op->value->type))
        return ira->codegen->invalid_inst_gen;

    // A zero-bit integer has no bits at all, so the count is always 0.
    if (int_type->data.integral.bit_count == 0)
        return ir_const_unsigned(ira, &instruction->base.base, 0);

    if (instr_is_comptime(op)) {
        ZigValue *val = ir_resolve_const(ira, op, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, &instruction->base.base, ira->codegen->builtin_types.entry_num_lit_int);
        // Use the resolved constant `val` (rather than re-reading op->value),
        // consistent with ir_analyze_instruction_pop_count; ir_resolve_const
        // has forced any lazy value by this point.
        size_t result_usize = bigint_clz(&val->data.x_bigint, int_type->data.integral.bit_count);
        return ir_const_unsigned(ira, &instruction->base.base, result_usize);
    }

    // Runtime result fits in the smallest unsigned int able to hold bit_count.
    ZigType *return_type = get_smallest_unsigned_int_type(ira->codegen, int_type->data.integral.bit_count);
    return ir_build_clz_gen(ira, &instruction->base.base, return_type, op);
}
|
|
|
|
// Analyze @popCount(T, operand): number of set bits in the integer operand.
static IrInstGen *ir_analyze_instruction_pop_count(IrAnalyze *ira, IrInstSrcPopCount *instruction) {
    ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
    if (type_is_invalid(int_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op = ir_implicit_cast(ira, instruction->op->child, int_type);
    if (type_is_invalid(op->value->type))
        return ira->codegen->invalid_inst_gen;

    // A zero-bit integer has no bits, hence no set bits.
    if (int_type->data.integral.bit_count == 0)
        return ir_const_unsigned(ira, &instruction->base.base, 0);

    if (instr_is_comptime(op)) {
        ZigValue *val = ir_resolve_const(ira, op, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, &instruction->base.base, ira->codegen->builtin_types.entry_num_lit_int);

        // Negative values are counted in their two's-complement form at the
        // integer's declared bit width; non-negative values directly.
        bool is_negative = bigint_cmp_zero(&val->data.x_bigint) == CmpLT;
        size_t bits_set = is_negative
            ? bigint_popcount_signed(&val->data.x_bigint, int_type->data.integral.bit_count)
            : bigint_popcount_unsigned(&val->data.x_bigint);
        return ir_const_unsigned(ira, &instruction->base.base, bits_set);
    }

    // Runtime result fits in the smallest unsigned int able to hold bit_count.
    ZigType *return_type = get_smallest_unsigned_int_type(ira->codegen, int_type->data.integral.bit_count);
    return ir_build_pop_count_gen(ira, &instruction->base.base, return_type, op);
}
|
|
|
|
// Extract the enum tag value from a union operand. `is_gen` suppresses the
// "union has no associated enum" error for tag accesses generated internally
// by the compiler rather than written by the user.
static IrInstGen *ir_analyze_union_tag(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value, bool is_gen) {
    if (type_is_invalid(value->value->type))
        return ira->codegen->invalid_inst_gen;

    if (value->value->type->id != ZigTypeIdUnion) {
        ir_add_error(ira, &value->base,
            buf_sprintf("expected enum or union type, found '%s'", buf_ptr(&value->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }
    // User code may only take the tag of a union with an explicit tag type.
    if (!value->value->type->data.unionation.have_explicit_tag_type && !is_gen) {
        ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("union has no associated enum"));
        if (value->value->type->data.unionation.decl_node != nullptr) {
            add_error_note(ira->codegen, msg, value->value->type->data.unionation.decl_node,
                    buf_sprintf("declared here"));
        }
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *tag_type = value->value->type->data.unionation.tag_type;
    assert(tag_type->id == ZigTypeIdEnum);

    // Comptime-known union: fold directly to a constant enum tag value.
    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->type = tag_type;
        const_instruction->base.value->special = ConstValSpecialStatic;
        bigint_init_bigint(&const_instruction->base.value->data.x_enum_tag, &val->data.x_union.tag);
        return &const_instruction->base;
    }

    // Runtime union: emit an instruction that loads the tag field.
    return ir_build_union_tag(ira, source_instr, value, tag_type);
}
|
|
|
|
// Analyze a switch branch. When the target value (or the whole switch) is
// comptime-known, the matching prong is selected here and its block is
// inlined or branched to directly; otherwise a runtime switch-br gen
// instruction is produced.
static IrInstGen *ir_analyze_instruction_switch_br(IrAnalyze *ira,
        IrInstSrcSwitchBr *switch_br_instruction)
{
    IrInstGen *target_value = switch_br_instruction->target_value->child;
    if (type_is_invalid(target_value->value->type))
        return ir_unreach_error(ira);

    if (switch_br_instruction->switch_prongs_void != nullptr) {
        if (type_is_invalid(switch_br_instruction->switch_prongs_void->child->value->type)) {
            return ir_unreach_error(ira);
        }
    }

    size_t case_count = switch_br_instruction->case_count;

    bool is_comptime;
    if (!ir_resolve_comptime(ira, switch_br_instruction->is_comptime->child, &is_comptime))
        return ira->codegen->invalid_inst_gen;

    if (is_comptime || instr_is_comptime(target_value)) {
        // Comptime path: find the prong whose value equals the target,
        // defaulting to the else block if none matches.
        ZigValue *target_val = ir_resolve_const(ira, target_value, UndefBad);
        if (!target_val)
            return ir_unreach_error(ira);

        IrBasicBlockSrc *old_dest_block = switch_br_instruction->else_block;
        for (size_t i = 0; i < case_count; i += 1) {
            IrInstSrcSwitchBrCase *old_case = &switch_br_instruction->cases[i];
            IrInstGen *case_value = old_case->value->child;
            if (type_is_invalid(case_value->value->type))
                return ir_unreach_error(ira);

            IrInstGen *casted_case_value = ir_implicit_cast(ira, case_value, target_value->value->type);
            if (type_is_invalid(casted_case_value->value->type))
                return ir_unreach_error(ira);

            ZigValue *case_val = ir_resolve_const(ira, casted_case_value, UndefBad);
            if (!case_val)
                return ir_unreach_error(ira);

            if (const_values_equal(ira->codegen, target_val, case_val)) {
                old_dest_block = old_case->block;
                break;
            }
        }

        // A singly-referenced destination can be inlined in place; otherwise
        // emit an unconditional branch to its generated counterpart.
        if (is_comptime || old_dest_block->ref_count == 1) {
            return ir_inline_bb(ira, &switch_br_instruction->base.base, old_dest_block);
        } else {
            IrBasicBlockGen *new_dest_block = ir_get_new_bb(ira, old_dest_block, &switch_br_instruction->base.base);
            IrInstGen *result = ir_build_br_gen(ira, &switch_br_instruction->base.base, new_dest_block);
            return ir_finish_anal(ira, result);
        }
    }

    // Runtime path: analyze every case value and map every case block.
    // `value` starts out invalid; it stays invalid if analysis of the case
    // value fails (checked in the second pass below).
    IrInstGenSwitchBrCase *cases = heap::c_allocator.allocate<IrInstGenSwitchBrCase>(case_count);
    for (size_t i = 0; i < case_count; i += 1) {
        IrInstSrcSwitchBrCase *old_case = &switch_br_instruction->cases[i];
        IrInstGenSwitchBrCase *new_case = &cases[i];
        new_case->block = ir_get_new_bb(ira, old_case->block, &switch_br_instruction->base.base);
        new_case->value = ira->codegen->invalid_inst_gen;

        // Calling ir_get_new_bb set the ref_instruction on the new basic block.
        // However a switch br may branch to the same basic block which would trigger an
        // incorrect re-generation of the block. So we set it to null here and assign
        // it back after the loop.
        new_case->block->ref_instruction = nullptr;

        IrInstSrc *old_value = old_case->value;
        IrInstGen *new_value = old_value->child;
        if (type_is_invalid(new_value->value->type))
            continue;

        IrInstGen *casted_new_value = ir_implicit_cast(ira, new_value, target_value->value->type);
        if (type_is_invalid(casted_new_value->value->type))
            continue;

        // Case values must be comptime-known even for a runtime switch.
        if (!ir_resolve_const(ira, casted_new_value, UndefBad))
            continue;

        new_case->value = casted_new_value;
    }

    // Second pass: a case left with an invalid value means an error was
    // reported above; also restore the ref_instruction cleared earlier.
    for (size_t i = 0; i < case_count; i += 1) {
        IrInstGenSwitchBrCase *new_case = &cases[i];
        if (type_is_invalid(new_case->value->value->type))
            return ir_unreach_error(ira);
        new_case->block->ref_instruction = &switch_br_instruction->base.base;
    }

    IrBasicBlockGen *new_else_block = ir_get_new_bb(ira, switch_br_instruction->else_block, &switch_br_instruction->base.base);
    IrInstGenSwitchBr *switch_br = ir_build_switch_br_gen(ira, &switch_br_instruction->base.base,
            target_value, new_else_block, case_count, cases);
    return ir_finish_anal(ira, &switch_br->base);
}
|
|
|
|
// Load the value being switched on through its pointer, folding to a
// comptime constant when possible. Unions are switched on their enum tag;
// single-possible-value enums fold without a load.
static IrInstGen *ir_analyze_instruction_switch_target(IrAnalyze *ira,
        IrInstSrcSwitchTarget *switch_target_instruction)
{
    Error err;
    IrInstGen *target_value_ptr = switch_target_instruction->target_value_ptr->child;
    if (type_is_invalid(target_value_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // Switching on a type: the operand is a comptime meta-type describing a
    // pointer; the switch target is that pointer's child type.
    if (target_value_ptr->value->type->id == ZigTypeIdMetaType) {
        assert(instr_is_comptime(target_value_ptr));
        ZigType *ptr_type = target_value_ptr->value->data.x_type;
        assert(ptr_type->id == ZigTypeIdPointer);
        return ir_const_type(ira, &switch_target_instruction->base.base, ptr_type->data.pointer.child_type);
    }

    ZigType *target_type = target_value_ptr->value->type->data.pointer.child_type;
    // Comptime-known pointee of the target pointer; stays nullptr when the
    // value is only known at runtime.
    ZigValue *pointee_val = nullptr;
    if (instr_is_comptime(target_value_ptr) && target_value_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
        pointee_val = const_ptr_pointee(ira, ira->codegen, target_value_ptr->value, target_value_ptr->base.source_node);
        if (pointee_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        if (pointee_val->special == ConstValSpecialRuntime)
            pointee_val = nullptr;
    }
    if ((err = type_resolve(ira->codegen, target_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    switch (target_type->id) {
        case ZigTypeIdInvalid:
            zig_unreachable();
        // Scalar-like types: the switch target is the pointee itself.
        case ZigTypeIdMetaType:
        case ZigTypeIdVoid:
        case ZigTypeIdBool:
        case ZigTypeIdInt:
        case ZigTypeIdFloat:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdPointer:
        case ZigTypeIdFn:
        case ZigTypeIdErrorSet: {
            if (pointee_val) {
                // Comptime: fold to a copy of the pointee value.
                IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, nullptr);
                copy_const_val(ira->codegen, result->value, pointee_val);
                result->value->type = target_type;
                return result;
            }

            // Runtime: emit a load of the target value.
            IrInstGen *result = ir_get_deref(ira, &switch_target_instruction->base.base, target_value_ptr, nullptr);
            result->value->type = target_type;
            return result;
        }
        case ZigTypeIdUnion: {
            // Only unions with an associated enum may be switched on.
            AstNode *decl_node = target_type->data.unionation.decl_node;
            if (!decl_node->data.container_decl.auto_enum &&
                decl_node->data.container_decl.init_arg_expr == nullptr)
            {
                ErrorMsg *msg = ir_add_error(ira, &target_value_ptr->base,
                    buf_sprintf("switch on union which has no attached enum"));
                add_error_note(ira->codegen, msg, decl_node,
                        buf_sprintf("consider 'union(enum)' here"));
                return ira->codegen->invalid_inst_gen;
            }
            ZigType *tag_type = target_type->data.unionation.tag_type;
            assert(tag_type != nullptr);
            assert(tag_type->id == ZigTypeIdEnum);
            if (pointee_val) {
                // Comptime: fold to the union's tag value.
                IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, tag_type);
                bigint_init_bigint(&result->value->data.x_enum_tag, &pointee_val->data.x_union.tag);
                return result;
            }

            // A tag enum with exactly one possible value folds without a load.
            if (can_fold_enum_type(tag_type)) {
                IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, tag_type);
                TypeEnumField *only_field = &tag_type->data.enumeration.fields[0];
                bigint_init_bigint(&result->value->data.x_enum_tag, &only_field->value);
                return result;
            }

            // Runtime: load the union and extract its tag.
            IrInstGen *union_value = ir_get_deref(ira, &switch_target_instruction->base.base, target_value_ptr, nullptr);
            union_value->value->type = target_type;

            return ir_build_union_tag(ira, &switch_target_instruction->base.base, union_value, tag_type);
        }
        case ZigTypeIdEnum: {
            if ((err = type_resolve(ira->codegen, target_type, ResolveStatusZeroBitsKnown)))
                return ira->codegen->invalid_inst_gen;

            // An enum with exactly one possible value folds without a load.
            if (can_fold_enum_type(target_type)) {
                TypeEnumField *only_field = &target_type->data.enumeration.fields[0];
                IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, target_type);
                bigint_init_bigint(&result->value->data.x_enum_tag, &only_field->value);
                return result;
            }

            if (pointee_val) {
                // Comptime: fold to the enum tag value.
                IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, target_type);
                bigint_init_bigint(&result->value->data.x_enum_tag, &pointee_val->data.x_enum_tag);
                return result;
            }

            // Runtime: load the enum value directly.
            IrInstGen *enum_value = ir_get_deref(ira, &switch_target_instruction->base.base, target_value_ptr, nullptr);
            enum_value->value->type = target_type;
            return enum_value;
        }
        // Every remaining type cannot be used as a switch target.
        case ZigTypeIdErrorUnion:
        case ZigTypeIdUnreachable:
        case ZigTypeIdArray:
        case ZigTypeIdStruct:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdOptional:
        case ZigTypeIdBoundFn:
        case ZigTypeIdOpaque:
        case ZigTypeIdVector:
        case ZigTypeIdFnFrame:
        case ZigTypeIdAnyFrame:
            ir_add_error(ira, &switch_target_instruction->base.base,
                buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name)));
            return ira->codegen->invalid_inst_gen;
    }
    zig_unreachable();
}
|
|
|
|
// Analyzes the capture variable of a switch prong with multiple values,
// e.g. `switch (u) { .a, .b => |payload| ... }`.
// For a union target: verifies all prong values map to union fields with the
// same payload type, then produces a pointer to that payload (folded at
// comptime when the operand pointer is comptime-known).
// For an error-set target: narrows the operand pointer to an ad-hoc error set
// built from the prong values.
// Any other target type is a compile error.
static IrInstGen *ir_analyze_instruction_switch_var(IrAnalyze *ira, IrInstSrcSwitchVar *instruction) {
    IrInstGen *target_value_ptr = instruction->target_value_ptr->child;
    if (type_is_invalid(target_value_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *ref_type = target_value_ptr->value->type;
    assert(ref_type->id == ZigTypeIdPointer);
    ZigType *target_type = target_value_ptr->value->type->data.pointer.child_type;
    if (target_type->id == ZigTypeIdUnion) {
        ZigType *enum_type = target_type->data.unionation.tag_type;
        assert(enum_type != nullptr);
        assert(enum_type->id == ZigTypeIdEnum);
        assert(instruction->prongs_len > 0);

        IrInstGen *first_prong_value = instruction->prongs_ptr[0]->child;
        if (type_is_invalid(first_prong_value->value->type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *first_casted_prong_value = ir_implicit_cast(ira, first_prong_value, enum_type);
        if (type_is_invalid(first_casted_prong_value->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigValue *first_prong_val = ir_resolve_const(ira, first_casted_prong_value, UndefBad);
        if (first_prong_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // The first prong determines the expected payload type; every other
        // prong must agree with it for the capture to be well-typed.
        TypeUnionField *first_field = find_union_field_by_tag(target_type, &first_prong_val->data.x_enum_tag);

        ErrorMsg *invalid_payload_msg = nullptr;
        for (size_t prong_i = 1; prong_i < instruction->prongs_len; prong_i += 1) {
            IrInstGen *this_prong_inst = instruction->prongs_ptr[prong_i]->child;
            if (type_is_invalid(this_prong_inst->value->type))
                return ira->codegen->invalid_inst_gen;

            IrInstGen *this_casted_prong_value = ir_implicit_cast(ira, this_prong_inst, enum_type);
            if (type_is_invalid(this_casted_prong_value->value->type))
                return ira->codegen->invalid_inst_gen;

            ZigValue *this_prong = ir_resolve_const(ira, this_casted_prong_value, UndefBad);
            if (this_prong == nullptr)
                return ira->codegen->invalid_inst_gen;

            TypeUnionField *payload_field = find_union_field_by_tag(target_type, &this_prong->data.x_enum_tag);
            ZigType *payload_type = payload_field->type_entry;
            if (first_field->type_entry != payload_type) {
                // Emit one error with a note per mismatching prong.
                if (invalid_payload_msg == nullptr) {
                    invalid_payload_msg = ir_add_error(ira, &instruction->base.base,
                        buf_sprintf("capture group with incompatible types"));
                    add_error_note(ira->codegen, invalid_payload_msg, first_prong_value->base.source_node,
                        buf_sprintf("type '%s' here", buf_ptr(&first_field->type_entry->name)));
                }
                add_error_note(ira->codegen, invalid_payload_msg, this_prong_inst->base.source_node,
                    buf_sprintf("type '%s' here", buf_ptr(&payload_field->type_entry->name)));
            }
        }

        if (invalid_payload_msg != nullptr) {
            return ira->codegen->invalid_inst_gen;
        }

        if (instr_is_comptime(target_value_ptr)) {
            ZigValue *target_val_ptr = ir_resolve_const(ira, target_value_ptr, UndefBad);
            // Fix: check the freshly resolved const value, not the already
            // validated instruction pointer. Previously a failed resolve fell
            // through and const_ptr_pointee dereferenced a null pointer.
            if (!target_val_ptr)
                return ira->codegen->invalid_inst_gen;

            ZigValue *pointee_val = const_ptr_pointee(ira, ira->codegen, target_val_ptr, instruction->base.base.source_node);
            if (pointee_val == nullptr)
                return ira->codegen->invalid_inst_gen;

            // Comptime fold: produce a const pointer aimed directly at the
            // union's payload value.
            IrInstGen *result = ir_const(ira, &instruction->base.base,
                get_pointer_to_type(ira->codegen, first_field->type_entry,
                    target_val_ptr->type->data.pointer.is_const));
            ZigValue *out_val = result->value;
            out_val->data.x_ptr.special = ConstPtrSpecialRef;
            out_val->data.x_ptr.mut = target_val_ptr->data.x_ptr.mut;
            out_val->data.x_ptr.data.ref.pointee = pointee_val->data.x_union.payload;
            return result;
        }

        // Runtime case: emit a union-field-pointer instruction.
        ZigType *result_type = get_pointer_to_type(ira->codegen, first_field->type_entry,
                target_value_ptr->value->type->data.pointer.is_const);
        return ir_build_union_field_ptr(ira, &instruction->base.base, target_value_ptr, first_field,
                false, false, result_type);
    } else if (target_type->id == ZigTypeIdErrorSet) {
        // construct an error set from the prong values
        ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
        err_set_type->size_in_bits = ira->codegen->builtin_types.entry_global_error_set->size_in_bits;
        err_set_type->abi_align = ira->codegen->builtin_types.entry_global_error_set->abi_align;
        err_set_type->abi_size = ira->codegen->builtin_types.entry_global_error_set->abi_size;
        ZigList<ErrorTableEntry *> error_list = {};
        buf_resize(&err_set_type->name, 0);
        buf_appendf(&err_set_type->name, "error{");
        for (size_t i = 0; i < instruction->prongs_len; i += 1) {
            ErrorTableEntry *err = ir_resolve_error(ira, instruction->prongs_ptr[i]->child);
            if (err == nullptr) {
                // Fix: the list buffer has not been transferred to the type
                // yet on this path; release it instead of leaking.
                error_list.deinit();
                return ira->codegen->invalid_inst_gen;
            }
            error_list.append(err);
            buf_appendf(&err_set_type->name, "%s,", buf_ptr(&err->name));
        }
        // Ownership of the items buffer transfers to the new error-set type.
        err_set_type->data.error_set.errors = error_list.items;
        err_set_type->data.error_set.err_count = error_list.length;
        buf_appendf(&err_set_type->name, "}");

        // Re-point the operand at the narrowed error set, preserving all
        // other pointer attributes of the original reference type.
        ZigType *new_target_value_ptr_type = get_pointer_to_type_extra(ira->codegen,
                err_set_type,
                ref_type->data.pointer.is_const, ref_type->data.pointer.is_volatile,
                ref_type->data.pointer.ptr_len,
                ref_type->data.pointer.explicit_alignment,
                ref_type->data.pointer.bit_offset_in_host, ref_type->data.pointer.host_int_bytes,
                ref_type->data.pointer.allow_zero);
        return ir_analyze_ptr_cast(ira, &instruction->base.base, target_value_ptr,
                &instruction->target_value_ptr->base, new_target_value_ptr_type, &instruction->base.base, false, false);
    } else {
        ir_add_error(ira, &instruction->base.base,
            buf_sprintf("switch on type '%s' provides no expression parameter", buf_ptr(&target_type->name)));
        return ira->codegen->invalid_inst_gen;
    }
}
|
|
|
|
// Analyzes the capture variable of a switch `else` prong (`else => |e| ...`).
// For an error-set operand this computes the complement set: the operand's
// errors minus those handled by the explicit cases, and re-points the operand
// pointer at that narrowed set. For any other operand type the pointer is
// passed through unchanged.
static IrInstGen *ir_analyze_instruction_switch_else_var(IrAnalyze *ira,
        IrInstSrcSwitchElseVar *instruction)
{
    IrInstGen *target_value_ptr = instruction->target_value_ptr->child;
    if (type_is_invalid(target_value_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *ref_type = target_value_ptr->value->type;
    assert(ref_type->id == ZigTypeIdPointer);
    ZigType *target_type = target_value_ptr->value->type->data.pointer.child_type;
    if (target_type->id == ZigTypeIdErrorSet) {
        // make a new set that has the other cases removed
        if (!resolve_inferred_error_set(ira->codegen, target_type, instruction->base.base.source_node)) {
            return ira->codegen->invalid_inst_gen;
        }
        if (type_is_global_error_set(target_type)) {
            // the type of the else capture variable still has to be the global error set.
            // once the runtime hint system is more sophisticated, we could add some hint information here.
            return target_value_ptr;
        }
        // Make note of the errors handled by other cases, indexed by error value.
        ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(ira->codegen->errors_by_index.length);
        // We may not have any case in the switch if this is a lone else
        const size_t switch_cases = instruction->switch_br ? instruction->switch_br->case_count : 0;
        for (size_t case_i = 0; case_i < switch_cases; case_i += 1) {
            IrInstSrcSwitchBrCase *br_case = &instruction->switch_br->cases[case_i];
            IrInstGen *case_expr = br_case->value->child;
            if (case_expr->value->type->id == ZigTypeIdErrorSet) {
                ErrorTableEntry *err = ir_resolve_error(ira, case_expr);
                if (err == nullptr) {
                    // Fix: release the table before bailing out (it was
                    // leaked on this path; the normal path deallocates it).
                    heap::c_allocator.deallocate(errors, ira->codegen->errors_by_index.length);
                    return ira->codegen->invalid_inst_gen;
                }
                errors[err->value] = err;
            } else if (case_expr->value->type->id == ZigTypeIdMetaType) {
                // A case may name a whole error set; mark all of its members
                // as handled.
                ZigType *err_set_type = ir_resolve_type(ira, case_expr);
                if (type_is_invalid(err_set_type)) {
                    // Fix: same leak as above.
                    heap::c_allocator.deallocate(errors, ira->codegen->errors_by_index.length);
                    return ira->codegen->invalid_inst_gen;
                }
                populate_error_set_table(errors, err_set_type);
            } else {
                zig_unreachable();
            }
        }
        ZigList<ErrorTableEntry *> result_list = {};

        ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
        buf_resize(&err_set_type->name, 0);
        buf_appendf(&err_set_type->name, "error{");

        // Look at all the errors in the type switched on and add them to the result_list
        // if they are not handled by cases.
        for (uint32_t i = 0; i < target_type->data.error_set.err_count; i += 1) {
            ErrorTableEntry *error_entry = target_type->data.error_set.errors[i];
            ErrorTableEntry *existing_entry = errors[error_entry->value];
            if (existing_entry == nullptr) {
                result_list.append(error_entry);
                buf_appendf(&err_set_type->name, "%s,", buf_ptr(&error_entry->name));
            }
        }
        heap::c_allocator.deallocate(errors, ira->codegen->errors_by_index.length);

        // Ownership of result_list's buffer transfers to the new type.
        err_set_type->data.error_set.err_count = result_list.length;
        err_set_type->data.error_set.errors = result_list.items;
        err_set_type->size_in_bits = ira->codegen->builtin_types.entry_global_error_set->size_in_bits;
        err_set_type->abi_align = ira->codegen->builtin_types.entry_global_error_set->abi_align;
        err_set_type->abi_size = ira->codegen->builtin_types.entry_global_error_set->abi_size;

        buf_appendf(&err_set_type->name, "}");

        // Same pointer attributes as the original reference, but aimed at the
        // complement error set.
        ZigType *new_target_value_ptr_type = get_pointer_to_type_extra(ira->codegen,
                err_set_type,
                ref_type->data.pointer.is_const, ref_type->data.pointer.is_volatile,
                ref_type->data.pointer.ptr_len,
                ref_type->data.pointer.explicit_alignment,
                ref_type->data.pointer.bit_offset_in_host, ref_type->data.pointer.host_int_bytes,
                ref_type->data.pointer.allow_zero);
        return ir_analyze_ptr_cast(ira, &instruction->base.base, target_value_ptr,
                &instruction->target_value_ptr->base, new_target_value_ptr_type, &instruction->base.base, false, false);
    }

    return target_value_ptr;
}
|
|
|
|
// Implements `@import("...")`: resolves the operand string relative to the
// importing file's package and yields the imported file's type as a comptime
// value. Emits a tailored compile error for each failure mode.
static IrInstGen *ir_analyze_instruction_import(IrAnalyze *ira, IrInstSrcImport *import_instruction) {
    Error err;

    IrInstGen *name_inst = import_instruction->name->child;
    Buf *import_target_str = ir_resolve_str(ira, name_inst);
    if (import_target_str == nullptr)
        return ira->codegen->invalid_inst_gen;

    AstNode *source_node = import_instruction->base.base.source_node;
    ZigType *importing_file = source_node->owner;

    ZigType *target_import;
    Buf *import_target_path;
    Buf full_path = BUF_INIT;
    err = analyze_import(ira->codegen, importing_file, import_target_str, &target_import,
            &import_target_path, &full_path);
    if (err != ErrorNone) {
        switch (err) {
            case ErrorImportOutsidePkgPath:
                ir_add_error_node(ira, source_node,
                    buf_sprintf("import of file outside package path: '%s'",
                        buf_ptr(import_target_path)));
                break;
            case ErrorFileNotFound:
                ir_add_error_node(ira, source_node,
                    buf_sprintf("unable to find '%s'", buf_ptr(import_target_path)));
                break;
            default:
                // Any other failure is an OS-level open error; report errno text.
                ir_add_error_node(ira, source_node,
                    buf_sprintf("unable to open '%s': %s", buf_ptr(&full_path), err_str(err)));
                break;
        }
        return ira->codegen->invalid_inst_gen;
    }

    return ir_const_type(ira, &import_instruction->base.base, target_import);
}
|
|
|
|
// Analyzes an address-of expression (`&expr`): produces a pointer to the
// operand. A comptime-known (static) operand yields a const pointer; nothing
// produced here is ever volatile.
static IrInstGen *ir_analyze_instruction_ref(IrAnalyze *ira, IrInstSrcRef *ref_instruction) {
    IrInstGen *operand = ref_instruction->value->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    const bool is_const = (operand->value->special == ConstValSpecialStatic);
    const bool is_volatile = false;

    return ir_get_ref(ira, &ref_instruction->base.base, operand, is_const, is_volatile);
}
|
|
|
|
// Analyzes a union initialization expression `U{ .field = value }`.
// Validates the named field exists, manages the inferred-const/runtime state
// of the result location, and dereferences it to produce the initialized
// union value. Errors if a comptime-required union cannot be evaluated at
// comptime.
static IrInstGen *ir_analyze_union_init(IrAnalyze *ira, IrInst* source_instruction,
        AstNode *field_source_node, ZigType *union_type, Buf *field_name, IrInstGen *field_result_loc,
        IrInstGen *result_loc)
{
    Error err;
    assert(union_type->id == ZigTypeIdUnion);

    if ((err = type_resolve(ira->codegen, union_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    TypeUnionField *union_field = find_union_type_field(union_type, field_name);
    if (union_field == nullptr) {
        ir_add_error_node(ira, field_source_node,
            buf_sprintf("no field named '%s' in union '%s'",
                buf_ptr(field_name), buf_ptr(&union_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (type_is_invalid(union_field->type_entry))
        return ira->codegen->invalid_inst_gen;

    // While the result location's constness is still being inferred, demote it
    // to runtime unless the field initializer turned out to be a comptime-known
    // constant (and not a comptime-var that may change).
    if (result_loc->value->data.x_ptr.mut == ConstPtrMutInfer) {
        const bool field_is_comptime_const = instr_is_comptime(field_result_loc) &&
            field_result_loc->value->data.x_ptr.mut != ConstPtrMutRuntimeVar;
        if (!field_is_comptime_const) {
            result_loc->value->special = ConstValSpecialRuntime;
        }
    }

    const bool requires_comptime = ir_should_inline(ira->old_irb.exec, source_instruction->scope)
        || type_requires_comptime(ira->codegen, union_type) == ReqCompTimeYes;

    IrInstGen *result = ir_get_deref(ira, source_instruction, result_loc, nullptr);
    if (requires_comptime && !instr_is_comptime(result)) {
        ir_add_error(ira, &field_result_loc->base,
            buf_sprintf("unable to evaluate constant expression"));
        return ira->codegen->invalid_inst_gen;
    }
    return result;
}
|
|
|
|
// Analyzes `T{ .a = x, .b = y }` initialization.
// Unions are delegated to ir_analyze_union_init (exactly one field). For
// structs: checks every initialized field exists and is unique, fills in
// default values for missing fields (erroring when no default exists), and
// reconciles the inferred comptime/runtime status of the result location.
// Returns the initialized value (a deref of result_loc), or invalid_inst_gen.
static IrInstGen *ir_analyze_container_init_fields(IrAnalyze *ira, IrInst *source_instr,
        ZigType *container_type, size_t instr_field_count, IrInstSrcContainerInitFieldsField *fields,
        IrInstGen *result_loc)
{
    Error err;
    if (container_type->id == ZigTypeIdUnion) {
        if (instr_field_count != 1) {
            ir_add_error(ira, source_instr,
                buf_sprintf("union initialization expects exactly one field"));
            return ira->codegen->invalid_inst_gen;
        }
        IrInstSrcContainerInitFieldsField *field = &fields[0];
        IrInstGen *field_result_loc = field->result_loc->child;
        if (type_is_invalid(field_result_loc->value->type))
            return ira->codegen->invalid_inst_gen;

        return ir_analyze_union_init(ira, source_instr, field->source_node, container_type, field->name,
                field_result_loc, result_loc);
    }
    if (container_type->id != ZigTypeIdStruct || is_slice(container_type)) {
        ir_add_error(ira, source_instr,
            buf_sprintf("type '%s' does not support struct initialization syntax",
                buf_ptr(&container_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (container_type->data.structure.resolve_status == ResolveStatusBeingInferred) {
        // We're now done inferring the type.
        container_type->data.structure.resolve_status = ResolveStatusUnstarted;
    }

    if ((err = type_resolve(ira->codegen, container_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    size_t actual_field_count = container_type->data.structure.src_field_count;

    IrInstGen *first_non_const_instruction = nullptr;

    // Per-field source node of the initializer that set it (for duplicate
    // detection and for finding fields that still need defaults).
    AstNode **field_assign_nodes = heap::c_allocator.allocate<AstNode *>(actual_field_count);
    ZigList<IrInstGen *> const_ptrs = {};

    bool is_comptime = ir_should_inline(ira->old_irb.exec, source_instr->scope)
        || type_requires_comptime(ira->codegen, container_type) == ReqCompTimeYes;

    // Here we iterate over the fields that have been initialized, and emit
    // compile errors for missing fields and duplicate fields.
    // It is only now that we find out whether the struct initialization can be a comptime
    // value, but we have already emitted runtime instructions for the fields that
    // were initialized with runtime values, and have omitted instructions that would have
    // initialized fields with comptime values.
    // So now we must clean up this situation. If it turns out the struct initialization can
    // be a comptime value, overwrite ConstPtrMutInfer with ConstPtrMutComptimeConst.
    // Otherwise, we must emit instructions to runtime-initialize the fields that have
    // comptime-known values.

    for (size_t i = 0; i < instr_field_count; i += 1) {
        IrInstSrcContainerInitFieldsField *field = &fields[i];

        IrInstGen *field_result_loc = field->result_loc->child;
        if (type_is_invalid(field_result_loc->value->type)) {
            // Fix: release the scratch buffers before bailing out (they were
            // leaked on this and the following early-return paths).
            heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
            const_ptrs.deinit();
            return ira->codegen->invalid_inst_gen;
        }

        TypeStructField *type_field = find_struct_type_field(container_type, field->name);
        if (!type_field) {
            ir_add_error_node(ira, field->source_node,
                buf_sprintf("no field named '%s' in struct '%s'",
                    buf_ptr(field->name), buf_ptr(&container_type->name)));
            heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
            const_ptrs.deinit();
            return ira->codegen->invalid_inst_gen;
        }

        if (type_is_invalid(type_field->type_entry)) {
            heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
            const_ptrs.deinit();
            return ira->codegen->invalid_inst_gen;
        }

        size_t field_index = type_field->src_index;
        AstNode *existing_assign_node = field_assign_nodes[field_index];
        if (existing_assign_node) {
            ErrorMsg *msg = ir_add_error_node(ira, field->source_node, buf_sprintf("duplicate field"));
            add_error_note(ira->codegen, msg, existing_assign_node, buf_sprintf("other field here"));
            heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
            const_ptrs.deinit();
            return ira->codegen->invalid_inst_gen;
        }
        field_assign_nodes[field_index] = field->source_node;

        if (instr_is_comptime(field_result_loc) &&
            field_result_loc->value->data.x_ptr.mut != ConstPtrMutRuntimeVar)
        {
            const_ptrs.append(field_result_loc);
        } else {
            first_non_const_instruction = field_result_loc;
        }
    }

    bool any_missing = false;
    for (size_t i = 0; i < actual_field_count; i += 1) {
        if (field_assign_nodes[i] != nullptr) continue;

        // look for a default field value
        TypeStructField *field = container_type->data.structure.fields[i];
        memoize_field_init_val(ira->codegen, container_type, field);
        if (field->init_val == nullptr) {
            // Keep scanning so all missing fields are reported in one pass.
            ir_add_error(ira, source_instr,
                buf_sprintf("missing field: '%s'", buf_ptr(field->name)));
            any_missing = true;
            continue;
        }
        if (type_is_invalid(field->init_val->type)) {
            heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
            const_ptrs.deinit();
            return ira->codegen->invalid_inst_gen;
        }

        // Store the default value through a freshly computed field pointer.
        IrInstGen *runtime_inst = ir_const(ira, source_instr, field->init_val->type);
        copy_const_val(ira->codegen, runtime_inst->value, field->init_val);

        IrInstGen *field_ptr = ir_analyze_struct_field_ptr(ira, source_instr, field, result_loc,
                container_type, true);
        ir_analyze_store_ptr(ira, source_instr, field_ptr, runtime_inst, false);
        if (instr_is_comptime(field_ptr) && field_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
            const_ptrs.append(field_ptr);
        } else {
            first_non_const_instruction = result_loc;
        }
    }
    heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
    if (any_missing) {
        // Fix: const_ptrs may already hold entries on this path; free it.
        const_ptrs.deinit();
        return ira->codegen->invalid_inst_gen;
    }

    if (result_loc->value->data.x_ptr.mut == ConstPtrMutInfer) {
        if (const_ptrs.length != actual_field_count) {
            // Not every field is comptime-known: the aggregate is runtime, so
            // re-emit runtime stores for the fields whose stores were elided.
            result_loc->value->special = ConstValSpecialRuntime;
            for (size_t i = 0; i < const_ptrs.length; i += 1) {
                IrInstGen *field_result_loc = const_ptrs.at(i);
                IrInstGen *deref = ir_get_deref(ira, &field_result_loc->base, field_result_loc, nullptr);
                field_result_loc->value->special = ConstValSpecialRuntime;
                ir_analyze_store_ptr(ira, &field_result_loc->base, field_result_loc, deref, false);
            }
        }
    }

    const_ptrs.deinit();
    IrInstGen *result = ir_get_deref(ira, source_instr, result_loc, nullptr);

    if (is_comptime && !instr_is_comptime(result)) {
        ir_add_error_node(ira, first_non_const_instruction->base.source_node,
            buf_sprintf("unable to evaluate constant expression"));
        return ira->codegen->invalid_inst_gen;
    }

    return result;
}
|
|
|
|
// Analyzes a positional container initialization `T{ a, b, c }` for arrays,
// vectors, inferred structs, and void. Validates the element count against
// the container type, handles one-possible-value folding, and reconciles the
// inferred comptime/runtime status of the element result locations (same
// scheme as ir_analyze_container_init_fields).
static IrInstGen *ir_analyze_instruction_container_init_list(IrAnalyze *ira,
        IrInstSrcContainerInitList *instruction)
{
    ir_assert(instruction->result_loc != nullptr, &instruction->base.base);
    IrInstGen *result_loc = instruction->result_loc->child;
    if (type_is_invalid(result_loc->value->type))
        return result_loc;

    ir_assert(result_loc->value->type->id == ZigTypeIdPointer, &instruction->base.base);
    if (result_loc->value->type->data.pointer.is_const) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("cannot assign to constant"));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *container_type = result_loc->value->type->data.pointer.child_type;
    size_t elem_count = instruction->item_count;

    if (is_slice(container_type)) {
        ir_add_error_node(ira, instruction->init_array_type_source_node,
            buf_sprintf("array literal requires address-of operator to coerce to slice type '%s'",
                buf_ptr(&container_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (container_type->id == ZigTypeIdVoid) {
        if (elem_count != 0) {
            ir_add_error_node(ira, instruction->base.base.source_node,
                buf_sprintf("void expression expects no arguments"));
            return ira->codegen->invalid_inst_gen;
        }
        return ir_const_void(ira, &instruction->base.base);
    }

    // `S{}` with no elements is really an (empty) field-style initialization.
    if (container_type->id == ZigTypeIdStruct && elem_count == 0) {
        ir_assert(instruction->result_loc != nullptr, &instruction->base.base);
        IrInstGen *result_loc = instruction->result_loc->child;
        if (type_is_invalid(result_loc->value->type))
            return result_loc;
        return ir_analyze_container_init_fields(ira, &instruction->base.base, container_type, 0, nullptr, result_loc);
    }

    if (container_type->id == ZigTypeIdArray) {
        ZigType *child_type = container_type->data.array.child_type;
        if (container_type->data.array.len != elem_count) {
            ZigType *literal_type = get_array_type(ira->codegen, child_type, elem_count, nullptr);

            ir_add_error(ira, &instruction->base.base,
                buf_sprintf("expected %s literal, found %s literal",
                    buf_ptr(&container_type->name), buf_ptr(&literal_type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (container_type->id == ZigTypeIdStruct &&
        container_type->data.structure.resolve_status == ResolveStatusBeingInferred)
    {
        // We're now done inferring the type.
        container_type->data.structure.resolve_status = ResolveStatusUnstarted;
    } else if (container_type->id == ZigTypeIdVector) {
        // OK
    } else {
        ir_add_error(ira, &instruction->base.base,
            buf_sprintf("type '%s' does not support array initialization",
                buf_ptr(&container_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    switch (type_has_one_possible_value(ira->codegen, container_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            return ir_const_move(ira, &instruction->base.base,
                 get_the_one_possible_value(ira->codegen, container_type));
        case OnePossibleValueNo:
            break;
    }

    bool is_comptime;
    switch (type_requires_comptime(ira->codegen, container_type)) {
        case ReqCompTimeInvalid:
            return ira->codegen->invalid_inst_gen;
        case ReqCompTimeNo:
            is_comptime = ir_should_inline(ira->old_irb.exec, instruction->base.base.scope);
            break;
        case ReqCompTimeYes:
            is_comptime = true;
            break;
    }

    IrInstGen *first_non_const_instruction = nullptr;

    // The Result Location Mechanism has already emitted runtime instructions to
    // initialize runtime elements and has omitted instructions for the comptime
    // elements. However it is only now that we find out whether the array initialization
    // can be a comptime value. So we must clean up the situation. If it turns out
    // array initialization can be a comptime value, overwrite ConstPtrMutInfer with
    // ConstPtrMutComptimeConst. Otherwise, emit instructions to runtime-initialize the
    // elements that have comptime-known values.
    ZigList<IrInstGen *> const_ptrs = {};

    for (size_t i = 0; i < elem_count; i += 1) {
        IrInstGen *elem_result_loc = instruction->elem_result_loc_list[i]->child;
        if (type_is_invalid(elem_result_loc->value->type)) {
            // Fix: release the list buffer before bailing out (it was leaked
            // on this path; the normal path deinits it below).
            const_ptrs.deinit();
            return ira->codegen->invalid_inst_gen;
        }

        assert(elem_result_loc->value->type->id == ZigTypeIdPointer);

        if (instr_is_comptime(elem_result_loc) &&
            elem_result_loc->value->data.x_ptr.mut != ConstPtrMutRuntimeVar)
        {
            const_ptrs.append(elem_result_loc);
        } else {
            first_non_const_instruction = elem_result_loc;
        }
    }

    if (result_loc->value->data.x_ptr.mut == ConstPtrMutInfer) {
        if (const_ptrs.length != elem_count) {
            // Mixed comptime/runtime elements: the aggregate is runtime, so
            // re-emit runtime stores for the elements whose stores were elided.
            result_loc->value->special = ConstValSpecialRuntime;
            for (size_t i = 0; i < const_ptrs.length; i += 1) {
                IrInstGen *elem_result_loc = const_ptrs.at(i);
                assert(elem_result_loc->value->special == ConstValSpecialStatic);
                if (elem_result_loc->value->type->data.pointer.inferred_struct_field != nullptr) {
                    // This field will be generated comptime; no need to do this.
                    continue;
                }
                IrInstGen *deref = ir_get_deref(ira, &elem_result_loc->base, elem_result_loc, nullptr);
                elem_result_loc->value->special = ConstValSpecialRuntime;
                ir_analyze_store_ptr(ira, &elem_result_loc->base, elem_result_loc, deref, false);
            }
        }
    }

    const_ptrs.deinit();

    IrInstGen *result = ir_get_deref(ira, &instruction->base.base, result_loc, nullptr);
    // If the result is a tuple, we are allowed to return a struct that uses ConstValSpecialRuntime fields at comptime.
    if (instr_is_comptime(result) || is_tuple(container_type))
        return result;

    if (is_comptime) {
        ir_add_error(ira, &first_non_const_instruction->base,
            buf_sprintf("unable to evaluate constant expression"));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *result_elem_type = result_loc->value->type->data.pointer.child_type;
    if (is_slice(result_elem_type)) {
        ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
            buf_sprintf("runtime-initialized array cannot be casted to slice type '%s'",
                buf_ptr(&result_elem_type->name)));
        add_error_note(ira->codegen, msg, first_non_const_instruction->base.source_node,
            buf_sprintf("this value is not comptime-known"));
        return ira->codegen->invalid_inst_gen;
    }
    return result;
}
|
|
|
|
// Instruction wrapper for field-style container initialization: validates and
// unwraps the result location, rejects const destinations, then delegates the
// real work to ir_analyze_container_init_fields.
static IrInstGen *ir_analyze_instruction_container_init_fields(IrAnalyze *ira,
        IrInstSrcContainerInitFields *instruction)
{
    ir_assert(instruction->result_loc != nullptr, &instruction->base.base);

    IrInstGen *dest_ptr = instruction->result_loc->child;
    if (type_is_invalid(dest_ptr->value->type))
        return dest_ptr;

    ir_assert(dest_ptr->value->type->id == ZigTypeIdPointer, &instruction->base.base);
    if (dest_ptr->value->type->data.pointer.is_const) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("cannot assign to constant"));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *init_type = dest_ptr->value->type->data.pointer.child_type;
    return ir_analyze_container_init_fields(ira, &instruction->base.base, init_type,
            instruction->field_count, instruction->fields, dest_ptr);
}
|
|
|
|
// Implements `@compileError(msg)`: reports the user-provided message as a
// compile error. Always yields the invalid instruction, whether the message
// string itself resolved or not.
static IrInstGen *ir_analyze_instruction_compile_err(IrAnalyze *ira, IrInstSrcCompileErr *instruction) {
    Buf *user_msg = ir_resolve_str(ira, instruction->msg->child);
    if (user_msg != nullptr) {
        ir_add_error(ira, &instruction->base.base, user_msg);
    }
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
// Implements `@compileLog(...)`: renders each argument to stderr during
// semantic analysis, then reports "found compile log statement" once per call
// site. Evaluates to void so analysis can continue past the call.
static IrInstGen *ir_analyze_instruction_compile_log(IrAnalyze *ira, IrInstSrcCompileLog *instruction) {
    Buf render_buf = BUF_INIT;
    fprintf(stderr, "| ");
    for (size_t arg_i = 0; arg_i < instruction->msg_count; arg_i += 1) {
        IrInstGen *arg = instruction->msg_list[arg_i]->child;
        if (type_is_invalid(arg->value->type))
            return ira->codegen->invalid_inst_gen;
        buf_resize(&render_buf, 0);
        if (arg->value->special == ConstValSpecialLazy) {
            // Resolve any lazy value that's passed, we need its value
            if (ir_resolve_lazy(ira->codegen, arg->base.source_node, arg->value))
                return ira->codegen->invalid_inst_gen;
        }
        render_const_value(ira->codegen, &render_buf, arg->value);
        fprintf(stderr, "%s%s", (arg_i == 0) ? "" : ", ", buf_ptr(&render_buf));
    }
    fprintf(stderr, "\n");

    auto *call_expr = &instruction->base.base.source_node->data.fn_call_expr;
    if (!call_expr->seen) {
        // Here we bypass higher level functions such as ir_add_error because we do not want
        // invalidate_exec to be called.
        add_node_error(ira->codegen, instruction->base.base.source_node, buf_sprintf("found compile log statement"));
    }
    call_expr->seen = true;

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Implements `@errorName(err)`: produces the error's name as a `[]const u8`
// slice. Comptime-known errors fold to a memoized constant slice; runtime
// values compile to a lookup in the generated error-name table.
static IrInstGen *ir_analyze_instruction_err_name(IrAnalyze *ira, IrInstSrcErrName *instruction) {
    IrInstGen *operand = instruction->value->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    // Coerce to the global error set so any error-set operand is accepted.
    IrInstGen *casted_operand = ir_implicit_cast(ira, operand, ira->codegen->builtin_types.entry_global_error_set);
    if (type_is_invalid(casted_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
            true, false, PtrLenUnknown, 0, 0, 0, false);
    ZigType *str_type = get_slice_type(ira->codegen, u8_ptr_type);

    if (instr_is_comptime(casted_operand)) {
        ZigValue *const_val = ir_resolve_const(ira, casted_operand, UndefBad);
        if (const_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        ErrorTableEntry *err = casted_operand->value->data.x_err_set;
        if (err->cached_error_name_val == nullptr) {
            // Build the name slice once and memoize it on the error entry.
            ZigValue *array_val = create_const_str_lit(ira->codegen, &err->name)->data.x_ptr.data.ref.pointee;
            err->cached_error_name_val = create_const_slice(ira->codegen, array_val, 0, buf_len(&err->name), true);
        }
        IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
        copy_const_val(ira->codegen, result->value, err->cached_error_name_val);
        result->value->type = str_type;
        return result;
    }

    // Runtime path: the codegen must emit the error-name table.
    ira->codegen->generate_error_name_table = true;

    return ir_build_err_name_gen(ira, &instruction->base.base, operand, str_type);
}
|
|
|
|
// Implements `@tagName(x)`: produces the active tag's name as a `[]const u8`
// slice. Accepts enum literals, unions (via their tag), and enums. Folds to a
// constant slice for enum literals, single-field enums, and comptime-known
// values; otherwise emits a runtime tag-name lookup instruction.
static IrInstGen *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrInstSrcTagName *instruction) {
    Error err;
    IrInstGen *target = instruction->target->child;
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *target_type = target->value->type;

    // Enum literal: the name is the literal itself; fold immediately.
    if (target_type->id == ZigTypeIdEnumLiteral) {
        IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
        Buf *field_name = target->value->data.x_enum_literal;
        ZigValue *array_val = create_const_str_lit(ira->codegen, field_name)->data.x_ptr.data.ref.pointee;
        init_const_slice(ira->codegen, result->value, array_val, 0, buf_len(field_name), true);
        return result;
    }

    // Union: replace the target with its tag value and continue as an enum.
    if (target_type->id == ZigTypeIdUnion) {
        target = ir_analyze_union_tag(ira, &instruction->base.base, target, instruction->base.is_gen);
        if (type_is_invalid(target->value->type))
            return ira->codegen->invalid_inst_gen;
        target_type = target->value->type;
    }

    if (target_type->id != ZigTypeIdEnum) {
        ir_add_error(ira, &target->base,
            buf_sprintf("expected enum tag, found '%s'", buf_ptr(&target_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // A foldable (single-variant) enum has only one possible tag name.
    if (can_fold_enum_type(target_type)) {
        TypeEnumField *only_field = &target_type->data.enumeration.fields[0];
        ZigValue *array_val = create_const_str_lit(ira->codegen, only_field->name)->data.x_ptr.data.ref.pointee;
        IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
        init_const_slice(ira->codegen, result->value, array_val, 0, buf_len(only_field->name), true);
        return result;
    }

    // Comptime-known enum value: look the field up by tag value and fold.
    if (instr_is_comptime(target)) {
        if ((err = type_resolve(ira->codegen, target_type, ResolveStatusZeroBitsKnown)))
            return ira->codegen->invalid_inst_gen;
        // NOTE(review): reads the tag through data.x_bigint — presumably this
        // aliases the enum-tag BigInt in the value union; confirm against
        // how comptime enum values are stored elsewhere in this file.
        TypeEnumField *field = find_enum_field_by_tag(target_type, &target->value->data.x_bigint);
        if (field == nullptr) {
            // Non-exhaustive/invalid tag value: report it in decimal.
            Buf *int_buf = buf_alloc();
            bigint_append_buf(int_buf, &target->value->data.x_bigint, 10);

            ir_add_error(ira, &target->base,
                buf_sprintf("no tag by value %s", buf_ptr(int_buf)));
            return ira->codegen->invalid_inst_gen;
        }
        ZigValue *array_val = create_const_str_lit(ira->codegen, field->name)->data.x_ptr.data.ref.pointee;
        IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
        init_const_slice(ira->codegen, result->value, array_val, 0, buf_len(field->name), true);
        return result;
    }

    // Runtime path: produce a `[]const u8` via the generated tag-name lookup.
    ZigType *u8_ptr_type = get_pointer_to_type_extra(
        ira->codegen, ira->codegen->builtin_types.entry_u8,
        true, false, PtrLenUnknown,
        0, 0, 0, false);
    ZigType *result_type = get_slice_type(ira->codegen, u8_ptr_type);
    return ir_build_tag_name_gen(ira, &instruction->base.base, target, result_type);
}
|
|
|
|
// Implements @fieldParentPtr(ParentType, "field_name", field_ptr).
// Given a pointer to a single field of a struct, produces a pointer to the
// enclosing struct. All three operands are validated in order (so the first
// failing operand determines which error is reported); any semantic error
// yields invalid_inst_gen. If the field pointer is comptime-known, the parent
// pointer is computed at compile time; otherwise a runtime instruction is
// emitted.
static IrInstGen *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
        IrInstSrcFieldParentPtr *instruction)
{
    Error err;

    // First operand: the parent type; must resolve to a struct (checked below).
    IrInstGen *type_value = instruction->type_value->child;
    ZigType *container_type = ir_resolve_type(ira, type_value);
    if (type_is_invalid(container_type))
        return ira->codegen->invalid_inst_gen;

    // Second operand: comptime-known field name string.
    IrInstGen *field_name_value = instruction->field_name->child;
    Buf *field_name = ir_resolve_str(ira, field_name_value);
    if (!field_name)
        return ira->codegen->invalid_inst_gen;

    // Third operand: the field pointer being converted.
    IrInstGen *field_ptr = instruction->field_ptr->child;
    if (type_is_invalid(field_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    if (container_type->id != ZigTypeIdStruct) {
        ir_add_error(ira, &target->base,
            buf_sprintf("expected enum tag, found '%s'", buf_ptr(&target_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (container_type->id != ZigTypeIdStruct) {
        ir_add_error(ira, &type_value->base,
            buf_sprintf("expected struct type, found '%s'", buf_ptr(&container_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Field offsets are consulted by codegen, so the full struct layout must
    // be known before proceeding.
    if ((err = type_resolve(ira->codegen, container_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *field = find_struct_type_field(container_type, field_name);
    if (field == nullptr) {
        ir_add_error(ira, &field_name_value->base,
            buf_sprintf("struct '%s' has no field '%s'",
                buf_ptr(&container_type->name), buf_ptr(field_name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (field_ptr->value->type->id != ZigTypeIdPointer) {
        ir_add_error(ira, &field_ptr->base,
            buf_sprintf("expected pointer, found '%s'", buf_ptr(&field_ptr->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Packed structs have byte-aligned (align 1) fields; otherwise use the
    // natural ABI alignment of the field type / parent type.
    bool is_packed = (container_type->data.structure.layout == ContainerLayoutPacked);
    uint32_t field_ptr_align = is_packed ? 1 : get_abi_alignment(ira->codegen, field->type_entry);
    uint32_t parent_ptr_align = is_packed ? 1 : get_abi_alignment(ira->codegen, container_type);

    // Coerce the given pointer to "*align(A) FieldType", preserving the
    // const/volatile qualifiers so they carry over to the result pointer.
    ZigType *field_ptr_type = get_pointer_to_type_extra(ira->codegen, field->type_entry,
            field_ptr->value->type->data.pointer.is_const,
            field_ptr->value->type->data.pointer.is_volatile,
            PtrLenSingle,
            field_ptr_align, 0, 0, false);
    IrInstGen *casted_field_ptr = ir_implicit_cast(ira, field_ptr, field_ptr_type);
    if (type_is_invalid(casted_field_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // Result type: single-item pointer to the parent struct, with the same
    // const/volatile qualifiers as the (casted) field pointer.
    ZigType *result_type = get_pointer_to_type_extra(ira->codegen, container_type,
            casted_field_ptr->value->type->data.pointer.is_const,
            casted_field_ptr->value->type->data.pointer.is_volatile,
            PtrLenSingle,
            parent_ptr_align, 0, 0, false);

    if (instr_is_comptime(casted_field_ptr)) {
        // Comptime path: the pointer value must literally point at a struct
        // field, and at the *same* field index the name resolved to.
        ZigValue *field_ptr_val = ir_resolve_const(ira, casted_field_ptr, UndefBad);
        if (!field_ptr_val)
            return ira->codegen->invalid_inst_gen;

        if (field_ptr_val->data.x_ptr.special != ConstPtrSpecialBaseStruct) {
            ir_add_error(ira, &field_ptr->base, buf_sprintf("pointer value not based on parent struct"));
            return ira->codegen->invalid_inst_gen;
        }

        size_t ptr_field_index = field_ptr_val->data.x_ptr.data.base_struct.field_index;
        if (ptr_field_index != field->src_index) {
            ir_add_error(ira, &instruction->base.base,
                    buf_sprintf("field '%s' has index %" ZIG_PRI_usize " but pointer value is index %" ZIG_PRI_usize " of struct '%s'",
                        buf_ptr(field->name), field->src_index,
                        ptr_field_index, buf_ptr(&container_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        // Build a comptime reference to the enclosing struct value,
        // preserving the mutability of the original pointer.
        IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
        ZigValue *out_val = result->value;
        out_val->data.x_ptr.special = ConstPtrSpecialRef;
        out_val->data.x_ptr.data.ref.pointee = field_ptr_val->data.x_ptr.data.base_struct.struct_val;
        out_val->data.x_ptr.mut = field_ptr_val->data.x_ptr.mut;
        return result;
    }

    // Runtime path: emit an instruction that offsets back to the parent.
    return ir_build_field_parent_ptr_gen(ira, &instruction->base.base, casted_field_ptr, field, result_type);
}
|
|
|
|
// Shared validation for @byteOffsetOf / @bitOffsetOf. Resolves the struct
// type and field name operands, verifies that the named field exists and
// occupies storage, and writes the field's byte offset within the struct to
// *byte_offset. Returns the matching TypeStructField, or nullptr after an
// error has been emitted.
static TypeStructField *validate_host_int_byte_offset(IrAnalyze *ira,
        IrInstGen *type_value,
        IrInstGen *field_name_value,
        size_t *byte_offset)
{
    ZigType *struct_type = ir_resolve_type(ira, type_value);
    if (type_is_invalid(struct_type))
        return nullptr;

    // Offsets are only meaningful once the full layout has been computed.
    Error err = type_resolve(ira->codegen, struct_type, ResolveStatusSizeKnown);
    if (err != ErrorNone)
        return nullptr;

    Buf *name_buf = ir_resolve_str(ira, field_name_value);
    if (name_buf == nullptr)
        return nullptr;

    if (struct_type->id != ZigTypeIdStruct) {
        ir_add_error(ira, &type_value->base,
                buf_sprintf("expected struct type, found '%s'", buf_ptr(&struct_type->name)));
        return nullptr;
    }

    TypeStructField *found = find_struct_type_field(struct_type, name_buf);
    if (found == nullptr) {
        ir_add_error(ira, &field_name_value->base,
                buf_sprintf("struct '%s' has no field '%s'",
                        buf_ptr(&struct_type->name), buf_ptr(name_buf)));
        return nullptr;
    }

    // A zero-bit field occupies no storage, so it has no meaningful offset.
    if (!type_has_bits(ira->codegen, found->type_entry)) {
        ir_add_error(ira, &field_name_value->base,
                buf_sprintf("zero-bit field '%s' in struct '%s' has no offset",
                        buf_ptr(name_buf), buf_ptr(&struct_type->name)));
        return nullptr;
    }

    *byte_offset = found->offset;
    return found;
}
|
|
|
|
// Implements @byteOffsetOf(T, "field"): the byte offset of the field within
// the struct, including whole bytes contributed by the field's position
// inside its host integer (relevant for packed structs).
static IrInstGen *ir_analyze_instruction_byte_offset_of(IrAnalyze *ira, IrInstSrcByteOffsetOf *instruction) {
    IrInstGen *type_value = instruction->type_value->child;
    if (type_is_invalid(type_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *field_name_value = instruction->field_name->child;
    size_t host_int_byte_offset = 0;
    TypeStructField *field = validate_host_int_byte_offset(ira, type_value, field_name_value,
            &host_int_byte_offset);
    if (field == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Whole bytes only: remaining bits within the host integer round down.
    size_t total_bytes = host_int_byte_offset + field->bit_offset_in_host / 8;
    return ir_const_unsigned(ira, &instruction->base.base, total_bytes);
}
|
|
|
|
// Implements @bitOffsetOf(T, "field"): the bit offset of the field within
// the struct, combining the byte offset with the field's bit position inside
// its host integer (relevant for packed structs).
static IrInstGen *ir_analyze_instruction_bit_offset_of(IrAnalyze *ira, IrInstSrcBitOffsetOf *instruction) {
    IrInstGen *type_value = instruction->type_value->child;
    if (type_is_invalid(type_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *field_name_value = instruction->field_name->child;
    size_t host_int_byte_offset = 0;
    TypeStructField *field = validate_host_int_byte_offset(ira, type_value, field_name_value,
            &host_int_byte_offset);
    if (field == nullptr)
        return ira->codegen->invalid_inst_gen;

    size_t total_bits = host_int_byte_offset * 8 + field->bit_offset_in_host;
    return ir_const_unsigned(ira, &instruction->base.base, total_bits);
}
|
|
|
|
// Compiler-internal sanity check: panics unless `type` declares a field named
// `field_name` at source index `index`. Used to keep the C++ code that builds
// TypeInfo values in sync with the std.builtin definitions written in Zig.
static void ensure_field_index(ZigType *type, const char *field_name, size_t index) {
    assert(type != nullptr && !type_is_invalid(type));

    Buf *name_buf = buf_create_from_str(field_name);
    TypeStructField *field = find_struct_type_field(type, name_buf);
    buf_deinit(name_buf);

    bool matches = (field != nullptr) && (field->src_index == index);
    if (!matches)
        zig_panic("reference to unknown field %s", field_name);
}
|
|
|
|
// Looks up a type declared inside the std.builtin TypeInfo union, or inside
// `root` when one is given. With type_name == nullptr this returns TypeInfo
// itself (when root == nullptr) or `root` unchanged. The named declaration
// must exist and be a `const ... = <type>;` declaration; this is enforced
// with asserts because TypeInfo is compiler-provided, not user code.
static ZigType *ir_type_info_get_type(IrAnalyze *ira, const char *type_name, ZigType *root) {
    Error err;
    ZigType *type_info_type = get_builtin_type(ira->codegen, "TypeInfo");
    assert(type_info_type->id == ZigTypeIdUnion);
    // TypeInfo always resolves; a failure here indicates a compiler bug.
    if ((err = type_resolve(ira->codegen, type_info_type, ResolveStatusSizeKnown))) {
        zig_unreachable();
    }

    if (type_name == nullptr && root == nullptr)
        return type_info_type;
    else if (type_name == nullptr)
        return root;

    ZigType *root_type = (root == nullptr) ? type_info_type : root;

    ScopeDecls *type_info_scope = get_container_scope(root_type);
    assert(type_info_scope != nullptr);

    // Temporary Buf used only as the decl-table lookup key; freed right after.
    Buf field_name = BUF_INIT;
    buf_init_from_str(&field_name, type_name);
    auto entry = type_info_scope->decl_table.get(&field_name);
    buf_deinit(&field_name);

    TldVar *tld = (TldVar *)entry;
    assert(tld->base.id == TldIdVar);

    ZigVar *var = tld->var;

    // The declaration is expected to be of type `type` (a MetaType value).
    assert(var->const_value->type->id == ZigTypeIdMetaType);

    return ir_resolve_const_type(ira->codegen, ira->new_irb.exec, nullptr, var->const_value);
}
|
|
|
|
// Builds the value of TypeInfo's `decls: []Declaration` field for the
// container whose declarations live in `decls_scope`, writing the result
// into `out_val`.
//
// When `resolve_types` is false the slice is produced lazily: out_val is
// filled with a LazyValueTypeInfoDecls so declarations are only resolved if
// the slice is actually inspected. When true, every declaration in the scope
// is eagerly resolved and an array of Declaration values is materialized.
//
// Returns ErrorNone on success, or a resolve/analysis error otherwise.
static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigValue *out_val,
        ScopeDecls *decls_scope, bool resolve_types)
{
    Error err;
    ZigType *type_info_declaration_type = ir_type_info_get_type(ira, "Declaration", nullptr);
    if ((err = type_resolve(ira->codegen, type_info_declaration_type, ResolveStatusSizeKnown)))
        return err;

    // Keep the field indices used below in sync with
    // std.builtin.TypeInfo.Declaration.
    ensure_field_index(type_info_declaration_type, "name", 0);
    ensure_field_index(type_info_declaration_type, "is_pub", 1);
    ensure_field_index(type_info_declaration_type, "data", 2);

    if (!resolve_types) {
        // Lazy path: record enough context to build the slice later,
        // deferring resolution of every declaration in the scope.
        ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, type_info_declaration_type,
                false, false, PtrLenUnknown, 0, 0, 0, false);

        out_val->special = ConstValSpecialLazy;
        out_val->type = get_slice_type(ira->codegen, ptr_type);

        LazyValueTypeInfoDecls *lazy_type_info_decls = heap::c_allocator.create<LazyValueTypeInfoDecls>();
        lazy_type_info_decls->ira = ira; ira_ref(ira);
        out_val->data.x_lazy = &lazy_type_info_decls->base;
        lazy_type_info_decls->base.id = LazyValueIdTypeInfoDecls;

        lazy_type_info_decls->source_instr = source_instr;
        lazy_type_info_decls->decls_scope = decls_scope;

        return ErrorNone;
    }

    // Types needed to describe the Data union and function declarations.
    ZigType *type_info_declaration_data_type = ir_type_info_get_type(ira, "Data", type_info_declaration_type);
    if ((err = type_resolve(ira->codegen, type_info_declaration_data_type, ResolveStatusSizeKnown)))
        return err;

    ZigType *type_info_fn_decl_type = ir_type_info_get_type(ira, "FnDecl", type_info_declaration_data_type);
    if ((err = type_resolve(ira->codegen, type_info_fn_decl_type, ResolveStatusSizeKnown)))
        return err;

    ZigType *type_info_fn_decl_inline_type = ir_type_info_get_type(ira, "Inline", type_info_fn_decl_type);
    if ((err = type_resolve(ira->codegen, type_info_fn_decl_inline_type, ResolveStatusSizeKnown)))
        return err;

    resolve_container_usingnamespace_decls(ira->codegen, decls_scope);

    // The unresolved declarations are collected in a separate queue to avoid
    // modifying decl_table while iterating over it
    ZigList<Tld*> resolve_decl_queue{};

    auto decl_it = decls_scope->decl_table.entry_iterator();
    decltype(decls_scope->decl_table)::Entry *curr_entry = nullptr;
    while ((curr_entry = decl_it.next()) != nullptr) {
        if (curr_entry->value->resolution == TldResolutionInvalid) {
            return ErrorSemanticAnalyzeFail;
        }

        // A declaration currently being resolved means @typeInfo on this
        // container depends on itself.
        if (curr_entry->value->resolution == TldResolutionResolving) {
            ir_error_dependency_loop(ira, source_instr);
            return ErrorSemanticAnalyzeFail;
        }

        // If the declaration is unresolved, force it to be resolved again.
        if (curr_entry->value->resolution == TldResolutionUnresolved)
            resolve_decl_queue.append(curr_entry->value);
    }

    for (size_t i = 0; i < resolve_decl_queue.length; i++) {
        Tld *decl = resolve_decl_queue.at(i);
        resolve_top_level_decl(ira->codegen, decl, decl->source_node, false);
        if (decl->resolution == TldResolutionInvalid) {
            return ErrorSemanticAnalyzeFail;
        }
    }

    resolve_decl_queue.deinit();

    // Loop through our declarations once to figure out how many declarations we will generate info for.
    int declaration_count = 0;
    decl_it = decls_scope->decl_table.entry_iterator();
    while ((curr_entry = decl_it.next()) != nullptr) {
        // Skip comptime blocks and test functions.
        if (curr_entry->value->id == TldIdCompTime)
            continue;

        if (curr_entry->value->id == TldIdFn) {
            ZigFn *fn_entry = ((TldFn *)curr_entry->value)->fn_entry;
            if (fn_entry->is_test)
                continue;
        }

        declaration_count += 1;
    }

    // Backing array for the resulting []Declaration slice.
    ZigValue *declaration_array = ira->codegen->pass1_arena->create<ZigValue>();
    declaration_array->special = ConstValSpecialStatic;
    declaration_array->type = get_array_type(ira->codegen, type_info_declaration_type, declaration_count, nullptr);
    declaration_array->data.x_array.special = ConstArraySpecialNone;
    declaration_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(declaration_count);
    init_const_slice(ira->codegen, out_val, declaration_array, 0, declaration_count, false);

    // Loop through the declarations and generate info.
    decl_it = decls_scope->decl_table.entry_iterator();
    curr_entry = nullptr;
    int declaration_index = 0;
    while ((curr_entry = decl_it.next()) != nullptr) {
        // Skip comptime blocks and test functions.
        if (curr_entry->value->id == TldIdCompTime) {
            continue;
        } else if (curr_entry->value->id == TldIdFn) {
            ZigFn *fn_entry = ((TldFn *)curr_entry->value)->fn_entry;
            if (fn_entry->is_test)
                continue;
        }

        ZigValue *declaration_val = &declaration_array->data.x_array.data.s_none.elements[declaration_index];

        declaration_val->special = ConstValSpecialStatic;
        declaration_val->type = type_info_declaration_type;

        // Declaration has exactly 3 fields: name, is_pub, data.
        ZigValue **inner_fields = alloc_const_vals_ptrs(ira->codegen, 3);
        // name: []const u8
        ZigValue *name = create_const_str_lit(ira->codegen, curr_entry->key)->data.x_ptr.data.ref.pointee;
        init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(curr_entry->key), true);
        // is_pub: bool
        inner_fields[1]->special = ConstValSpecialStatic;
        inner_fields[1]->type = ira->codegen->builtin_types.entry_bool;
        inner_fields[1]->data.x_bool = curr_entry->value->visib_mod == VisibModPub;
        // data: Data (a union; its tag and payload are filled in per decl
        // kind by the switch below).
        inner_fields[2]->special = ConstValSpecialStatic;
        inner_fields[2]->type = type_info_declaration_data_type;
        inner_fields[2]->parent.id = ConstParentIdStruct;
        inner_fields[2]->parent.data.p_struct.struct_val = declaration_val;
        // NOTE(review): "data" is declared at field index 2 (see the
        // ensure_field_index calls above), so 1 here looks suspicious --
        // confirm whether this should be 2.
        inner_fields[2]->parent.data.p_struct.field_index = 1;

        switch (curr_entry->value->id) {
            case TldIdVar:
                {
                    ZigVar *var = ((TldVar *)curr_entry->value)->var;
                    assert(var != nullptr);

                    if ((err = type_resolve(ira->codegen, var->const_value->type, ResolveStatusSizeKnown)))
                        return ErrorSemanticAnalyzeFail;

                    if (var->const_value->type->id == ZigTypeIdMetaType) {
                        // We have a variable of type 'type', so it's actually a type declaration.
                        // 0: Data.Type: type
                        bigint_init_unsigned(&inner_fields[2]->data.x_union.tag, 0);
                        inner_fields[2]->data.x_union.payload = var->const_value;
                    } else {
                        // We have a variable of another type, so we store the type of the variable.
                        // 1: Data.Var: type
                        bigint_init_unsigned(&inner_fields[2]->data.x_union.tag, 1);

                        ZigValue *payload = ira->codegen->pass1_arena->create<ZigValue>();
                        payload->special = ConstValSpecialStatic;
                        payload->type = ira->codegen->builtin_types.entry_type;
                        payload->data.x_type = var->const_value->type;

                        inner_fields[2]->data.x_union.payload = payload;
                    }

                    break;
                }
            case TldIdFn:
                {
                    // 2: Data.Fn: Data.FnDecl
                    bigint_init_unsigned(&inner_fields[2]->data.x_union.tag, 2);

                    ZigFn *fn_entry = ((TldFn *)curr_entry->value)->fn_entry;
                    assert(!fn_entry->is_test);
                    assert(fn_entry->type_entry != nullptr);

                    AstNodeFnProto *fn_node = &fn_entry->proto_node->data.fn_proto;

                    // Build the FnDecl struct value; field order must match
                    // std.builtin.TypeInfo.Declaration.Data.FnDecl (checked
                    // with ensure_field_index below).
                    ZigValue *fn_decl_val = ira->codegen->pass1_arena->create<ZigValue>();
                    fn_decl_val->special = ConstValSpecialStatic;
                    fn_decl_val->type = type_info_fn_decl_type;
                    fn_decl_val->parent.id = ConstParentIdUnion;
                    fn_decl_val->parent.data.p_union.union_val = inner_fields[2];

                    ZigValue **fn_decl_fields = alloc_const_vals_ptrs(ira->codegen, 9);
                    fn_decl_val->data.x_struct.fields = fn_decl_fields;

                    // fn_type: type
                    ensure_field_index(fn_decl_val->type, "fn_type", 0);
                    fn_decl_fields[0]->special = ConstValSpecialStatic;
                    fn_decl_fields[0]->type = ira->codegen->builtin_types.entry_type;
                    fn_decl_fields[0]->data.x_type = fn_entry->type_entry;
                    // inline_type: Data.FnDecl.Inline
                    ensure_field_index(fn_decl_val->type, "inline_type", 1);
                    fn_decl_fields[1]->special = ConstValSpecialStatic;
                    fn_decl_fields[1]->type = type_info_fn_decl_inline_type;
                    bigint_init_unsigned(&fn_decl_fields[1]->data.x_enum_tag, fn_entry->fn_inline);
                    // is_var_args: bool
                    ensure_field_index(fn_decl_val->type, "is_var_args", 2);
                    bool is_varargs = fn_node->is_var_args;
                    fn_decl_fields[2]->special = ConstValSpecialStatic;
                    fn_decl_fields[2]->type = ira->codegen->builtin_types.entry_bool;
                    fn_decl_fields[2]->data.x_bool = is_varargs;
                    // is_extern: bool
                    ensure_field_index(fn_decl_val->type, "is_extern", 3);
                    fn_decl_fields[3]->special = ConstValSpecialStatic;
                    fn_decl_fields[3]->type = ira->codegen->builtin_types.entry_bool;
                    fn_decl_fields[3]->data.x_bool = fn_node->is_extern;
                    // is_export: bool
                    ensure_field_index(fn_decl_val->type, "is_export", 4);
                    fn_decl_fields[4]->special = ConstValSpecialStatic;
                    fn_decl_fields[4]->type = ira->codegen->builtin_types.entry_bool;
                    fn_decl_fields[4]->data.x_bool = fn_node->is_export;
                    // lib_name: ?[]const u8
                    ensure_field_index(fn_decl_val->type, "lib_name", 5);
                    fn_decl_fields[5]->special = ConstValSpecialStatic;
                    ZigType *u8_ptr = get_pointer_to_type_extra(
                        ira->codegen, ira->codegen->builtin_types.entry_u8,
                        true, false, PtrLenUnknown,
                        0, 0, 0, false);
                    fn_decl_fields[5]->type = get_optional_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr));
                    // Only extern functions with a non-empty lib name carry a payload.
                    if (fn_node->is_extern && fn_node->lib_name != nullptr && buf_len(fn_node->lib_name) > 0) {
                        ZigValue *slice_val = ira->codegen->pass1_arena->create<ZigValue>();
                        ZigValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name)->data.x_ptr.data.ref.pointee;
                        init_const_slice(ira->codegen, slice_val, lib_name, 0, buf_len(fn_node->lib_name), true);
                        set_optional_payload(fn_decl_fields[5], slice_val);
                    } else {
                        set_optional_payload(fn_decl_fields[5], nullptr);
                    }
                    // return_type: type
                    ensure_field_index(fn_decl_val->type, "return_type", 6);
                    fn_decl_fields[6]->special = ConstValSpecialStatic;
                    fn_decl_fields[6]->type = ira->codegen->builtin_types.entry_type;
                    fn_decl_fields[6]->data.x_type = fn_entry->type_entry->data.fn.fn_type_id.return_type;
                    // arg_names: [][] const u8
                    ensure_field_index(fn_decl_val->type, "arg_names", 7);
                    size_t fn_arg_count = fn_entry->variable_list.length;
                    ZigValue *fn_arg_name_array = ira->codegen->pass1_arena->create<ZigValue>();
                    fn_arg_name_array->special = ConstValSpecialStatic;
                    fn_arg_name_array->type = get_array_type(ira->codegen,
                            get_slice_type(ira->codegen, u8_ptr), fn_arg_count, nullptr);
                    fn_arg_name_array->data.x_array.special = ConstArraySpecialNone;
                    fn_arg_name_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(fn_arg_count);

                    init_const_slice(ira->codegen, fn_decl_fields[7], fn_arg_name_array, 0, fn_arg_count, false);

                    // One []const u8 slice per parameter name.
                    for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) {
                        ZigVar *arg_var = fn_entry->variable_list.at(fn_arg_index);
                        ZigValue *fn_arg_name_val = &fn_arg_name_array->data.x_array.data.s_none.elements[fn_arg_index];
                        ZigValue *arg_name = create_const_str_lit(ira->codegen,
                                buf_create_from_str(arg_var->name))->data.x_ptr.data.ref.pointee;
                        init_const_slice(ira->codegen, fn_arg_name_val, arg_name, 0, strlen(arg_var->name), true);
                        fn_arg_name_val->parent.id = ConstParentIdArray;
                        fn_arg_name_val->parent.data.p_array.array_val = fn_arg_name_array;
                        fn_arg_name_val->parent.data.p_array.elem_index = fn_arg_index;
                    }

                    inner_fields[2]->data.x_union.payload = fn_decl_val;
                    break;
                }
            case TldIdContainer:
                {
                    ZigType *type_entry = ((TldContainer *)curr_entry->value)->type_entry;
                    if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
                        return ErrorSemanticAnalyzeFail;

                    // This is a type.
                    bigint_init_unsigned(&inner_fields[2]->data.x_union.tag, 0);

                    ZigValue *payload = ira->codegen->pass1_arena->create<ZigValue>();
                    payload->special = ConstValSpecialStatic;
                    payload->type = ira->codegen->builtin_types.entry_type;
                    payload->data.x_type = type_entry;

                    inner_fields[2]->data.x_union.payload = payload;

                    break;
                }
            default:
                zig_unreachable();
        }

        declaration_val->data.x_struct.fields = inner_fields;
        declaration_index += 1;
    }

    // Every non-skipped declaration must have been emitted exactly once.
    assert(declaration_index == declaration_count);
    return ErrorNone;
}
|
|
|
|
// Maps an internal pointer-length kind to the matching value of the
// std.builtin.TypeInfo.Pointer.Size enum.
static BuiltinPtrSize ptr_len_to_size_enum_index(PtrLen ptr_len) {
    if (ptr_len == PtrLenSingle)
        return BuiltinPtrSizeOne;
    if (ptr_len == PtrLenUnknown)
        return BuiltinPtrSizeMany;
    if (ptr_len == PtrLenC)
        return BuiltinPtrSizeC;
    zig_unreachable();
}
|
|
|
|
// Inverse of ptr_len_to_size_enum_index. Both Many and Slice map to
// PtrLenUnknown, since a slice's element pointer has unknown length.
static PtrLen size_enum_index_to_ptr_len(BuiltinPtrSize size_enum_index) {
    if (size_enum_index == BuiltinPtrSizeOne)
        return PtrLenSingle;
    if (size_enum_index == BuiltinPtrSizeMany || size_enum_index == BuiltinPtrSizeSlice)
        return PtrLenUnknown;
    if (size_enum_index == BuiltinPtrSizeC)
        return PtrLenC;
    zig_unreachable();
}
|
|
|
|
// Builds a comptime TypeInfo.Pointer value describing `ptr_type_entry`,
// which must be either a pointer type or a slice (internally a struct
// wrapping a many-item pointer plus a length).
//
// Fix: removed the stray ';' that followed the function's closing brace
// (an extraneous empty declaration; flagged by -Wextra-semi).
static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, IrInst *source_instr, ZigType *ptr_type_entry) {
    // attrs_type is the pointer type that actually carries the
    // const/volatile/alignment/child/sentinel attributes; for a slice that
    // is its inner ptr field, not the slice struct itself.
    ZigType *attrs_type;
    BuiltinPtrSize size_enum_index;
    if (is_slice(ptr_type_entry)) {
        TypeStructField *ptr_field = ptr_type_entry->data.structure.fields[slice_ptr_index];
        attrs_type = resolve_struct_field_type(ira->codegen, ptr_field);
        size_enum_index = BuiltinPtrSizeSlice;
    } else if (ptr_type_entry->id == ZigTypeIdPointer) {
        attrs_type = ptr_type_entry;
        size_enum_index = ptr_len_to_size_enum_index(ptr_type_entry->data.pointer.ptr_len);
    } else {
        zig_unreachable();
    }

    ZigType *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer", nullptr);
    assertNoError(type_resolve(ira->codegen, type_info_pointer_type, ResolveStatusSizeKnown));

    ZigValue *result = ira->codegen->pass1_arena->create<ZigValue>();
    result->special = ConstValSpecialStatic;
    result->type = type_info_pointer_type;

    // Field order below must match std.builtin.TypeInfo.Pointer (checked
    // with ensure_field_index per field).
    ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 7);
    result->data.x_struct.fields = fields;

    // size: Size
    ensure_field_index(result->type, "size", 0);
    ZigType *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type);
    assertNoError(type_resolve(ira->codegen, type_info_pointer_size_type, ResolveStatusSizeKnown));
    fields[0]->special = ConstValSpecialStatic;
    fields[0]->type = type_info_pointer_size_type;
    bigint_init_unsigned(&fields[0]->data.x_enum_tag, size_enum_index);

    // is_const: bool
    ensure_field_index(result->type, "is_const", 1);
    fields[1]->special = ConstValSpecialStatic;
    fields[1]->type = ira->codegen->builtin_types.entry_bool;
    fields[1]->data.x_bool = attrs_type->data.pointer.is_const;
    // is_volatile: bool
    ensure_field_index(result->type, "is_volatile", 2);
    fields[2]->special = ConstValSpecialStatic;
    fields[2]->type = ira->codegen->builtin_types.entry_bool;
    fields[2]->data.x_bool = attrs_type->data.pointer.is_volatile;
    // alignment: comptime_int
    ensure_field_index(result->type, "alignment", 3);
    fields[3]->type = ira->codegen->builtin_types.entry_num_lit_int;
    if (attrs_type->data.pointer.explicit_alignment != 0) {
        fields[3]->special = ConstValSpecialStatic;
        bigint_init_unsigned(&fields[3]->data.x_bigint, attrs_type->data.pointer.explicit_alignment);
    } else {
        // No explicit alignment: defer to a lazy @alignOf of the child type,
        // which may not be resolvable yet at this point.
        LazyValueAlignOf *lazy_align_of = heap::c_allocator.create<LazyValueAlignOf>();
        lazy_align_of->ira = ira; ira_ref(ira);
        fields[3]->special = ConstValSpecialLazy;
        fields[3]->data.x_lazy = &lazy_align_of->base;
        lazy_align_of->base.id = LazyValueIdAlignOf;
        lazy_align_of->target_type = ir_const_type(ira, source_instr, attrs_type->data.pointer.child_type);
    }
    // child: type
    ensure_field_index(result->type, "child", 4);
    fields[4]->special = ConstValSpecialStatic;
    fields[4]->type = ira->codegen->builtin_types.entry_type;
    fields[4]->data.x_type = attrs_type->data.pointer.child_type;
    // is_allowzero: bool
    ensure_field_index(result->type, "is_allowzero", 5);
    fields[5]->special = ConstValSpecialStatic;
    fields[5]->type = ira->codegen->builtin_types.entry_bool;
    fields[5]->data.x_bool = attrs_type->data.pointer.allow_zero;
    // sentinel: anytype
    ensure_field_index(result->type, "sentinel", 6);
    fields[6]->special = ConstValSpecialStatic;
    if (attrs_type->data.pointer.sentinel != nullptr) {
        fields[6]->type = get_optional_type(ira->codegen, attrs_type->data.pointer.child_type);
        set_optional_payload(fields[6], attrs_type->data.pointer.sentinel);
    } else {
        fields[6]->type = ira->codegen->builtin_types.entry_null;
    }

    return result;
}
|
|
|
|
// Fills `enum_field_val` with a TypeInfo.EnumField value ({ name, value })
// describing `enum_field`.
static void make_enum_field_val(IrAnalyze *ira, ZigValue *enum_field_val, TypeEnumField *enum_field,
        ZigType *type_info_enum_field_type)
{
    ZigValue **field_vals = alloc_const_vals_ptrs(ira->codegen, 2);

    // name: []const u8
    ZigValue *name_array = create_const_str_lit(ira->codegen, enum_field->name)->data.x_ptr.data.ref.pointee;
    init_const_slice(ira->codegen, field_vals[0], name_array, 0, buf_len(enum_field->name), true);

    // value: comptime_int (the field's integer tag value)
    field_vals[1]->special = ConstValSpecialStatic;
    field_vals[1]->type = ira->codegen->builtin_types.entry_num_lit_int;
    bigint_init_bigint(&field_vals[1]->data.x_bigint, &enum_field->value);

    enum_field_val->special = ConstValSpecialStatic;
    enum_field_val->type = type_info_enum_field_type;
    enum_field_val->data.x_struct.fields = field_vals;
}
|
|
|
|
static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigType *type_entry,
|
|
ZigValue **out)
|
|
{
|
|
Error err;
|
|
assert(type_entry != nullptr);
|
|
assert(!type_is_invalid(type_entry));
|
|
|
|
auto entry = ira->codegen->type_info_cache.maybe_get(type_entry);
|
|
if (entry != nullptr) {
|
|
*out = entry->value;
|
|
return ErrorNone;
|
|
}
|
|
|
|
ZigValue *result = nullptr;
|
|
switch (type_entry->id) {
|
|
case ZigTypeIdInvalid:
|
|
zig_unreachable();
|
|
case ZigTypeIdMetaType:
|
|
case ZigTypeIdVoid:
|
|
case ZigTypeIdBool:
|
|
case ZigTypeIdUnreachable:
|
|
case ZigTypeIdComptimeFloat:
|
|
case ZigTypeIdComptimeInt:
|
|
case ZigTypeIdEnumLiteral:
|
|
case ZigTypeIdUndefined:
|
|
case ZigTypeIdNull:
|
|
result = ira->codegen->intern.for_void();
|
|
break;
|
|
case ZigTypeIdInt:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Int", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 2);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// is_signed: Signedness
|
|
ensure_field_index(result->type, "signedness", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = get_builtin_type(ira->codegen, "Signedness");
|
|
bigint_init_unsigned(&fields[0]->data.x_enum_tag, !type_entry->data.integral.is_signed);
|
|
// bits: u8
|
|
ensure_field_index(result->type, "bits", 1);
|
|
fields[1]->special = ConstValSpecialStatic;
|
|
fields[1]->type = ira->codegen->builtin_types.entry_num_lit_int;
|
|
bigint_init_unsigned(&fields[1]->data.x_bigint, type_entry->data.integral.bit_count);
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdFloat:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Float", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 1);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// bits: u8
|
|
ensure_field_index(result->type, "bits", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ira->codegen->builtin_types.entry_num_lit_int;
|
|
bigint_init_unsigned(&fields[0]->data.x_bigint, type_entry->data.floating.bit_count);
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdPointer:
|
|
{
|
|
result = create_ptr_like_type_info(ira, source_instr, type_entry);
|
|
if (result == nullptr)
|
|
return ErrorSemanticAnalyzeFail;
|
|
break;
|
|
}
|
|
case ZigTypeIdArray:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Array", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 3);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// len: usize
|
|
ensure_field_index(result->type, "len", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ira->codegen->builtin_types.entry_num_lit_int;
|
|
bigint_init_unsigned(&fields[0]->data.x_bigint, type_entry->data.array.len);
|
|
// child: type
|
|
ensure_field_index(result->type, "child", 1);
|
|
fields[1]->special = ConstValSpecialStatic;
|
|
fields[1]->type = ira->codegen->builtin_types.entry_type;
|
|
fields[1]->data.x_type = type_entry->data.array.child_type;
|
|
// sentinel: anytype
|
|
fields[2]->special = ConstValSpecialStatic;
|
|
if (type_entry->data.array.child_type != nullptr) {
|
|
fields[2]->type = get_optional_type(ira->codegen, type_entry->data.array.child_type);
|
|
set_optional_payload(fields[2], type_entry->data.array.sentinel);
|
|
} else {
|
|
fields[2]->type = ira->codegen->builtin_types.entry_null;
|
|
}
|
|
break;
|
|
}
|
|
case ZigTypeIdVector: {
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Vector", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 2);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// len: usize
|
|
ensure_field_index(result->type, "len", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ira->codegen->builtin_types.entry_num_lit_int;
|
|
bigint_init_unsigned(&fields[0]->data.x_bigint, type_entry->data.vector.len);
|
|
// child: type
|
|
ensure_field_index(result->type, "child", 1);
|
|
fields[1]->special = ConstValSpecialStatic;
|
|
fields[1]->type = ira->codegen->builtin_types.entry_type;
|
|
fields[1]->data.x_type = type_entry->data.vector.elem_type;
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdOptional:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Optional", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 1);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// child: type
|
|
ensure_field_index(result->type, "child", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ira->codegen->builtin_types.entry_type;
|
|
fields[0]->data.x_type = type_entry->data.maybe.child_type;
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdAnyFrame: {
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "AnyFrame", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 1);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// child: ?type
|
|
ensure_field_index(result->type, "child", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
|
|
fields[0]->data.x_optional = (type_entry->data.any_frame.result_type == nullptr) ? nullptr :
|
|
create_const_type(ira->codegen, type_entry->data.any_frame.result_type);
|
|
break;
|
|
}
|
|
case ZigTypeIdEnum:
|
|
{
|
|
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
|
|
return err;
|
|
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Enum", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 5);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// layout: ContainerLayout
|
|
ensure_field_index(result->type, "layout", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
|
|
bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.enumeration.layout);
|
|
// tag_type: type
|
|
ensure_field_index(result->type, "tag_type", 1);
|
|
fields[1]->special = ConstValSpecialStatic;
|
|
fields[1]->type = ira->codegen->builtin_types.entry_type;
|
|
fields[1]->data.x_type = type_entry->data.enumeration.tag_int_type;
|
|
// fields: []TypeInfo.EnumField
|
|
ensure_field_index(result->type, "fields", 2);
|
|
|
|
ZigType *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField", nullptr);
|
|
if ((err = type_resolve(ira->codegen, type_info_enum_field_type, ResolveStatusSizeKnown))) {
|
|
zig_unreachable();
|
|
}
|
|
uint32_t enum_field_count = type_entry->data.enumeration.src_field_count;
|
|
|
|
ZigValue *enum_field_array = ira->codegen->pass1_arena->create<ZigValue>();
|
|
enum_field_array->special = ConstValSpecialStatic;
|
|
enum_field_array->type = get_array_type(ira->codegen, type_info_enum_field_type, enum_field_count, nullptr);
|
|
enum_field_array->data.x_array.special = ConstArraySpecialNone;
|
|
enum_field_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(enum_field_count);
|
|
|
|
init_const_slice(ira->codegen, fields[2], enum_field_array, 0, enum_field_count, false);
|
|
|
|
for (uint32_t enum_field_index = 0; enum_field_index < enum_field_count; enum_field_index++)
|
|
{
|
|
TypeEnumField *enum_field = &type_entry->data.enumeration.fields[enum_field_index];
|
|
ZigValue *enum_field_val = &enum_field_array->data.x_array.data.s_none.elements[enum_field_index];
|
|
make_enum_field_val(ira, enum_field_val, enum_field, type_info_enum_field_type);
|
|
enum_field_val->parent.id = ConstParentIdArray;
|
|
enum_field_val->parent.data.p_array.array_val = enum_field_array;
|
|
enum_field_val->parent.data.p_array.elem_index = enum_field_index;
|
|
}
|
|
// decls: []TypeInfo.Declaration
|
|
ensure_field_index(result->type, "decls", 3);
|
|
if ((err = ir_make_type_info_decls(ira, source_instr, fields[3],
|
|
type_entry->data.enumeration.decls_scope, false)))
|
|
{
|
|
return err;
|
|
}
|
|
// is_exhaustive: bool
|
|
ensure_field_index(result->type, "is_exhaustive", 4);
|
|
fields[4]->special = ConstValSpecialStatic;
|
|
fields[4]->type = ira->codegen->builtin_types.entry_bool;
|
|
fields[4]->data.x_bool = !type_entry->data.enumeration.non_exhaustive;
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdErrorSet:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "ErrorSet", nullptr);
|
|
|
|
ZigType *type_info_error_type = ir_type_info_get_type(ira, "Error", nullptr);
|
|
if (!resolve_inferred_error_set(ira->codegen, type_entry, source_instr->source_node)) {
|
|
return ErrorSemanticAnalyzeFail;
|
|
}
|
|
if (type_is_global_error_set(type_entry)) {
|
|
result->data.x_optional = nullptr;
|
|
break;
|
|
}
|
|
if ((err = type_resolve(ira->codegen, type_info_error_type, ResolveStatusSizeKnown))) {
|
|
zig_unreachable();
|
|
}
|
|
ZigValue *slice_val = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->data.x_optional = slice_val;
|
|
|
|
uint32_t error_count = type_entry->data.error_set.err_count;
|
|
ZigValue *error_array = ira->codegen->pass1_arena->create<ZigValue>();
|
|
error_array->special = ConstValSpecialStatic;
|
|
error_array->type = get_array_type(ira->codegen, type_info_error_type, error_count, nullptr);
|
|
error_array->data.x_array.special = ConstArraySpecialNone;
|
|
error_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(error_count);
|
|
|
|
init_const_slice(ira->codegen, slice_val, error_array, 0, error_count, false);
|
|
for (uint32_t error_index = 0; error_index < error_count; error_index++) {
|
|
ErrorTableEntry *error = type_entry->data.error_set.errors[error_index];
|
|
ZigValue *error_val = &error_array->data.x_array.data.s_none.elements[error_index];
|
|
|
|
error_val->special = ConstValSpecialStatic;
|
|
error_val->type = type_info_error_type;
|
|
|
|
ZigValue **inner_fields = alloc_const_vals_ptrs(ira->codegen, 1);
|
|
|
|
ZigValue *name = nullptr;
|
|
if (error->cached_error_name_val != nullptr)
|
|
name = error->cached_error_name_val;
|
|
if (name == nullptr)
|
|
name = create_const_str_lit(ira->codegen, &error->name)->data.x_ptr.data.ref.pointee;
|
|
init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(&error->name), true);
|
|
|
|
error_val->data.x_struct.fields = inner_fields;
|
|
error_val->parent.id = ConstParentIdArray;
|
|
error_val->parent.data.p_array.array_val = error_array;
|
|
error_val->parent.data.p_array.elem_index = error_index;
|
|
}
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdErrorUnion:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "ErrorUnion", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 2);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// error_set: type
|
|
ensure_field_index(result->type, "error_set", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ira->codegen->builtin_types.entry_type;
|
|
fields[0]->data.x_type = type_entry->data.error_union.err_set_type;
|
|
|
|
// payload: type
|
|
ensure_field_index(result->type, "payload", 1);
|
|
fields[1]->special = ConstValSpecialStatic;
|
|
fields[1]->type = ira->codegen->builtin_types.entry_type;
|
|
fields[1]->data.x_type = type_entry->data.error_union.payload_type;
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdUnion:
|
|
{
|
|
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
|
|
return err;
|
|
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Union", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 4);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// layout: ContainerLayout
|
|
ensure_field_index(result->type, "layout", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
|
|
bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.unionation.layout);
|
|
// tag_type: ?type
|
|
ensure_field_index(result->type, "tag_type", 1);
|
|
fields[1]->special = ConstValSpecialStatic;
|
|
fields[1]->type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
|
|
|
|
AstNode *union_decl_node = type_entry->data.unionation.decl_node;
|
|
if (union_decl_node->data.container_decl.auto_enum ||
|
|
union_decl_node->data.container_decl.init_arg_expr != nullptr)
|
|
{
|
|
ZigValue *tag_type = ira->codegen->pass1_arena->create<ZigValue>();
|
|
tag_type->special = ConstValSpecialStatic;
|
|
tag_type->type = ira->codegen->builtin_types.entry_type;
|
|
tag_type->data.x_type = type_entry->data.unionation.tag_type;
|
|
fields[1]->data.x_optional = tag_type;
|
|
} else {
|
|
fields[1]->data.x_optional = nullptr;
|
|
}
|
|
// fields: []TypeInfo.UnionField
|
|
ensure_field_index(result->type, "fields", 2);
|
|
|
|
ZigType *type_info_union_field_type = ir_type_info_get_type(ira, "UnionField", nullptr);
|
|
if ((err = type_resolve(ira->codegen, type_info_union_field_type, ResolveStatusSizeKnown)))
|
|
zig_unreachable();
|
|
uint32_t union_field_count = type_entry->data.unionation.src_field_count;
|
|
|
|
ZigValue *union_field_array = ira->codegen->pass1_arena->create<ZigValue>();
|
|
union_field_array->special = ConstValSpecialStatic;
|
|
union_field_array->type = get_array_type(ira->codegen, type_info_union_field_type, union_field_count, nullptr);
|
|
union_field_array->data.x_array.special = ConstArraySpecialNone;
|
|
union_field_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(union_field_count);
|
|
|
|
init_const_slice(ira->codegen, fields[2], union_field_array, 0, union_field_count, false);
|
|
|
|
for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) {
|
|
TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index];
|
|
ZigValue *union_field_val = &union_field_array->data.x_array.data.s_none.elements[union_field_index];
|
|
|
|
union_field_val->special = ConstValSpecialStatic;
|
|
union_field_val->type = type_info_union_field_type;
|
|
|
|
ZigValue **inner_fields = alloc_const_vals_ptrs(ira->codegen, 3);
|
|
// field_type: type
|
|
inner_fields[1]->special = ConstValSpecialStatic;
|
|
inner_fields[1]->type = ira->codegen->builtin_types.entry_type;
|
|
inner_fields[1]->data.x_type = union_field->type_entry;
|
|
|
|
// alignment: comptime_int
|
|
inner_fields[2]->special = ConstValSpecialStatic;
|
|
inner_fields[2]->type = ira->codegen->builtin_types.entry_num_lit_int;
|
|
bigint_init_unsigned(&inner_fields[2]->data.x_bigint, union_field->align);
|
|
|
|
ZigValue *name = create_const_str_lit(ira->codegen, union_field->name)->data.x_ptr.data.ref.pointee;
|
|
init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(union_field->name), true);
|
|
|
|
union_field_val->data.x_struct.fields = inner_fields;
|
|
union_field_val->parent.id = ConstParentIdArray;
|
|
union_field_val->parent.data.p_array.array_val = union_field_array;
|
|
union_field_val->parent.data.p_array.elem_index = union_field_index;
|
|
}
|
|
// decls: []TypeInfo.Declaration
|
|
ensure_field_index(result->type, "decls", 3);
|
|
if ((err = ir_make_type_info_decls(ira, source_instr, fields[3],
|
|
type_entry->data.unionation.decls_scope, false)))
|
|
{
|
|
return err;
|
|
}
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdStruct:
|
|
{
|
|
if (type_entry->data.structure.special == StructSpecialSlice) {
|
|
result = create_ptr_like_type_info(ira, source_instr, type_entry);
|
|
if (result == nullptr)
|
|
return ErrorSemanticAnalyzeFail;
|
|
break;
|
|
}
|
|
|
|
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
|
|
return err;
|
|
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Struct", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 4);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// layout: ContainerLayout
|
|
ensure_field_index(result->type, "layout", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
|
|
bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.structure.layout);
|
|
// fields: []TypeInfo.StructField
|
|
ensure_field_index(result->type, "fields", 1);
|
|
|
|
ZigType *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField", nullptr);
|
|
if ((err = type_resolve(ira->codegen, type_info_struct_field_type, ResolveStatusSizeKnown))) {
|
|
zig_unreachable();
|
|
}
|
|
uint32_t struct_field_count = type_entry->data.structure.src_field_count;
|
|
|
|
ZigValue *struct_field_array = ira->codegen->pass1_arena->create<ZigValue>();
|
|
struct_field_array->special = ConstValSpecialStatic;
|
|
struct_field_array->type = get_array_type(ira->codegen, type_info_struct_field_type, struct_field_count, nullptr);
|
|
struct_field_array->data.x_array.special = ConstArraySpecialNone;
|
|
struct_field_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(struct_field_count);
|
|
|
|
init_const_slice(ira->codegen, fields[1], struct_field_array, 0, struct_field_count, false);
|
|
|
|
for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
|
|
TypeStructField *struct_field = type_entry->data.structure.fields[struct_field_index];
|
|
ZigValue *struct_field_val = &struct_field_array->data.x_array.data.s_none.elements[struct_field_index];
|
|
|
|
struct_field_val->special = ConstValSpecialStatic;
|
|
struct_field_val->type = type_info_struct_field_type;
|
|
|
|
ZigValue **inner_fields = alloc_const_vals_ptrs(ira->codegen, 5);
|
|
|
|
inner_fields[1]->special = ConstValSpecialStatic;
|
|
inner_fields[1]->type = ira->codegen->builtin_types.entry_type;
|
|
inner_fields[1]->data.x_type = struct_field->type_entry;
|
|
|
|
// default_value: anytype
|
|
inner_fields[2]->special = ConstValSpecialStatic;
|
|
inner_fields[2]->type = get_optional_type2(ira->codegen, struct_field->type_entry);
|
|
if (inner_fields[2]->type == nullptr) return ErrorSemanticAnalyzeFail;
|
|
memoize_field_init_val(ira->codegen, type_entry, struct_field);
|
|
if(struct_field->init_val != nullptr && type_is_invalid(struct_field->init_val->type)){
|
|
return ErrorSemanticAnalyzeFail;
|
|
}
|
|
set_optional_payload(inner_fields[2], struct_field->init_val);
|
|
|
|
// is_comptime: bool
|
|
inner_fields[3]->special = ConstValSpecialStatic;
|
|
inner_fields[3]->type = ira->codegen->builtin_types.entry_bool;
|
|
inner_fields[3]->data.x_bool = struct_field->is_comptime;
|
|
|
|
// alignment: comptime_int
|
|
inner_fields[4]->special = ConstValSpecialStatic;
|
|
inner_fields[4]->type = ira->codegen->builtin_types.entry_num_lit_int;
|
|
bigint_init_unsigned(&inner_fields[4]->data.x_bigint, struct_field->align);
|
|
|
|
ZigValue *name = create_const_str_lit(ira->codegen, struct_field->name)->data.x_ptr.data.ref.pointee;
|
|
init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(struct_field->name), true);
|
|
|
|
struct_field_val->data.x_struct.fields = inner_fields;
|
|
struct_field_val->parent.id = ConstParentIdArray;
|
|
struct_field_val->parent.data.p_array.array_val = struct_field_array;
|
|
struct_field_val->parent.data.p_array.elem_index = struct_field_index;
|
|
}
|
|
// decls: []TypeInfo.Declaration
|
|
ensure_field_index(result->type, "decls", 2);
|
|
if ((err = ir_make_type_info_decls(ira, source_instr, fields[2],
|
|
type_entry->data.structure.decls_scope, false)))
|
|
{
|
|
return err;
|
|
}
|
|
|
|
// is_tuple: bool
|
|
ensure_field_index(result->type, "is_tuple", 3);
|
|
fields[3]->special = ConstValSpecialStatic;
|
|
fields[3]->type = ira->codegen->builtin_types.entry_bool;
|
|
fields[3]->data.x_bool = is_tuple(type_entry);
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdFn:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Fn", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 6);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// calling_convention: TypeInfo.CallingConvention
|
|
ensure_field_index(result->type, "calling_convention", 0);
|
|
fields[0]->special = ConstValSpecialStatic;
|
|
fields[0]->type = get_builtin_type(ira->codegen, "CallingConvention");
|
|
bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.fn.fn_type_id.cc);
|
|
// alignment: u29
|
|
ensure_field_index(result->type, "alignment", 1);
|
|
fields[1]->special = ConstValSpecialStatic;
|
|
fields[1]->type = ira->codegen->builtin_types.entry_num_lit_int;
|
|
bigint_init_unsigned(&fields[1]->data.x_bigint, type_entry->data.fn.fn_type_id.alignment);
|
|
// is_generic: bool
|
|
ensure_field_index(result->type, "is_generic", 2);
|
|
bool is_generic = type_entry->data.fn.is_generic;
|
|
fields[2]->special = ConstValSpecialStatic;
|
|
fields[2]->type = ira->codegen->builtin_types.entry_bool;
|
|
fields[2]->data.x_bool = is_generic;
|
|
// is_varargs: bool
|
|
ensure_field_index(result->type, "is_var_args", 3);
|
|
bool is_varargs = type_entry->data.fn.fn_type_id.is_var_args;
|
|
fields[3]->special = ConstValSpecialStatic;
|
|
fields[3]->type = ira->codegen->builtin_types.entry_bool;
|
|
fields[3]->data.x_bool = is_varargs;
|
|
// return_type: ?type
|
|
ensure_field_index(result->type, "return_type", 4);
|
|
fields[4]->special = ConstValSpecialStatic;
|
|
fields[4]->type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
|
|
if (type_entry->data.fn.fn_type_id.return_type == nullptr)
|
|
fields[4]->data.x_optional = nullptr;
|
|
else {
|
|
ZigValue *return_type = ira->codegen->pass1_arena->create<ZigValue>();
|
|
return_type->special = ConstValSpecialStatic;
|
|
return_type->type = ira->codegen->builtin_types.entry_type;
|
|
return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type;
|
|
fields[4]->data.x_optional = return_type;
|
|
}
|
|
// args: []TypeInfo.FnArg
|
|
ZigType *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg", nullptr);
|
|
if ((err = type_resolve(ira->codegen, type_info_fn_arg_type, ResolveStatusSizeKnown))) {
|
|
zig_unreachable();
|
|
}
|
|
size_t fn_arg_count = type_entry->data.fn.fn_type_id.param_count;
|
|
|
|
ZigValue *fn_arg_array = ira->codegen->pass1_arena->create<ZigValue>();
|
|
fn_arg_array->special = ConstValSpecialStatic;
|
|
fn_arg_array->type = get_array_type(ira->codegen, type_info_fn_arg_type, fn_arg_count, nullptr);
|
|
fn_arg_array->data.x_array.special = ConstArraySpecialNone;
|
|
fn_arg_array->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(fn_arg_count);
|
|
|
|
init_const_slice(ira->codegen, fields[5], fn_arg_array, 0, fn_arg_count, false);
|
|
|
|
for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) {
|
|
FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index];
|
|
ZigValue *fn_arg_val = &fn_arg_array->data.x_array.data.s_none.elements[fn_arg_index];
|
|
|
|
fn_arg_val->special = ConstValSpecialStatic;
|
|
fn_arg_val->type = type_info_fn_arg_type;
|
|
|
|
bool arg_is_generic = fn_param_info->type == nullptr;
|
|
if (arg_is_generic) assert(is_generic);
|
|
|
|
ZigValue **inner_fields = alloc_const_vals_ptrs(ira->codegen, 3);
|
|
inner_fields[0]->special = ConstValSpecialStatic;
|
|
inner_fields[0]->type = ira->codegen->builtin_types.entry_bool;
|
|
inner_fields[0]->data.x_bool = arg_is_generic;
|
|
inner_fields[1]->special = ConstValSpecialStatic;
|
|
inner_fields[1]->type = ira->codegen->builtin_types.entry_bool;
|
|
inner_fields[1]->data.x_bool = fn_param_info->is_noalias;
|
|
inner_fields[2]->special = ConstValSpecialStatic;
|
|
inner_fields[2]->type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
|
|
|
|
if (arg_is_generic)
|
|
inner_fields[2]->data.x_optional = nullptr;
|
|
else {
|
|
ZigValue *arg_type = ira->codegen->pass1_arena->create<ZigValue>();
|
|
arg_type->special = ConstValSpecialStatic;
|
|
arg_type->type = ira->codegen->builtin_types.entry_type;
|
|
arg_type->data.x_type = fn_param_info->type;
|
|
inner_fields[2]->data.x_optional = arg_type;
|
|
}
|
|
|
|
fn_arg_val->data.x_struct.fields = inner_fields;
|
|
fn_arg_val->parent.id = ConstParentIdArray;
|
|
fn_arg_val->parent.data.p_array.array_val = fn_arg_array;
|
|
fn_arg_val->parent.data.p_array.elem_index = fn_arg_index;
|
|
}
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdBoundFn:
|
|
{
|
|
ZigType *fn_type = type_entry->data.bound_fn.fn_type;
|
|
assert(fn_type->id == ZigTypeIdFn);
|
|
if ((err = ir_make_type_info_value(ira, source_instr, fn_type, &result)))
|
|
return err;
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdOpaque:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Opaque", nullptr);
|
|
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 1);
|
|
result->data.x_struct.fields = fields;
|
|
|
|
// decls: []TypeInfo.Declaration
|
|
ensure_field_index(result->type, "decls", 0);
|
|
if ((err = ir_make_type_info_decls(ira, source_instr, fields[0],
|
|
type_entry->data.opaque.decls_scope, false)))
|
|
{
|
|
return err;
|
|
}
|
|
|
|
break;
|
|
}
|
|
case ZigTypeIdFnFrame:
|
|
{
|
|
result = ira->codegen->pass1_arena->create<ZigValue>();
|
|
result->special = ConstValSpecialStatic;
|
|
result->type = ir_type_info_get_type(ira, "Frame", nullptr);
|
|
ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 1);
|
|
result->data.x_struct.fields = fields;
|
|
ZigFn *fn = type_entry->data.frame.fn;
|
|
// function: anytype
|
|
ensure_field_index(result->type, "function", 0);
|
|
fields[0] = create_const_fn(ira->codegen, fn);
|
|
break;
|
|
}
|
|
}
|
|
|
|
assert(result != nullptr);
|
|
ira->codegen->type_info_cache.put(type_entry, result);
|
|
*out = result;
|
|
return ErrorNone;
|
|
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_type_info(IrAnalyze *ira, IrInstSrcTypeInfo *instruction) {
    // Implements @typeInfo: produces a comptime-known std.builtin.TypeInfo
    // union value describing the operand type.
    IrInstGen *operand = instruction->type_value->child;
    ZigType *queried_type = ir_resolve_type(ira, operand);
    if (type_is_invalid(queried_type))
        return ira->codegen->invalid_inst_gen;

    // The result type is the TypeInfo union itself (no variant selected yet).
    ZigType *type_info_type = ir_type_info_get_type(ira, nullptr, nullptr);

    ZigValue *payload;
    Error err = ir_make_type_info_value(ira, &instruction->base.base, queried_type, &payload);
    if (err != ErrorNone)
        return ira->codegen->invalid_inst_gen;

    IrInstGen *result = ir_const(ira, &instruction->base.base, type_info_type);
    ZigValue *union_val = result->value;
    // The union tag is derived from the ZigTypeId of the queried type.
    bigint_init_unsigned(&union_val->data.x_union.tag, type_id_index(queried_type));
    union_val->data.x_union.payload = payload;

    // Some variants (e.g. Void, Bool) carry no payload value at all.
    if (payload != nullptr) {
        payload->parent.id = ConstParentIdUnion;
        payload->parent.data.p_union.union_val = union_val;
    }

    return result;
}
|
|
|
|
static ZigValue *get_const_field(IrAnalyze *ira, AstNode *source_node, ZigValue *struct_value,
        const char *name, size_t field_index)
{
    // Fetches the field at `field_index` from a comptime struct value,
    // checking (via ensure_field_index) that `name` really lives at that
    // index. Returns nullptr if the field cannot be resolved to a
    // comptime constant.
    ensure_field_index(struct_value->type, name, field_index);
    ZigValue *field_val = struct_value->data.x_struct.fields[field_index];
    Error err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, source_node,
            field_val, UndefBad);
    if (err != ErrorNone)
        return nullptr;
    return field_val;
}
|
|
|
|
static Error get_const_field_sentinel(IrAnalyze *ira, IrInst* source_instr, ZigValue *struct_value,
        const char *name, size_t field_index, ZigType *elem_type, ZigValue **result)
{
    // Reads an optional sentinel field (typed `?elem_type`) out of a
    // TypeInfo struct. On success, *result holds the sentinel value, or
    // nullptr when the optional field is null.
    ZigValue *field_val = get_const_field(ira, source_instr->source_node, struct_value, name, field_index);
    if (field_val == nullptr)
        return ErrorSemanticAnalyzeFail;

    // Coerce the raw field value to ?elem_type so its payload has the
    // representation the caller expects.
    IrInstGen *uncasted = ir_const_move(ira, source_instr, field_val);
    IrInstGen *casted = ir_implicit_cast(ira, uncasted,
            get_optional_type(ira->codegen, elem_type));
    if (type_is_invalid(casted->value->type))
        return ErrorSemanticAnalyzeFail;

    if (optional_value_is_null(casted->value)) {
        *result = nullptr;
        return ErrorNone;
    }

    assert(type_has_optional_repr(casted->value->type));
    *result = casted->value->data.x_optional;
    return ErrorNone;
}
|
|
|
|
static Error get_const_field_bool(IrAnalyze *ira, AstNode *source_node, ZigValue *struct_value,
        const char *name, size_t field_index, bool *out)
{
    // Reads a comptime-known `bool` field into *out.
    ZigValue *field_val = get_const_field(ira, source_node, struct_value, name, field_index);
    if (field_val == nullptr)
        return ErrorSemanticAnalyzeFail;
    // Callers only invoke this on fields already typed as bool.
    assert(field_val->type == ira->codegen->builtin_types.entry_bool);
    *out = field_val->data.x_bool;
    return ErrorNone;
}
|
|
|
|
static BigInt *get_const_field_lit_int(IrAnalyze *ira, AstNode *source_node, ZigValue *struct_value, const char *name, size_t field_index)
{
    // Returns a pointer to the BigInt payload of a comptime_int field,
    // or nullptr if the field could not be resolved to a constant.
    ZigValue *field_val = get_const_field(ira, source_node, struct_value, name, field_index);
    if (field_val == nullptr)
        return nullptr;
    assert(field_val->type == ira->codegen->builtin_types.entry_num_lit_int);
    return &field_val->data.x_bigint;
}
|
|
|
|
static ZigType *get_const_field_meta_type(IrAnalyze *ira, AstNode *source_node, ZigValue *struct_value, const char *name, size_t field_index)
{
    // Reads a `type`-typed field. On resolution failure, returns the
    // invalid-type sentinel so callers can detect the error with
    // type_is_invalid().
    ZigValue *field_val = get_const_field(ira, source_node, struct_value, name, field_index);
    if (field_val == nullptr)
        return ira->codegen->invalid_inst_gen->value->type;
    assert(field_val->type == ira->codegen->builtin_types.entry_type);
    return field_val->data.x_type;
}
|
|
|
|
static ZigType *get_const_field_meta_type_optional(IrAnalyze *ira, AstNode *source_node,
        ZigValue *struct_value, const char *name, size_t field_index)
{
    // Reads a `?type` field. Returns:
    //   - the invalid-type sentinel when the field cannot be resolved,
    //   - nullptr when the optional is null,
    //   - otherwise the contained type.
    ZigValue *field_val = get_const_field(ira, source_node, struct_value, name, field_index);
    if (field_val == nullptr)
        return ira->codegen->invalid_inst_gen->value->type;
    assert(field_val->type->id == ZigTypeIdOptional);
    assert(field_val->type->data.maybe.child_type == ira->codegen->builtin_types.entry_type);
    ZigValue *maybe_type = field_val->data.x_optional;
    return (maybe_type == nullptr) ? nullptr : maybe_type->data.x_type;
}
|
|
|
|
static Error get_const_field_buf(IrAnalyze *ira, AstNode *source_node, ZigValue *struct_value,
        const char *name, size_t field_index, Buf *out)
{
    // Extracts a comptime-known byte-slice field ([]const u8) from a struct
    // value into `out`.
    ZigValue *slice = get_const_field(ira, source_node, struct_value, name, field_index);
    // BUG FIX: get_const_field returns nullptr when the field value cannot
    // be resolved to a comptime constant (it has already reported the
    // error). Previously this was dereferenced unconditionally, crashing
    // the compiler; propagate the failure instead, matching the sibling
    // get_const_field_* helpers.
    if (slice == nullptr)
        return ErrorSemanticAnalyzeFail;
    ZigValue *ptr = slice->data.x_struct.fields[slice_ptr_index];
    ZigValue *len = slice->data.x_struct.fields[slice_len_index];
    assert(ptr->data.x_ptr.special == ConstPtrSpecialBaseArray);
    ZigValue *arr = ptr->data.x_ptr.data.base_array.array_val;
    assert(arr->special == ConstValSpecialStatic);

    // The slice views a sub-range [start_value, start_value + len_value)
    // of the underlying array value.
    const size_t start_value = ptr->data.x_ptr.data.base_array.elem_index;
    const size_t len_value = bigint_as_usize(&len->data.x_bigint);

    switch (arr->data.x_array.special) {
        case ConstArraySpecialUndef:
            // A slice of undefined memory is not a usable comptime string.
            return ErrorSemanticAnalyzeFail;
        case ConstArraySpecialNone: {
            // Element-wise representation: copy each element's integer
            // value out as a byte.
            assert(start_value <= arr->type->data.array.len);
            assert(start_value + len_value <= arr->type->data.array.len);
            buf_resize(out, 0);
            for (size_t j = 0; j < len_value; j++) {
                ZigValue *ch_val = &arr->data.x_array.data.s_none.elements[start_value + j];
                unsigned ch = bigint_as_u32(&ch_val->data.x_bigint);
                buf_append_char(out, ch);
            }
            break;
        }
        case ConstArraySpecialBuf:
            // Already stored as a contiguous buffer; copy the sub-range.
            assert(start_value <= buf_len(arr->data.x_array.data.s_buf));
            assert(start_value + len_value <= buf_len(arr->data.x_array.data.s_buf));
            buf_init_from_mem(out, buf_ptr(arr->data.x_array.data.s_buf) + start_value, len_value);
            break;
    }
    return ErrorNone;
}
|
|
|
|
static ZigType *type_info_to_type(IrAnalyze *ira, IrInst *source_instr, ZigTypeId tagTypeId, ZigValue *payload) {
|
|
Error err;
|
|
switch (tagTypeId) {
|
|
case ZigTypeIdInvalid:
|
|
zig_unreachable();
|
|
case ZigTypeIdMetaType:
|
|
return ira->codegen->builtin_types.entry_type;
|
|
case ZigTypeIdVoid:
|
|
return ira->codegen->builtin_types.entry_void;
|
|
case ZigTypeIdBool:
|
|
return ira->codegen->builtin_types.entry_bool;
|
|
case ZigTypeIdUnreachable:
|
|
return ira->codegen->builtin_types.entry_unreachable;
|
|
case ZigTypeIdComptimeFloat:
|
|
return ira->codegen->builtin_types.entry_num_lit_float;
|
|
case ZigTypeIdComptimeInt:
|
|
return ira->codegen->builtin_types.entry_num_lit_int;
|
|
case ZigTypeIdUndefined:
|
|
return ira->codegen->builtin_types.entry_undef;
|
|
case ZigTypeIdNull:
|
|
return ira->codegen->builtin_types.entry_null;
|
|
case ZigTypeIdEnumLiteral:
|
|
return ira->codegen->builtin_types.entry_enum_literal;
|
|
default:
|
|
if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, source_instr->source_node, payload, UndefBad)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
switch (tagTypeId) {
|
|
case ZigTypeIdInvalid:
|
|
case ZigTypeIdMetaType:
|
|
case ZigTypeIdVoid:
|
|
case ZigTypeIdBool:
|
|
case ZigTypeIdUnreachable:
|
|
case ZigTypeIdComptimeFloat:
|
|
case ZigTypeIdComptimeInt:
|
|
case ZigTypeIdUndefined:
|
|
case ZigTypeIdNull:
|
|
case ZigTypeIdEnumLiteral:
|
|
zig_unreachable();
|
|
case ZigTypeIdInt: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Int", nullptr));
|
|
BigInt *bi = get_const_field_lit_int(ira, source_instr->source_node, payload, "bits", 1);
|
|
if (bi == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
ZigValue *value = get_const_field(ira, source_instr->source_node, payload, "signedness", 0);
|
|
if (value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(value->type == get_builtin_type(ira->codegen, "Signedness"));
|
|
bool is_signed = !bigint_as_u32(&value->data.x_enum_tag);
|
|
return get_int_type(ira->codegen, is_signed, bigint_as_u32(bi));
|
|
}
|
|
case ZigTypeIdFloat:
|
|
{
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Float", nullptr));
|
|
BigInt *bi = get_const_field_lit_int(ira, source_instr->source_node, payload, "bits", 0);
|
|
if (bi == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
uint32_t bits = bigint_as_u32(bi);
|
|
switch (bits) {
|
|
case 16: return ira->codegen->builtin_types.entry_f16;
|
|
case 32: return ira->codegen->builtin_types.entry_f32;
|
|
case 64: return ira->codegen->builtin_types.entry_f64;
|
|
case 128: return ira->codegen->builtin_types.entry_f128;
|
|
}
|
|
ir_add_error(ira, source_instr, buf_sprintf("%d-bit float unsupported", bits));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
case ZigTypeIdPointer:
|
|
{
|
|
ZigType *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer", nullptr);
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == type_info_pointer_type);
|
|
ZigValue *size_value = get_const_field(ira, source_instr->source_node, payload, "size", 0);
|
|
if (size_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
assert(size_value->type == ir_type_info_get_type(ira, "Size", type_info_pointer_type));
|
|
BuiltinPtrSize size_enum_index = (BuiltinPtrSize)bigint_as_u32(&size_value->data.x_enum_tag);
|
|
PtrLen ptr_len = size_enum_index_to_ptr_len(size_enum_index);
|
|
ZigType *elem_type = get_const_field_meta_type(ira, source_instr->source_node, payload, "child", 4);
|
|
if (type_is_invalid(elem_type))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
ZigValue *sentinel;
|
|
if ((err = get_const_field_sentinel(ira, source_instr, payload, "sentinel", 6,
|
|
elem_type, &sentinel)))
|
|
{
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
if (sentinel != nullptr && (size_enum_index == BuiltinPtrSizeOne || size_enum_index == BuiltinPtrSizeC)) {
|
|
ir_add_error(ira, source_instr,
|
|
buf_sprintf("sentinels are only allowed on slices and unknown-length pointers"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
BigInt *alignment = get_const_field_lit_int(ira, source_instr->source_node, payload, "alignment", 3);
|
|
if (alignment == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
bool is_const;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, payload, "is_const", 1, &is_const)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
bool is_volatile;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, payload, "is_volatile", 2,
|
|
&is_volatile)))
|
|
{
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
bool is_allowzero;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, payload, "is_allowzero", 5,
|
|
&is_allowzero)))
|
|
{
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
|
|
ZigType *ptr_type = get_pointer_to_type_extra2(ira->codegen,
|
|
elem_type,
|
|
is_const,
|
|
is_volatile,
|
|
ptr_len,
|
|
bigint_as_u32(alignment),
|
|
0, // bit_offset_in_host
|
|
0, // host_int_bytes
|
|
is_allowzero,
|
|
VECTOR_INDEX_NONE, nullptr, sentinel);
|
|
if (size_enum_index != BuiltinPtrSizeSlice)
|
|
return ptr_type;
|
|
return get_slice_type(ira->codegen, ptr_type);
|
|
}
|
|
case ZigTypeIdArray: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Array", nullptr));
|
|
ZigType *elem_type = get_const_field_meta_type(ira, source_instr->source_node, payload, "child", 1);
|
|
if (type_is_invalid(elem_type))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
ZigValue *sentinel;
|
|
if ((err = get_const_field_sentinel(ira, source_instr, payload, "sentinel", 2,
|
|
elem_type, &sentinel)))
|
|
{
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
BigInt *bi = get_const_field_lit_int(ira, source_instr->source_node, payload, "len", 0);
|
|
if (bi == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
return get_array_type(ira->codegen, elem_type, bigint_as_u64(bi), sentinel);
|
|
}
|
|
case ZigTypeIdOptional: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Optional", nullptr));
|
|
ZigType *child_type = get_const_field_meta_type(ira, source_instr->source_node, payload, "child", 0);
|
|
if (type_is_invalid(child_type))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
return get_optional_type(ira->codegen, child_type);
|
|
}
|
|
case ZigTypeIdErrorUnion: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "ErrorUnion", nullptr));
|
|
ZigType *err_set_type = get_const_field_meta_type(ira, source_instr->source_node, payload, "error_set", 0);
|
|
if (type_is_invalid(err_set_type))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
ZigType *payload_type = get_const_field_meta_type(ira, source_instr->source_node, payload, "payload", 1);
|
|
if (type_is_invalid(payload_type))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
return get_error_union_type(ira->codegen, err_set_type, payload_type);
|
|
}
|
|
case ZigTypeIdOpaque: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Opaque", nullptr));
|
|
|
|
ZigValue *decls_value = get_const_field(ira, source_instr->source_node, payload, "decls", 0);
|
|
if (decls_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(decls_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(decls_value->type));
|
|
ZigValue *decls_len_value = decls_value->data.x_struct.fields[slice_len_index];
|
|
size_t decls_len = bigint_as_usize(&decls_len_value->data.x_bigint);
|
|
if (decls_len != 0) {
|
|
ir_add_error(ira, source_instr, buf_create_from_str("TypeInfo.Struct.decls must be empty for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
Buf *bare_name = buf_alloc();
|
|
Buf *full_name = get_anon_type_name(ira->codegen,
|
|
ira->old_irb.exec, "opaque", source_instr->scope, source_instr->source_node, bare_name);
|
|
return get_opaque_type(ira->codegen,
|
|
source_instr->scope, source_instr->source_node, buf_ptr(full_name), bare_name);
|
|
}
|
|
case ZigTypeIdVector: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Vector", nullptr));
|
|
BigInt *len = get_const_field_lit_int(ira, source_instr->source_node, payload, "len", 0);
|
|
if (len == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
ZigType *child_type = get_const_field_meta_type(ira, source_instr->source_node, payload, "child", 1);
|
|
if ((err = ir_validate_vector_elem_type(ira, source_instr->source_node, child_type))) {
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
return get_vector_type(ira->codegen, bigint_as_u32(len), child_type);
|
|
}
|
|
case ZigTypeIdAnyFrame: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "AnyFrame", nullptr));
|
|
ZigType *child_type = get_const_field_meta_type_optional(ira, source_instr->source_node, payload, "child", 0);
|
|
if (child_type != nullptr && type_is_invalid(child_type))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
return get_any_frame_type(ira->codegen, child_type);
|
|
}
|
|
case ZigTypeIdFnFrame: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Frame", nullptr));
|
|
ZigValue *function = get_const_field(ira, source_instr->source_node, payload, "function", 0);
|
|
if (function == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
assert(function->type->id == ZigTypeIdFn);
|
|
ZigFn *fn = function->data.x_ptr.data.fn.fn_entry;
|
|
return get_fn_frame_type(ira->codegen, fn);
|
|
}
|
|
case ZigTypeIdErrorSet: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type->id == ZigTypeIdOptional);
|
|
ZigValue *slice = payload->data.x_optional;
|
|
if (slice == nullptr)
|
|
return ira->codegen->builtin_types.entry_global_error_set;
|
|
assert(slice->special == ConstValSpecialStatic);
|
|
assert(is_slice(slice->type));
|
|
ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
|
|
Buf bare_name = BUF_INIT;
|
|
buf_init_from_buf(&err_set_type->name, get_anon_type_name(ira->codegen, ira->old_irb.exec, "error", source_instr->scope, source_instr->source_node, &bare_name));
|
|
err_set_type->size_in_bits = ira->codegen->builtin_types.entry_global_error_set->size_in_bits;
|
|
err_set_type->abi_align = ira->codegen->builtin_types.entry_global_error_set->abi_align;
|
|
err_set_type->abi_size = ira->codegen->builtin_types.entry_global_error_set->abi_size;
|
|
ZigValue *ptr = slice->data.x_struct.fields[slice_ptr_index];
|
|
assert(ptr->data.x_ptr.special == ConstPtrSpecialBaseArray);;
|
|
assert(ptr->data.x_ptr.data.base_array.elem_index == 0);
|
|
ZigValue *arr = ptr->data.x_ptr.data.base_array.array_val;
|
|
assert(arr->special == ConstValSpecialStatic);
|
|
assert(arr->data.x_array.special == ConstArraySpecialNone);
|
|
ZigValue *len = slice->data.x_struct.fields[slice_len_index];
|
|
size_t count = bigint_as_usize(&len->data.x_bigint);
|
|
err_set_type->data.error_set.err_count = count;
|
|
err_set_type->data.error_set.errors = heap::c_allocator.allocate<ErrorTableEntry *>(count);
|
|
bool *already_set = heap::c_allocator.allocate<bool>(ira->codegen->errors_by_index.length + count);
|
|
for (size_t i = 0; i < count; i++) {
|
|
ZigValue *error = &arr->data.x_array.data.s_none.elements[i];
|
|
assert(error->type == ir_type_info_get_type(ira, "Error", nullptr));
|
|
ErrorTableEntry *err_entry = heap::c_allocator.create<ErrorTableEntry>();
|
|
err_entry->decl_node = source_instr->source_node;
|
|
if ((err = get_const_field_buf(ira, source_instr->source_node, error, "name", 0, &err_entry->name)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
auto existing_entry = ira->codegen->error_table.put_unique(&err_entry->name, err_entry);
|
|
if (existing_entry) {
|
|
err_entry->value = existing_entry->value->value;
|
|
} else {
|
|
size_t error_value_count = ira->codegen->errors_by_index.length;
|
|
assert((uint32_t)error_value_count < (((uint32_t)1) << (uint32_t)ira->codegen->err_tag_type->data.integral.bit_count));
|
|
err_entry->value = error_value_count;
|
|
ira->codegen->errors_by_index.append(err_entry);
|
|
}
|
|
if (already_set[err_entry->value]) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("duplicate error: %s", buf_ptr(&err_entry->name)));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
} else {
|
|
already_set[err_entry->value] = true;
|
|
}
|
|
err_set_type->data.error_set.errors[i] = err_entry;
|
|
}
|
|
return err_set_type;
|
|
}
|
|
case ZigTypeIdStruct: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Struct", nullptr));
|
|
|
|
ZigValue *layout_value = get_const_field(ira, source_instr->source_node, payload, "layout", 0);
|
|
if (layout_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(layout_value->special == ConstValSpecialStatic);
|
|
assert(layout_value->type == ir_type_info_get_type(ira, "ContainerLayout", nullptr));
|
|
ContainerLayout layout = (ContainerLayout)bigint_as_u32(&layout_value->data.x_enum_tag);
|
|
|
|
ZigValue *fields_value = get_const_field(ira, source_instr->source_node, payload, "fields", 1);
|
|
if (fields_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(fields_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(fields_value->type));
|
|
ZigValue *fields_ptr = fields_value->data.x_struct.fields[slice_ptr_index];
|
|
ZigValue *fields_len_value = fields_value->data.x_struct.fields[slice_len_index];
|
|
size_t fields_len = bigint_as_usize(&fields_len_value->data.x_bigint);
|
|
|
|
ZigValue *decls_value = get_const_field(ira, source_instr->source_node, payload, "decls", 2);
|
|
if (decls_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(decls_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(decls_value->type));
|
|
ZigValue *decls_len_value = decls_value->data.x_struct.fields[slice_len_index];
|
|
size_t decls_len = bigint_as_usize(&decls_len_value->data.x_bigint);
|
|
if (decls_len != 0) {
|
|
ir_add_error(ira, source_instr, buf_create_from_str("TypeInfo.Struct.decls must be empty for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
bool is_tuple;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, payload, "is_tuple", 3, &is_tuple)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
ZigType *entry = new_type_table_entry(ZigTypeIdStruct);
|
|
buf_init_from_buf(&entry->name,
|
|
get_anon_type_name(ira->codegen, ira->old_irb.exec, "struct", source_instr->scope, source_instr->source_node, &entry->name));
|
|
entry->data.structure.decl_node = source_instr->source_node;
|
|
entry->data.structure.fields = alloc_type_struct_fields(fields_len);
|
|
entry->data.structure.fields_by_name.init(fields_len);
|
|
entry->data.structure.src_field_count = fields_len;
|
|
entry->data.structure.layout = layout;
|
|
entry->data.structure.special = is_tuple ? StructSpecialInferredTuple : StructSpecialNone;
|
|
entry->data.structure.created_by_at_type = true;
|
|
entry->data.structure.decls_scope = create_decls_scope(
|
|
ira->codegen, source_instr->source_node, source_instr->scope, entry, get_scope_import(source_instr->scope), &entry->name);
|
|
|
|
assert(fields_ptr->data.x_ptr.special == ConstPtrSpecialBaseArray);
|
|
assert(fields_ptr->data.x_ptr.data.base_array.elem_index == 0);
|
|
ZigValue *fields_arr = fields_ptr->data.x_ptr.data.base_array.array_val;
|
|
assert(fields_arr->special == ConstValSpecialStatic);
|
|
assert(fields_arr->data.x_array.special == ConstArraySpecialNone);
|
|
for (size_t i = 0; i < fields_len; i++) {
|
|
ZigValue *field_value = &fields_arr->data.x_array.data.s_none.elements[i];
|
|
assert(field_value->type == ir_type_info_get_type(ira, "StructField", nullptr));
|
|
TypeStructField *field = entry->data.structure.fields[i];
|
|
field->name = buf_alloc();
|
|
if ((err = get_const_field_buf(ira, source_instr->source_node, field_value, "name", 0, field->name)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
field->decl_node = source_instr->source_node;
|
|
ZigValue *type_value = get_const_field(ira, source_instr->source_node, field_value, "field_type", 1);
|
|
if (type_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
field->type_val = type_value;
|
|
field->type_entry = type_value->data.x_type;
|
|
if (entry->data.structure.fields_by_name.put_unique(field->name, field) != nullptr) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("duplicate struct field '%s'", buf_ptr(field->name)));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
ZigValue *default_value = get_const_field(ira, source_instr->source_node, field_value, "default_value", 2);
|
|
if (default_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
if (default_value->type->id == ZigTypeIdNull) {
|
|
field->init_val = nullptr;
|
|
} else if (default_value->type->id == ZigTypeIdOptional && default_value->type->data.maybe.child_type == field->type_entry) {
|
|
field->init_val = default_value->data.x_optional;
|
|
} else if (default_value->type == field->type_entry) {
|
|
field->init_val = default_value;
|
|
} else {
|
|
ir_add_error(ira, source_instr,
|
|
buf_sprintf("default_value of field '%s' is of type '%s', expected '%s' or '?%s'",
|
|
buf_ptr(field->name), buf_ptr(&default_value->type->name),
|
|
buf_ptr(&field->type_entry->name), buf_ptr(&field->type_entry->name)));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, field_value, "is_comptime", 3, &field->is_comptime)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
BigInt *alignment = get_const_field_lit_int(ira, source_instr->source_node, field_value, "alignment", 4);
|
|
if (alignment == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
field->align = bigint_as_u32(alignment);
|
|
}
|
|
|
|
return entry;
|
|
}
|
|
case ZigTypeIdEnum: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Enum", nullptr));
|
|
|
|
ZigValue *layout_value = get_const_field(ira, source_instr->source_node, payload, "layout", 0);
|
|
if (layout_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
assert(layout_value->special == ConstValSpecialStatic);
|
|
assert(layout_value->type == ir_type_info_get_type(ira, "ContainerLayout", nullptr));
|
|
ContainerLayout layout = (ContainerLayout)bigint_as_u32(&layout_value->data.x_enum_tag);
|
|
|
|
ZigType *tag_type = get_const_field_meta_type(ira, source_instr->source_node, payload, "tag_type", 1);
|
|
if (type_is_invalid(tag_type))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
if (tag_type->id != ZigTypeIdInt) {
|
|
ir_add_error(ira, source_instr, buf_sprintf(
|
|
"TypeInfo.Enum.tag_type must be an integer type, not '%s'", buf_ptr(&tag_type->name)));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
ZigValue *fields_value = get_const_field(ira, source_instr->source_node, payload, "fields", 2);
|
|
if (fields_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
assert(fields_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(fields_value->type));
|
|
ZigValue *fields_ptr = fields_value->data.x_struct.fields[slice_ptr_index];
|
|
ZigValue *fields_len_value = fields_value->data.x_struct.fields[slice_len_index];
|
|
size_t fields_len = bigint_as_usize(&fields_len_value->data.x_bigint);
|
|
|
|
ZigValue *decls_value = get_const_field(ira, source_instr->source_node, payload, "decls", 3);
|
|
if (decls_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
assert(decls_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(decls_value->type));
|
|
ZigValue *decls_len_value = decls_value->data.x_struct.fields[slice_len_index];
|
|
size_t decls_len = bigint_as_usize(&decls_len_value->data.x_bigint);
|
|
if (decls_len != 0) {
|
|
ir_add_error(ira, source_instr, buf_create_from_str("TypeInfo.Enum.decls must be empty for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
Error err;
|
|
bool is_exhaustive;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, payload, "is_exhaustive", 4, &is_exhaustive)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
ZigType *entry = new_type_table_entry(ZigTypeIdEnum);
|
|
buf_init_from_buf(&entry->name,
|
|
get_anon_type_name(ira->codegen, ira->old_irb.exec, "enum", source_instr->scope, source_instr->source_node, &entry->name));
|
|
entry->data.enumeration.decl_node = source_instr->source_node;
|
|
entry->data.enumeration.tag_int_type = tag_type;
|
|
entry->data.enumeration.decls_scope = create_decls_scope(
|
|
ira->codegen, source_instr->source_node, source_instr->scope, entry, get_scope_import(source_instr->scope), &entry->name);
|
|
entry->data.enumeration.fields = heap::c_allocator.allocate<TypeEnumField>(fields_len);
|
|
entry->data.enumeration.fields_by_name.init(fields_len);
|
|
entry->data.enumeration.src_field_count = fields_len;
|
|
entry->data.enumeration.layout = layout;
|
|
entry->data.enumeration.non_exhaustive = !is_exhaustive;
|
|
|
|
assert(fields_ptr->data.x_ptr.special == ConstPtrSpecialBaseArray);
|
|
assert(fields_ptr->data.x_ptr.data.base_array.elem_index == 0);
|
|
ZigValue *fields_arr = fields_ptr->data.x_ptr.data.base_array.array_val;
|
|
assert(fields_arr->special == ConstValSpecialStatic);
|
|
assert(fields_arr->data.x_array.special == ConstArraySpecialNone);
|
|
for (size_t i = 0; i < fields_len; i++) {
|
|
ZigValue *field_value = &fields_arr->data.x_array.data.s_none.elements[i];
|
|
assert(field_value->type == ir_type_info_get_type(ira, "EnumField", nullptr));
|
|
TypeEnumField *field = &entry->data.enumeration.fields[i];
|
|
field->name = buf_alloc();
|
|
if ((err = get_const_field_buf(ira, source_instr->source_node, field_value, "name", 0, field->name)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
field->decl_index = i;
|
|
field->decl_node = source_instr->source_node;
|
|
if (entry->data.enumeration.fields_by_name.put_unique(field->name, field) != nullptr) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("duplicate enum field '%s'", buf_ptr(field->name)));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
BigInt *field_int_value = get_const_field_lit_int(ira, source_instr->source_node, field_value, "value", 1);
|
|
if (field_int_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
field->value = *field_int_value;
|
|
}
|
|
return entry;
|
|
}
|
|
case ZigTypeIdUnion: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Union", nullptr));
|
|
|
|
ZigValue *layout_value = get_const_field(ira, source_instr->source_node, payload, "layout", 0);
|
|
if (layout_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(layout_value->special == ConstValSpecialStatic);
|
|
assert(layout_value->type == ir_type_info_get_type(ira, "ContainerLayout", nullptr));
|
|
ContainerLayout layout = (ContainerLayout)bigint_as_u32(&layout_value->data.x_enum_tag);
|
|
|
|
ZigType *tag_type = get_const_field_meta_type_optional(ira, source_instr->source_node, payload, "tag_type", 1);
|
|
if (tag_type != nullptr && type_is_invalid(tag_type)) {
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
if (tag_type != nullptr && tag_type->id != ZigTypeIdEnum) {
|
|
ir_add_error(ira, source_instr, buf_sprintf(
|
|
"expected enum type, found '%s'", type_id_name(tag_type->id)));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
ZigValue *fields_value = get_const_field(ira, source_instr->source_node, payload, "fields", 2);
|
|
if (fields_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
assert(fields_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(fields_value->type));
|
|
ZigValue *fields_ptr = fields_value->data.x_struct.fields[slice_ptr_index];
|
|
ZigValue *fields_len_value = fields_value->data.x_struct.fields[slice_len_index];
|
|
size_t fields_len = bigint_as_usize(&fields_len_value->data.x_bigint);
|
|
|
|
ZigValue *decls_value = get_const_field(ira, source_instr->source_node, payload, "decls", 3);
|
|
if (decls_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
assert(decls_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(decls_value->type));
|
|
ZigValue *decls_len_value = decls_value->data.x_struct.fields[slice_len_index];
|
|
size_t decls_len = bigint_as_usize(&decls_len_value->data.x_bigint);
|
|
if (decls_len != 0) {
|
|
ir_add_error(ira, source_instr, buf_create_from_str("TypeInfo.Union.decls must be empty for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
ZigType *entry = new_type_table_entry(ZigTypeIdUnion);
|
|
buf_init_from_buf(&entry->name,
|
|
get_anon_type_name(ira->codegen, ira->old_irb.exec, "union", source_instr->scope, source_instr->source_node, &entry->name));
|
|
entry->data.unionation.decl_node = source_instr->source_node;
|
|
entry->data.unionation.fields = heap::c_allocator.allocate<TypeUnionField>(fields_len);
|
|
entry->data.unionation.fields_by_name.init(fields_len);
|
|
entry->data.unionation.decls_scope = create_decls_scope(
|
|
ira->codegen, source_instr->source_node, source_instr->scope, entry, get_scope_import(source_instr->scope), &entry->name);
|
|
entry->data.unionation.tag_type = tag_type;
|
|
entry->data.unionation.src_field_count = fields_len;
|
|
entry->data.unionation.layout = layout;
|
|
|
|
assert(fields_ptr->data.x_ptr.special == ConstPtrSpecialBaseArray);
|
|
assert(fields_ptr->data.x_ptr.data.base_array.elem_index == 0);
|
|
ZigValue *fields_arr = fields_ptr->data.x_ptr.data.base_array.array_val;
|
|
assert(fields_arr->special == ConstValSpecialStatic);
|
|
assert(fields_arr->data.x_array.special == ConstArraySpecialNone);
|
|
for (size_t i = 0; i < fields_len; i++) {
|
|
ZigValue *field_value = &fields_arr->data.x_array.data.s_none.elements[i];
|
|
assert(field_value->type == ir_type_info_get_type(ira, "UnionField", nullptr));
|
|
TypeUnionField *field = &entry->data.unionation.fields[i];
|
|
field->name = buf_alloc();
|
|
if ((err = get_const_field_buf(ira, source_instr->source_node, field_value, "name", 0, field->name)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
if (entry->data.unionation.fields_by_name.put_unique(field->name, field) != nullptr) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("duplicate union field '%s'", buf_ptr(field->name)));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
field->decl_node = source_instr->source_node;
|
|
ZigValue *type_value = get_const_field(ira, source_instr->source_node, field_value, "field_type", 1);
|
|
if (type_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
field->type_val = type_value;
|
|
field->type_entry = type_value->data.x_type;
|
|
BigInt *alignment = get_const_field_lit_int(ira, source_instr->source_node, field_value, "alignment", 2);
|
|
if (alignment == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
field->align = bigint_as_u32(alignment);
|
|
}
|
|
return entry;
|
|
}
|
|
case ZigTypeIdFn:
|
|
case ZigTypeIdBoundFn: {
|
|
assert(payload->special == ConstValSpecialStatic);
|
|
assert(payload->type == ir_type_info_get_type(ira, "Fn", nullptr));
|
|
|
|
ZigValue *cc_value = get_const_field(ira, source_instr->source_node, payload, "calling_convention", 0);
|
|
if (cc_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(cc_value->special == ConstValSpecialStatic);
|
|
assert(cc_value->type == get_builtin_type(ira->codegen, "CallingConvention"));
|
|
CallingConvention cc = (CallingConvention)bigint_as_u32(&cc_value->data.x_enum_tag);
|
|
|
|
BigInt *alignment = get_const_field_lit_int(ira, source_instr->source_node, payload, "alignment", 1);
|
|
if (alignment == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
|
|
Error err;
|
|
bool is_generic;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, payload, "is_generic", 2, &is_generic)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
if (is_generic) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("TypeInfo.Fn.is_generic must be false for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
bool is_var_args;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, payload, "is_var_args", 3, &is_var_args)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
if (is_var_args && cc != CallingConventionC) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("varargs functions must have C calling convention"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
ZigType *return_type = get_const_field_meta_type_optional(ira, source_instr->source_node, payload, "return_type", 4);
|
|
if (return_type == nullptr) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("TypeInfo.Fn.return_type must be non-null for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
|
|
ZigValue *args_value = get_const_field(ira, source_instr->source_node, payload, "args", 5);
|
|
if (args_value == nullptr)
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
assert(args_value->special == ConstValSpecialStatic);
|
|
assert(is_slice(args_value->type));
|
|
ZigValue *args_ptr = args_value->data.x_struct.fields[slice_ptr_index];
|
|
ZigValue *args_len_value = args_value->data.x_struct.fields[slice_len_index];
|
|
size_t args_len = bigint_as_usize(&args_len_value->data.x_bigint);
|
|
|
|
FnTypeId fn_type_id = {};
|
|
fn_type_id.return_type = return_type;
|
|
fn_type_id.param_info = heap::c_allocator.allocate<FnTypeParamInfo>(args_len);
|
|
fn_type_id.param_count = args_len;
|
|
fn_type_id.next_param_index = args_len;
|
|
fn_type_id.is_var_args = is_var_args;
|
|
fn_type_id.cc = cc;
|
|
fn_type_id.alignment = bigint_as_u32(alignment);
|
|
|
|
assert(args_ptr->data.x_ptr.special == ConstPtrSpecialBaseArray);
|
|
assert(args_ptr->data.x_ptr.data.base_array.elem_index == 0);
|
|
ZigValue *args_arr = args_ptr->data.x_ptr.data.base_array.array_val;
|
|
assert(args_arr->special == ConstValSpecialStatic);
|
|
assert(args_arr->data.x_array.special == ConstArraySpecialNone);
|
|
for (size_t i = 0; i < args_len; i++) {
|
|
ZigValue *arg_value = &args_arr->data.x_array.data.s_none.elements[i];
|
|
assert(arg_value->type == ir_type_info_get_type(ira, "FnArg", nullptr));
|
|
FnTypeParamInfo *info = &fn_type_id.param_info[i];
|
|
Error err;
|
|
bool is_generic;
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, arg_value, "is_generic", 0, &is_generic)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
if (is_generic) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("TypeInfo.FnArg.is_generic must be false for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
if ((err = get_const_field_bool(ira, source_instr->source_node, arg_value, "is_noalias", 1, &info->is_noalias)))
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
ZigType *type = get_const_field_meta_type_optional(
|
|
ira, source_instr->source_node, arg_value, "arg_type", 2);
|
|
if (type == nullptr) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("TypeInfo.FnArg.arg_type must be non-null for @Type"));
|
|
return ira->codegen->invalid_inst_gen->value->type;
|
|
}
|
|
info->type = type;
|
|
}
|
|
|
|
ZigType *entry = get_fn_type(ira->codegen, &fn_type_id);
|
|
|
|
switch (tagTypeId) {
|
|
case ZigTypeIdFn:
|
|
return entry;
|
|
case ZigTypeIdBoundFn: {
|
|
ZigType *bound_fn_entry = new_type_table_entry(ZigTypeIdBoundFn);
|
|
bound_fn_entry->name = *buf_sprintf("(bound %s)", buf_ptr(&entry->name));
|
|
bound_fn_entry->data.bound_fn.fn_type = entry;
|
|
return bound_fn_entry;
|
|
}
|
|
default:
|
|
zig_unreachable();
|
|
}
|
|
}
|
|
}
|
|
zig_unreachable();
|
|
}
|
|
|
|
// Implements the `@Type(info)` builtin: takes a comptime-known
// `std.builtin.TypeInfo` union value and materializes the corresponding
// ZigType, yielding it as a constant `type` result.
static IrInstGen *ir_analyze_instruction_type(IrAnalyze *ira, IrInstSrcType *instruction) {
    IrInstGen *raw_info = instruction->type_info->child;
    if (type_is_invalid(raw_info->value->type))
        return ira->codegen->invalid_inst_gen;

    // Coerce the operand to the TypeInfo union type before inspecting it.
    IrInstGen *casted_info = ir_implicit_cast(ira, raw_info, ir_type_info_get_type(ira, nullptr, nullptr));
    if (type_is_invalid(casted_info->value->type))
        return ira->codegen->invalid_inst_gen;

    // @Type requires its argument to be comptime-known and defined.
    ZigValue *info_val = ir_resolve_const(ira, casted_info, UndefBad);
    if (info_val == nullptr)
        return ira->codegen->invalid_inst_gen;

    // The union's tag selects which kind of type is being constructed;
    // the payload carries the per-kind description (child type, fields, ...).
    ZigTypeId tag = type_id_at_index(bigint_as_usize(&info_val->data.x_union.tag));
    ZigType *constructed = type_info_to_type(ira, &raw_info->base, tag,
            info_val->data.x_union.payload);
    if (type_is_invalid(constructed))
        return ira->codegen->invalid_inst_gen;
    return ir_const_type(ira, &instruction->base.base, constructed);
}
|
|
|
|
// Implements `@setEvalBranchQuota(n)`: raises the backward-branch quota for
// the current comptime execution. The quota is monotonic — a request smaller
// than the current quota is a no-op, never a reduction.
static IrInstGen *ir_analyze_instruction_set_eval_branch_quota(IrAnalyze *ira,
        IrInstSrcSetEvalBranchQuota *instruction)
{
    uint64_t requested_quota;
    if (!ir_resolve_usize(ira, instruction->new_quota->child, &requested_quota))
        return ira->codegen->invalid_inst_gen;

    // Only grow; keep the larger of the current and the requested quota.
    if (*ira->new_irb.exec->backward_branch_quota < requested_quota) {
        *ira->new_irb.exec->backward_branch_quota = requested_quota;
    }

    // The builtin itself evaluates to void.
    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Implements `@typeName(T)`: yields the bare name of type `T` as a
// comptime-known string constant.
static IrInstGen *ir_analyze_instruction_type_name(IrAnalyze *ira, IrInstSrcTypeName *instruction) {
    ZigType *resolved = ir_resolve_type(ira, instruction->type_value->child);
    if (type_is_invalid(resolved))
        return ira->codegen->invalid_inst_gen;

    // Build the name constant lazily and memoize it on the type, so repeated
    // @typeName calls on the same type share a single value.
    if (resolved->cached_const_name_val == nullptr) {
        resolved->cached_const_name_val = create_const_str_lit(ira->codegen, type_bare_name(resolved));
    }

    IrInstGen *name_result = ir_const(ira, &instruction->base.base, nullptr);
    copy_const_val(ira->codegen, name_result->value, resolved->cached_const_name_val);
    return name_result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_c_import(IrAnalyze *ira, IrInstSrcCImport *instruction) {
    // Analyzes `@cImport(block)`: comptime-executes the block to collect C
    // preprocessor directives into a buffer, hands that buffer to stage2's
    // translate-c, then imports the resulting Zig source as an anonymous
    // package and returns the imported namespace type as a comptime value.
    // Returns invalid_inst_gen on any failure.
    Error err;
    AstNode *node = instruction->base.base.source_node;
    assert(node->type == NodeTypeFnCallExpr);
    AstNode *block_node = node->data.fn_call_expr.params.at(0);

    ScopeCImport *cimport_scope = create_cimport_scope(ira->codegen, node, instruction->base.base.scope);

    // Execute the C import block like an inline function; its observable
    // effect is appending directives to cimport_scope->buf.
    ZigType *void_type = ira->codegen->builtin_types.entry_void;
    ZigValue *cimport_result;
    ZigValue *result_ptr;
    create_result_ptr(ira->codegen, void_type, &cimport_result, &result_ptr);
    if ((err = ir_eval_const_value(ira->codegen, &cimport_scope->base, block_node, result_ptr,
        ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota, nullptr,
        &cimport_scope->buf, block_node, nullptr, nullptr, nullptr, UndefBad)))
    {
        return ira->codegen->invalid_inst_gen;
    }
    if (type_is_invalid(cimport_result->type))
        return ira->codegen->invalid_inst_gen;

    // Name the anonymous package after the call site so every @cImport gets a
    // distinct namespace.
    ZigPackage *cur_scope_pkg = scope_package(instruction->base.base.scope);
    Buf *namespace_name = buf_sprintf("%s.cimport:%" ZIG_PRI_usize ":%" ZIG_PRI_usize,
            buf_ptr(&cur_scope_pkg->pkg_path), node->line + 1, node->column + 1);

    ZigPackage *cimport_pkg = new_anonymous_package();
    cimport_pkg->package_table.put(buf_create_from_str("builtin"), ira->codegen->compile_var_package);
    cimport_pkg->package_table.put(buf_create_from_str("std"), ira->codegen->std_package);
    buf_init_from_buf(&cimport_pkg->pkg_path, namespace_name);

    // Run translate-c over the collected directives.
    const char *out_zig_path_ptr;
    size_t out_zig_path_len;
    Stage2ErrorMsg *errors_ptr;
    size_t errors_len;
    if ((err = stage2_cimport(&ira->codegen->stage1,
        buf_ptr(&cimport_scope->buf), buf_len(&cimport_scope->buf),
        &out_zig_path_ptr, &out_zig_path_len,
        &errors_ptr, &errors_len)))
    {
        if (err != ErrorCCompileErrors) {
            ir_add_error_node(ira, node, buf_sprintf("C import failed: %s", err_str(err)));
            return ira->codegen->invalid_inst_gen;
        }

        // Clang reported compile errors: attach each one as a note on a
        // single parent error at the @cImport call site.
        ErrorMsg *parent_err_msg = ir_add_error_node(ira, node, buf_sprintf("C import failed"));
        if (!ira->codegen->stage1.link_libc) {
            add_error_note(ira->codegen, parent_err_msg, node,
                buf_sprintf("libc headers not available; compilation does not link against libc"));
        }
        for (size_t i = 0; i < errors_len; i += 1) {
            Stage2ErrorMsg *clang_err = &errors_ptr[i];
            // Clang can emit "too many errors, stopping now", in which case `source` and `filename_ptr` are null
            if (clang_err->source && clang_err->filename_ptr) {
                // filename_ptr is known non-null here, so no fallback empty
                // buffer is needed when constructing the message.
                ErrorMsg *err_msg = err_msg_create_with_offset(
                    buf_create_from_mem(clang_err->filename_ptr, clang_err->filename_len),
                    clang_err->line, clang_err->column, clang_err->offset, clang_err->source,
                    buf_create_from_mem(clang_err->msg_ptr, clang_err->msg_len));
                err_msg_add_note(parent_err_msg, err_msg);
            }
        }

        return ira->codegen->invalid_inst_gen;
    }
    Buf *out_zig_path = buf_create_from_mem(out_zig_path_ptr, out_zig_path_len);

    // Load the translated Zig source and add it to the compilation.
    Buf *import_code = buf_alloc();
    if ((err = file_fetch(ira->codegen, out_zig_path, import_code))) {
        ir_add_error_node(ira, node,
            buf_sprintf("unable to open '%s': %s", buf_ptr(out_zig_path), err_str(err)));
        return ira->codegen->invalid_inst_gen;
    }
    ZigType *child_import = add_source_file(ira->codegen, cimport_pkg, out_zig_path,
            import_code, SourceKindCImport);
    return ir_const_type(ira, &instruction->base.base, child_import);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_c_include(IrAnalyze *ira, IrInstSrcCInclude *instruction) {
    // Implements @cInclude: appends `#include <name>` to the enclosing
    // @cImport block's directive buffer and yields void.
    IrInstGen *name_inst = instruction->name->child;
    if (type_is_invalid(name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    Buf *header_name = ir_resolve_str(ira, name_inst);
    if (header_name == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Use outside of a @cImport block is rejected during pass1, so the
    // directive buffer is guaranteed to exist here.
    Buf *directives = ira->new_irb.exec->c_import_buf;
    assert(directives != nullptr);
    buf_appendf(directives, "#include <%s>\n", buf_ptr(header_name));

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_c_define(IrAnalyze *ira, IrInstSrcCDefine *instruction) {
    // Implements @cDefine: appends `#define name value` to the enclosing
    // @cImport block's directive buffer and yields void.
    IrInstGen *name_inst = instruction->name->child;
    if (type_is_invalid(name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    Buf *macro_name = ir_resolve_str(ira, name_inst);
    if (macro_name == nullptr)
        return ira->codegen->invalid_inst_gen;

    IrInstGen *value_inst = instruction->value->child;
    if (type_is_invalid(value_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // The second parameter is either a string or void (equivalent to "")
    Buf *macro_value = nullptr;
    if (value_inst->value->type->id != ZigTypeIdVoid) {
        macro_value = ir_resolve_str(ira, value_inst);
        if (macro_value == nullptr)
            return ira->codegen->invalid_inst_gen;
    }

    // Use outside of a @cImport block is rejected during pass1, so the
    // directive buffer is guaranteed to exist here.
    Buf *directives = ira->new_irb.exec->c_import_buf;
    assert(directives != nullptr);
    buf_appendf(directives, "#define %s %s\n", buf_ptr(macro_name),
            macro_value != nullptr ? buf_ptr(macro_value) : "");

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_c_undef(IrAnalyze *ira, IrInstSrcCUndef *instruction) {
    // Implements @cUndef: appends `#undef name` to the enclosing @cImport
    // block's directive buffer and yields void.
    IrInstGen *name_inst = instruction->name->child;
    if (type_is_invalid(name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    Buf *macro_name = ir_resolve_str(ira, name_inst);
    if (macro_name == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Use outside of a @cImport block is rejected during pass1, so the
    // directive buffer is guaranteed to exist here.
    Buf *directives = ira->new_irb.exec->c_import_buf;
    assert(directives != nullptr);
    buf_appendf(directives, "#undef %s\n", buf_ptr(macro_name));

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_embed_file(IrAnalyze *ira, IrInstSrcEmbedFile *instruction) {
    // Implements @embedFile: reads the named file (relative to the importing
    // source file's directory) at compile time and returns its contents as a
    // constant string literal value.
    IrInstGen *name_inst = instruction->name->child;
    if (type_is_invalid(name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    Buf *rel_file_path = ir_resolve_str(ira, name_inst);
    if (rel_file_path == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Resolve the path relative to the directory of the importing file.
    ZigType *import = get_scope_import(instruction->base.base.scope);
    Buf source_dir_path = BUF_INIT;
    os_path_dirname(import->data.structure.root_struct->path, &source_dir_path);

    Buf *resolve_paths[] = { &source_dir_path, rel_file_path };
    Buf *file_path = buf_alloc();
    *file_path = os_path_resolve(resolve_paths, 2);

    // Load the file contents into a comptime value, distinguishing the common
    // "not found" case from other I/O failures in the diagnostic.
    Buf *file_contents = buf_alloc();
    Error err = file_fetch(ira->codegen, file_path, file_contents);
    if (err != ErrorNone) {
        if (err == ErrorFileNotFound) {
            ir_add_error(ira, &instruction->name->base,
                    buf_sprintf("unable to find '%s'", buf_ptr(file_path)));
        } else {
            ir_add_error(ira, &instruction->name->base,
                    buf_sprintf("unable to open '%s': %s", buf_ptr(file_path), err_str(err)));
        }
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
    init_const_str_lit(ira->codegen, result->value, file_contents);
    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxchg *instruction) {
    // Implements @cmpxchgWeak/@cmpxchgStrong. Validates the operand type and
    // the two atomic orderings, then either evaluates the compare-exchange at
    // comptime or emits the runtime cmpxchg instruction. The result type is
    // ?T: null when the exchange succeeded, otherwise the observed value.
    ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->type_value->child);
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_inst_gen;

    // Floats are valid for other atomics but not for cmpxchg.
    if (operand_type->id == ZigTypeIdFloat) {
        ir_add_error(ira, &instruction->type_value->child->base,
            buf_sprintf("expected bool, integer, enum or pointer type, found '%s'", buf_ptr(&operand_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *ptr = instruction->ptr->child;
    if (type_is_invalid(ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // TODO let this be volatile
    ZigType *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
    IrInstGen *casted_ptr = ir_implicit_cast2(ira, &instruction->ptr->base, ptr, ptr_type);
    if (type_is_invalid(casted_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *cmp_value = instruction->cmp_value->child;
    if (type_is_invalid(cmp_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *new_value = instruction->new_value->child;
    if (type_is_invalid(new_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *success_order_value = instruction->success_order_value->child;
    if (type_is_invalid(success_order_value->value->type))
        return ira->codegen->invalid_inst_gen;

    AtomicOrder success_order;
    if (!ir_resolve_atomic_order(ira, success_order_value, &success_order))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *failure_order_value = instruction->failure_order_value->child;
    if (type_is_invalid(failure_order_value->value->type))
        return ira->codegen->invalid_inst_gen;

    AtomicOrder failure_order;
    if (!ir_resolve_atomic_order(ira, failure_order_value, &failure_order))
        return ira->codegen->invalid_inst_gen;

    // Both the expected and replacement values must coerce to the operand type.
    IrInstGen *casted_cmp_value = ir_implicit_cast2(ira, &instruction->cmp_value->base, cmp_value, operand_type);
    if (type_is_invalid(casted_cmp_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_new_value = ir_implicit_cast2(ira, &instruction->new_value->base, new_value, operand_type);
    if (type_is_invalid(casted_new_value->value->type))
        return ira->codegen->invalid_inst_gen;

    // Validate the ordering-pair constraints for compare-exchange.
    if (success_order < AtomicOrderMonotonic) {
        ir_add_error(ira, &success_order_value->base,
                buf_sprintf("success atomic ordering must be Monotonic or stricter"));
        return ira->codegen->invalid_inst_gen;
    }
    if (failure_order < AtomicOrderMonotonic) {
        ir_add_error(ira, &failure_order_value->base,
                buf_sprintf("failure atomic ordering must be Monotonic or stricter"));
        return ira->codegen->invalid_inst_gen;
    }
    if (failure_order > success_order) {
        ir_add_error(ira, &failure_order_value->base,
                buf_sprintf("failure atomic ordering must be no stricter than success"));
        return ira->codegen->invalid_inst_gen;
    }
    if (failure_order == AtomicOrderRelease || failure_order == AtomicOrderAcqRel) {
        ir_add_error(ira, &failure_order_value->base,
                buf_sprintf("failure atomic ordering must not be Release or AcqRel"));
        return ira->codegen->invalid_inst_gen;
    }

    // null payload signals a successful exchange.
    ZigType *result_type = get_optional_type(ira->codegen, operand_type);

    // special case zero bit types
    switch (type_has_one_possible_value(ira->codegen, operand_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes: {
            // Stored value always equals the expected value, so the exchange
            // always succeeds: result is null.
            IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
            set_optional_value_to_null(result->value);
            return result;
        }
        case OnePossibleValueNo:
            break;
    }

    // Comptime path: perform the compare-exchange directly on the constant
    // pointee value.
    if (instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar &&
        instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) {
        ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *stored_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node);
        if (stored_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *expected_val = ir_resolve_const(ira, casted_cmp_value, UndefBad);
        if (expected_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *new_val = ir_resolve_const(ira, casted_new_value, UndefBad);
        if (new_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        bool eql = const_values_equal(ira->codegen, stored_val, expected_val);
        IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
        if (eql) {
            // Success: store the new value, result is null.
            copy_const_val(ira->codegen, stored_val, new_val);
            set_optional_value_to_null(result->value);
        } else {
            // Failure: result carries the observed value.
            set_optional_payload(result->value, stored_val);
        }
        return result;
    }

    // Optionals that are passed around by pointer need a result location for
    // the runtime instruction.
    IrInstGen *result_loc;
    if (handle_is_ptr(ira->codegen, result_type)) {
        result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
                result_type, nullptr, true, true);
        if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
            return result_loc;
        }
    } else {
        result_loc = nullptr;
    }

    return ir_build_cmpxchg_gen(ira, &instruction->base.base, result_type,
            casted_ptr, casted_cmp_value, casted_new_value,
            success_order, failure_order, instruction->is_weak, result_loc);
}
|
|
|
|
static ErrorMsg *ir_eval_reduce(IrAnalyze *ira, IrInst *source_instr, ReduceOp op, ZigValue *value, ZigValue *out_value) {
    // Comptime evaluation of @reduce: folds the vector `value` into the scalar
    // `out_value` using `op`. Returns nullptr on success, or the error message
    // produced by an underlying scalar operation.
    assert(value->type->id == ZigTypeIdVector);
    ZigType *scalar_type = value->type->data.vector.elem_type;
    const size_t len = value->type->data.vector.len;
    assert(len > 0);

    out_value->type = scalar_type;
    out_value->special = ConstValSpecialStatic;

    // Bool vectors: only and/or/xor reach this point (validated by the caller).
    if (scalar_type->id == ZigTypeIdBool) {
        ZigValue *first_elem_val = &value->data.x_array.data.s_none.elements[0];

        bool result = first_elem_val->data.x_bool;
        for (size_t i = 1; i < len; i++) {
            ZigValue *elem_val = &value->data.x_array.data.s_none.elements[i];

            switch (op) {
                case ReduceOp_and:
                    result = result && elem_val->data.x_bool;
                    break;
                case ReduceOp_or:
                    result = result || elem_val->data.x_bool;
                    break;
                case ReduceOp_xor:
                    result = result != elem_val->data.x_bool;
                    break;
                default:
                    zig_unreachable();
            }

            // Short circuit: once `and` has produced false or `or` has
            // produced true, no remaining element can change the result.
            // (A plain `break` inside the switch above would only exit the
            // switch, not the loop.)
            if ((op == ReduceOp_and && !result) || (op == ReduceOp_or && result))
                break;
        }

        out_value->data.x_bool = result;
        return nullptr;
    }

    // Evaluate and/or/xor on integer vectors, folding left-to-right starting
    // from the first element.
    if (op == ReduceOp_and || op == ReduceOp_or || op == ReduceOp_xor) {
        ZigValue *first_elem_val = &value->data.x_array.data.s_none.elements[0];

        copy_const_val(ira->codegen, out_value, first_elem_val);

        for (size_t i = 1; i < len; i++) {
            ZigValue *elem_val = &value->data.x_array.data.s_none.elements[i];

            IrBinOp bin_op;
            switch (op) {
                case ReduceOp_and: bin_op = IrBinOpBinAnd; break;
                case ReduceOp_or: bin_op = IrBinOpBinOr; break;
                case ReduceOp_xor: bin_op = IrBinOpBinXor; break;
                default: zig_unreachable();
            }

            ErrorMsg *msg = ir_eval_math_op_scalar(ira, source_instr, scalar_type,
                    out_value, bin_op, elem_val, out_value);
            if (msg != nullptr)
                return msg;
        }

        return nullptr;
    }

    // Evaluate add/mul.
    // Perform the reduction sequentially, starting from the neutral value.
    if (op == ReduceOp_add || op == ReduceOp_mul) {
        if (scalar_type->id == ZigTypeIdInt) {
            if (op == ReduceOp_add) {
                bigint_init_unsigned(&out_value->data.x_bigint, 0);
            } else {
                bigint_init_unsigned(&out_value->data.x_bigint, 1);
            }
        } else {
            if (op == ReduceOp_add) {
                // -0.0 is the additive identity for IEEE floats: starting from
                // +0.0 would turn a sum of negative zeros into +0.0.
                float_init_f64(out_value, -0.0);
            } else {
                float_init_f64(out_value, 1.0);
            }
        }

        for (size_t i = 0; i < len; i++) {
            ZigValue *elem_val = &value->data.x_array.data.s_none.elements[i];

            IrBinOp bin_op;
            switch (op) {
                case ReduceOp_add: bin_op = IrBinOpAdd; break;
                case ReduceOp_mul: bin_op = IrBinOpMult; break;
                default: zig_unreachable();
            }

            ErrorMsg *msg = ir_eval_math_op_scalar(ira, source_instr, scalar_type,
                    out_value, bin_op, elem_val, out_value);
            if (msg != nullptr)
                return msg;
        }

        return nullptr;
    }

    // Evaluate min/max: track the best element seen so far, comparing each
    // remaining element against it.
    ZigValue *candidate_elem_val = &value->data.x_array.data.s_none.elements[0];

    ZigValue *dummy_cmp_value = ira->codegen->pass1_arena->create<ZigValue>();
    for (size_t i = 1; i < len; i++) {
        ZigValue *elem_val = &value->data.x_array.data.s_none.elements[i];

        IrBinOp bin_op;
        switch (op) {
            case ReduceOp_min: bin_op = IrBinOpCmpLessThan; break;
            case ReduceOp_max: bin_op = IrBinOpCmpGreaterThan; break;
            default: zig_unreachable();
        }

        ErrorMsg *msg = ir_eval_bin_op_cmp_scalar(ira, source_instr,
                elem_val, bin_op, candidate_elem_val, dummy_cmp_value);
        if (msg != nullptr)
            return msg;

        if (dummy_cmp_value->data.x_bool)
            candidate_elem_val = elem_val;
    }

    ira->codegen->pass1_arena->destroy(dummy_cmp_value);
    copy_const_val(ira->codegen, out_value, candidate_elem_val);

    return nullptr;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_reduce(IrAnalyze *ira, IrInstSrcReduce *instruction) {
    // Implements @reduce(op, vector): validates the operand and operator
    // combination, then either folds the reduction at comptime or emits the
    // runtime reduce instruction.
    IrInstGen *op_inst = instruction->op->child;
    if (type_is_invalid(op_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *value_inst = instruction->value->child;
    if (type_is_invalid(value_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *value_type = value_inst->value->type;
    if (value_type->id != ZigTypeIdVector) {
        ir_add_error(ira, &value_inst->base,
                buf_sprintf("expected vector type, found '%s'",
                    buf_ptr(&value_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ReduceOp op;
    if (!ir_resolve_reduce_op(ira, op_inst, &op))
        return ira->codegen->invalid_inst_gen;

    // Restrict the operator to what the element type supports. The range
    // comparisons below rely on the declaration order of the ReduceOp enum.
    ZigType *elem_type = value_type->data.vector.elem_type;
    switch (elem_type->id) {
        case ZigTypeIdInt:
            // Integers support every ReduceOp.
            break;
        case ZigTypeIdBool:
            // Bools only support the boolean ops (and/or/xor).
            if (op > ReduceOp_xor) {
                ir_add_error(ira, &op_inst->base,
                        buf_sprintf("invalid operation for '%s' type",
                            buf_ptr(&elem_type->name)));
                return ira->codegen->invalid_inst_gen;
            } break;
        case ZigTypeIdFloat:
            // Floats do not support the bitwise/boolean ops (those preceding
            // min in the enum).
            if (op < ReduceOp_min) {
                ir_add_error(ira, &op_inst->base,
                        buf_sprintf("invalid operation for '%s' type",
                            buf_ptr(&elem_type->name)));
                return ira->codegen->invalid_inst_gen;
            } break;
        default:
            // Vectors cannot have child types other than those listed above
            zig_unreachable();
    }

    // special case zero bit types
    switch (type_has_one_possible_value(ira->codegen, elem_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            // Every element is the single possible value, so the reduction is
            // that value as well.
            return ir_const_move(ira, &instruction->base.base,
                     get_the_one_possible_value(ira->codegen, elem_type));
        case OnePossibleValueNo:
            break;
    }

    // Fold at comptime when the vector operand is comptime-known.
    if (instr_is_comptime(value_inst)) {
        IrInstGen *result = ir_const(ira, &instruction->base.base, elem_type);
        if (ir_eval_reduce(ira, &instruction->base.base, op, value_inst->value, result->value))
            return ira->codegen->invalid_inst_gen;
        return result;
    }

    return ir_build_reduce_gen(ira, &instruction->base.base, op, value_inst, elem_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_fence(IrAnalyze *ira, IrInstSrcFence *instruction) {
    // Implements @fence: resolves and validates the atomic ordering operand,
    // then emits the runtime fence instruction.
    IrInstGen *order_operand = instruction->order->child;
    if (type_is_invalid(order_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    AtomicOrder resolved_order;
    if (!ir_resolve_atomic_order(ira, order_operand, &resolved_order))
        return ira->codegen->invalid_inst_gen;

    // Orderings weaker than Acquire are rejected for fences.
    if (resolved_order < AtomicOrderAcquire) {
        ir_add_error(ira, &order_operand->base,
                buf_sprintf("atomic ordering must be Acquire or stricter"));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_build_fence_gen(ira, &instruction->base.base, resolved_order);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstSrcTruncate *instruction) {
    // Implements @truncate: converts an integer to a same-signedness integer
    // type with at most as many bits, discarding the most significant bits.
    IrInstGen *dest_type_inst = instruction->dest_type->child;
    ZigType *dest_type = ir_resolve_type(ira, dest_type_inst);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    bool dest_is_int = dest_type->id == ZigTypeIdInt || dest_type->id == ZigTypeIdComptimeInt;
    if (!dest_is_int) {
        ir_add_error(ira, &dest_type_inst->base, buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *operand = instruction->target->child;
    ZigType *operand_type = operand->value->type;
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_inst_gen;

    bool operand_is_int = operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdComptimeInt;
    if (!operand_is_int) {
        ir_add_error(ira, &operand->base, buf_sprintf("expected integer type, found '%s'", buf_ptr(&operand_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Truncating to comptime_int drops no bits; it is just an implicit cast.
    if (dest_type->id == ZigTypeIdComptimeInt)
        return ir_implicit_cast2(ira, &instruction->target->base, operand, dest_type);

    // Comptime-known operands are truncated directly on the big integer.
    // Note this path intentionally precedes the signedness/width checks below.
    if (instr_is_comptime(operand)) {
        ZigValue *operand_val = ir_resolve_const(ira, operand, UndefBad);
        if (operand_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, &instruction->base.base, dest_type);
        bigint_truncate(&result->value->data.x_bigint, &operand_val->data.x_bigint,
                dest_type->data.integral.bit_count, dest_type->data.integral.is_signed);
        return result;
    }

    // A zero-bit integer on either side makes the result trivially zero.
    if (operand_type->data.integral.bit_count == 0 || dest_type->data.integral.bit_count == 0) {
        IrInstGen *result = ir_const(ira, &instruction->base.base, dest_type);
        bigint_init_unsigned(&result->value->data.x_bigint, 0);
        return result;
    }

    if (operand_type->data.integral.is_signed != dest_type->data.integral.is_signed) {
        const char *sign_str = dest_type->data.integral.is_signed ? "signed" : "unsigned";
        ir_add_error(ira, &operand->base, buf_sprintf("expected %s integer type, found '%s'", sign_str, buf_ptr(&operand_type->name)));
        return ira->codegen->invalid_inst_gen;
    }
    if (operand_type->data.integral.bit_count < dest_type->data.integral.bit_count) {
        ir_add_error(ira, &operand->base, buf_sprintf("type '%s' has fewer bits than destination type '%s'",
                buf_ptr(&operand_type->name), buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_build_truncate_gen(ira, &instruction->base.base, dest_type, operand);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_int_cast(IrAnalyze *ira, IrInstSrcIntCast *instruction) {
    // Implements @intCast: converts between integer types (or vectors of
    // integers) via widen/shorten.
    ZigType *dest_type = ir_resolve_type(ira, instruction->dest_type->child);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    // Validate on the element type when the destination is a vector.
    ZigType *scalar_dest_type = dest_type;
    if (dest_type->id == ZigTypeIdVector)
        scalar_dest_type = dest_type->data.vector.elem_type;

    bool dest_is_int = scalar_dest_type->id == ZigTypeIdInt ||
            scalar_dest_type->id == ZigTypeIdComptimeInt;
    if (!dest_is_int) {
        ir_add_error(ira, &instruction->dest_type->base,
                buf_sprintf("expected integer type, found '%s'", buf_ptr(&scalar_dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *operand = instruction->target->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    // Likewise validate the operand's element type.
    ZigType *scalar_operand_type = operand->value->type;
    if (scalar_operand_type->id == ZigTypeIdVector)
        scalar_operand_type = scalar_operand_type->data.vector.elem_type;

    if (scalar_operand_type->id != ZigTypeIdInt && scalar_operand_type->id != ZigTypeIdComptimeInt) {
        ir_add_error(ira, &instruction->target->base, buf_sprintf("expected integer type, found '%s'",
                buf_ptr(&scalar_operand_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Casting to comptime_int requires a comptime-known operand; the implicit
    // cast then performs the conversion.
    if (scalar_dest_type->id == ZigTypeIdComptimeInt) {
        if (ir_resolve_const(ira, operand, UndefBad) == nullptr)
            return ira->codegen->invalid_inst_gen;

        return ir_implicit_cast2(ira, &instruction->target->base, operand, dest_type);
    }

    return ir_analyze_widen_or_shorten(ira, &instruction->base.base, operand, dest_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_float_cast(IrAnalyze *ira, IrInstSrcFloatCast *instruction) {
    // Implements @floatCast: converts a float (or comptime numeric literal)
    // to the given float destination type.
    ZigType *dest_type = ir_resolve_type(ira, instruction->dest_type->child);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    if (dest_type->id != ZigTypeIdFloat && dest_type->id != ZigTypeIdComptimeFloat) {
        ir_add_error(ira, &instruction->dest_type->base,
                buf_sprintf("expected float type, found '%s'", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *target = instruction->target->child;
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    // Comptime numeric literals: verify the value fits the destination type,
    // then cast directly (int literals convert, float literals concretize).
    if (target->value->type->id == ZigTypeIdComptimeInt ||
        target->value->type->id == ZigTypeIdComptimeFloat)
    {
        if (ir_num_lit_fits_in_other_type(ira, target, dest_type, true)) {
            CastOp op;
            if (target->value->type->id == ZigTypeIdComptimeInt) {
                op = CastOpIntToFloat;
            } else {
                op = CastOpNumLitToConcrete;
            }
            return ir_resolve_cast(ira, &instruction->base.base, target, dest_type, op);
        } else {
            return ira->codegen->invalid_inst_gen;
        }
    }

    if (target->value->type->id != ZigTypeIdFloat) {
        ir_add_error(ira, &instruction->target->base, buf_sprintf("expected float type, found '%s'",
                buf_ptr(&target->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Comptime path (also taken when dest is comptime_float, which requires a
    // comptime-known operand; ir_resolve_const returning null covers the
    // runtime-known case).
    if (instr_is_comptime(target) || dest_type->id == ZigTypeIdComptimeFloat) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // XXX: This will trigger an assertion failure if dest_type is comptime_float
        return ir_analyze_widen_or_shorten(ira, &instruction->target->base, target, dest_type);
    }

    return ir_analyze_widen_or_shorten(ira, &instruction->base.base, target, dest_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_err_set_cast(IrAnalyze *ira, IrInstSrcErrSetCast *instruction) {
    // Implements @errSetCast: converts a value from one error set type to
    // another. Both sides must be error sets.
    ZigType *dest_type = ir_resolve_type(ira, instruction->dest_type->child);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    if (dest_type->id != ZigTypeIdErrorSet) {
        ir_add_error(ira, &instruction->dest_type->base,
                buf_sprintf("expected error set type, found '%s'", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *operand = instruction->target->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    if (operand->value->type->id != ZigTypeIdErrorSet) {
        ir_add_error(ira, &instruction->target->base,
                buf_sprintf("expected error set type, found '%s'", buf_ptr(&operand->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_analyze_err_set_cast(ira, &instruction->base.base, operand, dest_type);
}
|
|
|
|
static Error resolve_ptr_align(IrAnalyze *ira, ZigType *ty, uint32_t *result_align) {
    // Computes the alignment of a pointer (or slice) type into *result_align,
    // first ensuring the pointee type's alignment has been resolved.
    Error err;

    // For slices, the pointer type is stored in the slice struct's ptr field.
    ZigType *pointer_ty;
    if (is_slice(ty)) {
        TypeStructField *field = ty->data.structure.fields[slice_ptr_index];
        pointer_ty = resolve_struct_field_type(ira->codegen, field);
    } else {
        pointer_ty = get_src_ptr_type(ty);
    }
    assert(pointer_ty != nullptr);

    // Resolve the child type's alignment before querying the pointer's own.
    if (pointer_ty->id == ZigTypeIdPointer) {
        if ((err = type_resolve(ira->codegen, pointer_ty->data.pointer.child_type, ResolveStatusAlignmentKnown)))
            return err;
    } else if (is_slice(pointer_ty)) {
        TypeStructField *field = pointer_ty->data.structure.fields[slice_ptr_index];
        ZigType *slice_ptr_ty = resolve_struct_field_type(ira->codegen, field);
        if ((err = type_resolve(ira->codegen, slice_ptr_ty->data.pointer.child_type, ResolveStatusAlignmentKnown)))
            return err;
    }

    *result_align = get_ptr_align(ira->codegen, ty);
    return ErrorNone;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_int_to_float(IrAnalyze *ira, IrInstSrcIntToFloat *instruction) {
    // Implements @intToFloat: converts an integer operand to the given float
    // destination type.
    ZigType *dest_type = ir_resolve_type(ira, instruction->dest_type->child);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    bool dest_is_float = dest_type->id == ZigTypeIdFloat ||
            dest_type->id == ZigTypeIdComptimeFloat;
    if (!dest_is_float) {
        ir_add_error(ira, &instruction->dest_type->base,
                buf_sprintf("expected float type, found '%s'", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *operand = instruction->target->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    bool operand_is_int = operand->value->type->id == ZigTypeIdInt ||
            operand->value->type->id == ZigTypeIdComptimeInt;
    if (!operand_is_int) {
        ir_add_error(ira, &instruction->target->base, buf_sprintf("expected int type, found '%s'",
                buf_ptr(&operand->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_resolve_cast(ira, &instruction->base.base, operand, dest_type, CastOpIntToFloat);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_float_to_int(IrAnalyze *ira, IrInstSrcFloatToInt *instruction) {
    // Implements @floatToInt: converts a float operand to the given integer
    // destination type.
    ZigType *dest_type = ir_resolve_type(ira, instruction->dest_type->child);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    if (dest_type->id != ZigTypeIdInt && dest_type->id != ZigTypeIdComptimeInt) {
        ir_add_error(ira, &instruction->dest_type->base, buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *target = instruction->target->child;
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    // A comptime_int operand is already an integer; only an implicit cast to
    // the destination type is needed.
    if (target->value->type->id == ZigTypeIdComptimeInt) {
        return ir_implicit_cast(ira, target, dest_type);
    }

    if (target->value->type->id != ZigTypeIdFloat && target->value->type->id != ZigTypeIdComptimeFloat) {
        // Report on the operand instruction, consistent with the sibling cast
        // analyzers (e.g. ir_analyze_instruction_int_to_float).
        ir_add_error(ira, &instruction->target->base, buf_sprintf("expected float type, found '%s'",
                buf_ptr(&target->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_resolve_cast(ira, &instruction->base.base, target, dest_type, CastOpFloatToInt);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_err_to_int(IrAnalyze *ira, IrInstSrcErrToInt *instruction) {
    // Implements @errorToInt: yields the integer tag value of an error.
    IrInstGen *operand = instruction->target->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    // Operands that are not already an error set are first coerced to the
    // global error set.
    IrInstGen *casted_operand;
    if (operand->value->type->id == ZigTypeIdErrorSet) {
        casted_operand = operand;
    } else {
        casted_operand = ir_implicit_cast(ira, operand, ira->codegen->builtin_types.entry_global_error_set);
        if (type_is_invalid(casted_operand->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    return ir_analyze_err_to_int(ira, &instruction->base.base, casted_operand, ira->codegen->err_tag_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_int_to_err(IrAnalyze *ira, IrInstSrcIntToErr *instruction) {
    // Converts an integer error tag value into a value of the global error set.
    IrInstGen *operand = instruction->target->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    // The operand must coerce to the error tag integer type.
    IrInstGen *casted_operand = ir_implicit_cast(ira, operand, ira->codegen->err_tag_type);
    if (type_is_invalid(casted_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_int_to_err(ira, &instruction->base.base, casted_operand, ira->codegen->builtin_types.entry_global_error_set);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_bool_to_int(IrAnalyze *ira, IrInstSrcBoolToInt *instruction) {
    // Implements @boolToInt: true becomes 1, false becomes 0.
    IrInstGen *operand = instruction->target->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    if (operand->value->type->id != ZigTypeIdBool) {
        ir_add_error(ira, &instruction->target->base, buf_sprintf("expected bool, found '%s'",
                buf_ptr(&operand->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Fold to a comptime constant when the operand is comptime-known.
    if (instr_is_comptime(operand)) {
        bool is_true;
        if (!ir_resolve_bool(ira, operand, &is_true))
            return ira->codegen->invalid_inst_gen;
        return ir_const_unsigned(ira, &instruction->base.base, is_true ? 1 : 0);
    }

    // The runtime result is a u1.
    ZigType *u1_type = get_int_type(ira->codegen, false, 1);
    return ir_resolve_cast(ira, &instruction->base.base, operand, u1_type, CastOpBoolToInt);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_vector_type(IrAnalyze *ira, IrInstSrcVectorType *instruction) {
    // Implements @Vector(len, elem_type): yields the vector type as a comptime
    // type value. The length operand must fit in a u32.
    uint64_t vector_len;
    if (!ir_resolve_unsigned(ira, instruction->len->child, ira->codegen->builtin_types.entry_u32, &vector_len))
        return ira->codegen->invalid_inst_gen;

    ZigType *vector_elem_type = ir_resolve_vector_elem_type(ira, instruction->elem_type->child);
    if (type_is_invalid(vector_elem_type))
        return ira->codegen->invalid_inst_gen;

    return ir_const_type(ira, &instruction->base.base,
            get_vector_type(ira->codegen, vector_len, vector_elem_type));
}
|
|
|
|
// Analyzes @shuffle(scalar_type, a, b, mask).
//
// Mask encoding: a non-negative mask element v selects element v of `a`;
// a negative mask element selects element ~v of `b` (see the v_i32 branch
// below). The mask must be comptime-known; its length determines the result
// vector length. Either operand may be entirely `undefined`, in which case it
// is materialized as an undef vector matching the other operand's length.
//
// Returns the shuffled value: a comptime constant when both operands are
// comptime-known, otherwise a runtime shuffle_vector instruction. Returns
// invalid_inst_gen after emitting a compile error on bad operand/mask types
// or out-of-bounds selections.
static IrInstGen *ir_analyze_shuffle_vector(IrAnalyze *ira, IrInst* source_instr,
    ZigType *scalar_type, IrInstGen *a, IrInstGen *b, IrInstGen *mask)
{
    Error err;
    ir_assert(source_instr && scalar_type && a && b && mask, source_instr);

    if ((err = ir_validate_vector_elem_type(ira, source_instr->source_node, scalar_type)))
        return ira->codegen->invalid_inst_gen;

    // The mask may be written as either a vector or an array literal; its
    // length fixes the result length.
    uint32_t len_mask;
    if (mask->value->type->id == ZigTypeIdVector) {
        len_mask = mask->value->type->data.vector.len;
    } else if (mask->value->type->id == ZigTypeIdArray) {
        len_mask = mask->value->type->data.array.len;
    } else {
        ir_add_error(ira, &mask->base,
            buf_sprintf("expected vector or array, found '%s'",
                buf_ptr(&mask->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }
    // Normalize the mask to a vector of i32 (signed, to allow the negative
    // "select from b" encoding).
    mask = ir_implicit_cast(ira, mask, get_vector_type(ira->codegen, len_mask,
        ira->codegen->builtin_types.entry_i32));
    if (type_is_invalid(mask->value->type))
        return ira->codegen->invalid_inst_gen;

    // Determine operand a's length; UINT32_MAX marks "entirely undefined",
    // resolved against the other operand below.
    uint32_t len_a;
    if (a->value->type->id == ZigTypeIdVector) {
        len_a = a->value->type->data.vector.len;
    } else if (a->value->type->id == ZigTypeIdArray) {
        len_a = a->value->type->data.array.len;
    } else if (a->value->type->id == ZigTypeIdUndefined) {
        len_a = UINT32_MAX;
    } else {
        ir_add_error(ira, &a->base,
            buf_sprintf("expected vector or array with element type '%s', found '%s'",
                buf_ptr(&scalar_type->name),
                buf_ptr(&a->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Same classification for operand b.
    uint32_t len_b;
    if (b->value->type->id == ZigTypeIdVector) {
        len_b = b->value->type->data.vector.len;
    } else if (b->value->type->id == ZigTypeIdArray) {
        len_b = b->value->type->data.array.len;
    } else if (b->value->type->id == ZigTypeIdUndefined) {
        len_b = UINT32_MAX;
    } else {
        ir_add_error(ira, &b->base,
            buf_sprintf("expected vector or array with element type '%s', found '%s'",
                buf_ptr(&scalar_type->name),
                buf_ptr(&b->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Both operands undefined: the whole result is undefined.
    if (len_a == UINT32_MAX && len_b == UINT32_MAX) {
        return ir_const_undef(ira, &a->base, get_vector_type(ira->codegen, len_mask, scalar_type));
    }

    // Materialize an undefined operand as an undef vector matching the other
    // operand's length; otherwise coerce to the expected vector type.
    if (len_a == UINT32_MAX) {
        len_a = len_b;
        a = ir_const_undef(ira, &a->base, get_vector_type(ira->codegen, len_a, scalar_type));
    } else {
        a = ir_implicit_cast(ira, a, get_vector_type(ira->codegen, len_a, scalar_type));
        if (type_is_invalid(a->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    if (len_b == UINT32_MAX) {
        len_b = len_a;
        b = ir_const_undef(ira, &b->base, get_vector_type(ira->codegen, len_b, scalar_type));
    } else {
        b = ir_implicit_cast(ira, b, get_vector_type(ira->codegen, len_b, scalar_type));
        if (type_is_invalid(b->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    // The mask must be comptime-known (undef elements are permitted).
    ZigValue *mask_val = ir_resolve_const(ira, mask, UndefOk);
    if (mask_val == nullptr)
        return ira->codegen->invalid_inst_gen;

    expand_undef_array(ira->codegen, mask_val);

    // Bounds-check every (defined) mask element against the operand it selects
    // from, emitting compile errors before any evaluation happens.
    for (uint32_t i = 0; i < len_mask; i += 1) {
        ZigValue *mask_elem_val = &mask_val->data.x_array.data.s_none.elements[i];
        if (mask_elem_val->special == ConstValSpecialUndef)
            continue;
        int32_t v_i32 = bigint_as_signed(&mask_elem_val->data.x_bigint);
        uint32_t v;
        IrInstGen *chosen_operand;
        if (v_i32 >= 0) {
            // Non-negative: index v into operand a.
            v = (uint32_t)v_i32;
            chosen_operand = a;
        } else {
            // Negative: index ~v (i.e. -1 -> 0, -2 -> 1, ...) into operand b.
            v = (uint32_t)~v_i32;
            chosen_operand = b;
        }
        if (v >= chosen_operand->value->type->data.vector.len) {
            ErrorMsg *msg = ir_add_error(ira, &mask->base,
                buf_sprintf("mask index '%u' has out-of-bounds selection", i));
            add_error_note(ira->codegen, msg, chosen_operand->base.source_node,
                buf_sprintf("selected index '%u' out of bounds of %s", v,
                    buf_ptr(&chosen_operand->value->type->name)));
            // Likely user error: positive index intended for the second vector.
            if (chosen_operand == a && v < len_a + len_b) {
                add_error_note(ira->codegen, msg, b->base.source_node,
                    buf_create_from_str("selections from the second vector are specified with negative numbers"));
            }
            return ira->codegen->invalid_inst_gen;
        }
    }

    ZigType *result_type = get_vector_type(ira->codegen, len_mask, scalar_type);
    // Both operands comptime-known: evaluate the shuffle now and produce a
    // constant result.
    if (instr_is_comptime(a) && instr_is_comptime(b)) {
        ZigValue *a_val = ir_resolve_const(ira, a, UndefOk);
        if (a_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *b_val = ir_resolve_const(ira, b, UndefOk);
        if (b_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        expand_undef_array(ira->codegen, a_val);
        expand_undef_array(ira->codegen, b_val);

        IrInstGen *result = ir_const(ira, source_instr, result_type);
        result->value->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(len_mask);
        for (uint32_t i = 0; i < mask_val->type->data.vector.len; i += 1) {
            ZigValue *mask_elem_val = &mask_val->data.x_array.data.s_none.elements[i];
            ZigValue *result_elem_val = &result->value->data.x_array.data.s_none.elements[i];
            // An undef mask element yields an undef result element.
            if (mask_elem_val->special == ConstValSpecialUndef) {
                result_elem_val->special = ConstValSpecialUndef;
                continue;
            }
            int32_t v = bigint_as_signed(&mask_elem_val->data.x_bigint);
            // We've already checked for and emitted compile errors for index out of bounds here.
            ZigValue *src_elem_val = (v >= 0) ?
                &a->value->data.x_array.data.s_none.elements[v] :
                &b->value->data.x_array.data.s_none.elements[~v];
            copy_const_val(ira->codegen, result_elem_val, src_elem_val);

            ir_assert(result_elem_val->special == ConstValSpecialStatic, source_instr);
        }
        result->value->special = ConstValSpecialStatic;
        return result;
    }

    // All static analysis passed, and not comptime.
    // For runtime codegen, vectors a and b must be the same length. Here we
    // recursively @shuffle the smaller vector to append undefined elements
    // to it up to the length of the longer vector. This recursion terminates
    // in 1 call because these calls to ir_analyze_shuffle_vector guarantee
    // len_a == len_b.
    if (len_a != len_b) {
        uint32_t len_min = min(len_a, len_b);
        uint32_t len_max = max(len_a, len_b);

        // Identity mask for the first len_min lanes, then -1 (undef-select)
        // to pad out to len_max.
        IrInstGen *expand_mask = ir_const(ira, &mask->base,
            get_vector_type(ira->codegen, len_max, ira->codegen->builtin_types.entry_i32));
        expand_mask->value->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(len_max);
        uint32_t i = 0;
        for (; i < len_min; i += 1)
            bigint_init_unsigned(&expand_mask->value->data.x_array.data.s_none.elements[i].data.x_bigint, i);
        for (; i < len_max; i += 1)
            bigint_init_signed(&expand_mask->value->data.x_array.data.s_none.elements[i].data.x_bigint, -1);

        IrInstGen *undef = ir_const_undef(ira, source_instr,
            get_vector_type(ira->codegen, len_min, scalar_type));

        if (len_b < len_a) {
            b = ir_analyze_shuffle_vector(ira, source_instr, scalar_type, b, undef, expand_mask);
        } else {
            a = ir_analyze_shuffle_vector(ira, source_instr, scalar_type, a, undef, expand_mask);
        }
    }

    return ir_build_shuffle_vector_gen(ira, source_instr->scope, source_instr->source_node,
        result_type, a, b, mask);
}
|
|
|
|
// Analyze @shuffle: resolve the four source operands (element type, the two
// vectors, and the mask), then delegate the real work to
// ir_analyze_shuffle_vector.
static IrInstGen *ir_analyze_instruction_shuffle_vector(IrAnalyze *ira, IrInstSrcShuffleVector *instruction) {
    ZigType *elem_type = ir_resolve_vector_elem_type(ira, instruction->scalar_type->child);
    if (type_is_invalid(elem_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *operand_a = instruction->a->child;
    if (type_is_invalid(operand_a->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *operand_b = instruction->b->child;
    if (type_is_invalid(operand_b->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *mask_operand = instruction->mask->child;
    if (type_is_invalid(mask_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_shuffle_vector(ira, &instruction->base.base, elem_type,
            operand_a, operand_b, mask_operand);
}
|
|
|
|
// Analyze @splat(len, scalar): produce a vector whose every element is the
// scalar. A comptime-known scalar folds to a constant vector (undef scalar
// gives an undef vector); otherwise a runtime splat instruction is emitted.
static IrInstGen *ir_analyze_instruction_splat(IrAnalyze *ira, IrInstSrcSplat *instruction) {
    Error err;

    IrInstGen *len_inst = instruction->len->child;
    if (type_is_invalid(len_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *scalar_inst = instruction->scalar->child;
    if (type_is_invalid(scalar_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // Length must be a comptime-known u32.
    uint64_t resolved_len;
    if (!ir_resolve_unsigned(ira, len_inst, ira->codegen->builtin_types.entry_u32, &resolved_len))
        return ira->codegen->invalid_inst_gen;
    uint32_t vec_len = resolved_len;

    // The scalar's type must be a valid vector element type.
    if ((err = ir_validate_vector_elem_type(ira, scalar_inst->base.source_node, scalar_inst->value->type)))
        return ira->codegen->invalid_inst_gen;

    ZigType *vector_ty = get_vector_type(ira->codegen, vec_len, scalar_inst->value->type);

    if (instr_is_comptime(scalar_inst)) {
        ZigValue *scalar_val = ir_resolve_const(ira, scalar_inst, UndefOk);
        if (scalar_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        // Splatting undef yields a wholly undef vector.
        if (scalar_val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, &instruction->base.base, vector_ty);

        // Build the constant vector by copying the scalar into every lane.
        IrInstGen *folded = ir_const(ira, &instruction->base.base, vector_ty);
        folded->value->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(vec_len);
        for (uint32_t lane = 0; lane < vec_len; lane += 1) {
            copy_const_val(ira->codegen, &folded->value->data.x_array.data.s_none.elements[lane], scalar_val);
        }
        return folded;
    }

    return ir_build_splat_gen(ira, &instruction->base.base, vector_ty, scalar_inst);
}
|
|
|
|
// Analyze the boolean `!` operator: coerce the operand to bool, fold at
// comptime when possible, otherwise emit a runtime bool_not instruction.
static IrInstGen *ir_analyze_instruction_bool_not(IrAnalyze *ira, IrInstSrcBoolNot *instruction) {
    IrInstGen *operand = instruction->value->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    // The operand must coerce to bool.
    IrInstGen *casted_operand = ir_implicit_cast(ira, operand,
            ira->codegen->builtin_types.entry_bool);
    if (type_is_invalid(casted_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(casted_operand)) {
        // `!undefined` is a compile error (UndefBad).
        ZigValue *const_val = ir_resolve_const(ira, casted_operand, UndefBad);
        if (const_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        return ir_const_bool(ira, &instruction->base.base, !const_val->data.x_bool);
    }

    return ir_build_bool_not_gen(ira, &instruction->base.base, casted_operand);
}
|
|
|
|
// Analyzes @memset(dest_ptr, byte, count).
//
// Coerces dest_ptr to a [*]u8-style pointer (volatility and alignment taken
// from the destination's pointer type), byte to u8, and count to usize. When
// all three operands are comptime-known and the destination is a mutable
// comptime value (not a hard-coded address, not a runtime var), the memset is
// evaluated now by writing `byte` into each destination element; otherwise a
// runtime memset instruction is emitted.
//
// Returns a void constant (comptime path), the runtime memset instruction, or
// invalid_inst_gen after emitting a compile error.
static IrInstGen *ir_analyze_instruction_memset(IrAnalyze *ira, IrInstSrcMemset *instruction) {
    Error err;

    IrInstGen *dest_ptr = instruction->dest_ptr->child;
    if (type_is_invalid(dest_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *byte_value = instruction->byte->child;
    if (type_is_invalid(byte_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *count_value = instruction->count->child;
    if (type_is_invalid(count_value->value->type))
        return ira->codegen->invalid_inst_gen;

    // Carry the destination's volatility over to the u8 pointer we cast to.
    ZigType *dest_uncasted_type = dest_ptr->value->type;
    bool dest_is_volatile = (dest_uncasted_type->id == ZigTypeIdPointer) &&
        dest_uncasted_type->data.pointer.is_volatile;

    ZigType *usize = ira->codegen->builtin_types.entry_usize;
    ZigType *u8 = ira->codegen->builtin_types.entry_u8;
    // Preserve the destination's alignment; fall back to u8's ABI alignment
    // when the operand isn't a pointer type yet (it will fail the cast below
    // if it can't become one).
    uint32_t dest_align;
    if (dest_uncasted_type->id == ZigTypeIdPointer) {
        if ((err = resolve_ptr_align(ira, dest_uncasted_type, &dest_align)))
            return ira->codegen->invalid_inst_gen;
    } else {
        dest_align = get_abi_alignment(ira->codegen, u8);
    }
    ZigType *u8_ptr = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile,
        PtrLenUnknown, dest_align, 0, 0, false);

    IrInstGen *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr);
    if (type_is_invalid(casted_dest_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_byte = ir_implicit_cast(ira, byte_value, u8);
    if (type_is_invalid(casted_byte->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_count = ir_implicit_cast(ira, count_value, usize);
    if (type_is_invalid(casted_count->value->type))
        return ira->codegen->invalid_inst_gen;

    // TODO test this at comptime with u8 and non-u8 types
    if (instr_is_comptime(casted_dest_ptr) &&
        instr_is_comptime(casted_byte) &&
        instr_is_comptime(casted_count))
    {
        // Destination pointer and count may not be undefined; the fill byte may.
        ZigValue *dest_ptr_val = ir_resolve_const(ira, casted_dest_ptr, UndefBad);
        if (dest_ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *byte_val = ir_resolve_const(ira, casted_byte, UndefOk);
        if (byte_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *count_val = ir_resolve_const(ira, casted_count, UndefBad);
        if (count_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // Only evaluate at comptime when the pointee is an actual comptime
        // value we can mutate (not a hard-coded address or a runtime var).
        if (casted_dest_ptr->value->data.x_ptr.special != ConstPtrSpecialHardCodedAddr &&
            casted_dest_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar)
        {
            // Locate the destination element storage plus the writable window
            // [start, bound_end) within it.
            ZigValue *dest_elements;
            size_t start;
            size_t bound_end;
            switch (dest_ptr_val->data.x_ptr.special) {
                case ConstPtrSpecialInvalid:
                case ConstPtrSpecialDiscard:
                    zig_unreachable();
                case ConstPtrSpecialRef:
                    // Pointer to a single value: exactly one writable element.
                    dest_elements = dest_ptr_val->data.x_ptr.data.ref.pointee;
                    start = 0;
                    bound_end = 1;
                    break;
                case ConstPtrSpecialSubArray:
                case ConstPtrSpecialBaseArray:
                    {
                        ZigValue *array_val = dest_ptr_val->data.x_ptr.data.base_array.array_val;
                        expand_undef_array(ira->codegen, array_val);
                        dest_elements = array_val->data.x_array.data.s_none.elements;
                        start = dest_ptr_val->data.x_ptr.data.base_array.elem_index;
                        bound_end = array_val->type->data.array.len;
                        break;
                    }
                case ConstPtrSpecialBaseStruct:
                    zig_panic("TODO memset on const inner struct");
                case ConstPtrSpecialBaseErrorUnionCode:
                    zig_panic("TODO memset on const inner error union code");
                case ConstPtrSpecialBaseErrorUnionPayload:
                    zig_panic("TODO memset on const inner error union payload");
                case ConstPtrSpecialBaseOptionalPayload:
                    zig_panic("TODO memset on const inner optional payload");
                case ConstPtrSpecialHardCodedAddr:
                    // Excluded by the guard above.
                    zig_unreachable();
                case ConstPtrSpecialFunction:
                    zig_panic("TODO memset on ptr cast from function");
                case ConstPtrSpecialNull:
                    zig_panic("TODO memset on null ptr");
            }

            size_t count = bigint_as_usize(&count_val->data.x_bigint);
            size_t end = start + count;
            if (end > bound_end) {
                ir_add_error(ira, &count_value->base, buf_sprintf("out of bounds pointer access"));
                return ira->codegen->invalid_inst_gen;
            }

            // Perform the fill: copy the byte value into every element in range.
            for (size_t i = start; i < end; i += 1) {
                copy_const_val(ira->codegen, &dest_elements[i], byte_val);
            }

            return ir_const_void(ira, &instruction->base.base);
        }
    }

    return ir_build_memset_gen(ira, &instruction->base.base, casted_dest_ptr, casted_byte, casted_count);
}
|
|
|
|
// Analyzes @memcpy(dest_ptr, src_ptr, count).
//
// Coerces dest_ptr to a mutable [*]u8-style pointer and src_ptr to a const
// one (each preserving its own volatility and alignment), and count to usize.
// When all three operands are comptime-known and the destination is not a
// hard-coded address, the copy is performed now, element by element, on the
// comptime values; otherwise a runtime memcpy instruction is emitted.
//
// Returns a void constant (comptime path), the runtime memcpy instruction, or
// invalid_inst_gen after emitting a compile error.
static IrInstGen *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstSrcMemcpy *instruction) {
    Error err;

    IrInstGen *dest_ptr = instruction->dest_ptr->child;
    if (type_is_invalid(dest_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *src_ptr = instruction->src_ptr->child;
    if (type_is_invalid(src_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *count_value = instruction->count->child;
    if (type_is_invalid(count_value->value->type))
        return ira->codegen->invalid_inst_gen;

    // Each side keeps its own volatility on the u8 pointer it is cast to.
    ZigType *u8 = ira->codegen->builtin_types.entry_u8;
    ZigType *dest_uncasted_type = dest_ptr->value->type;
    ZigType *src_uncasted_type = src_ptr->value->type;
    bool dest_is_volatile = (dest_uncasted_type->id == ZigTypeIdPointer) &&
        dest_uncasted_type->data.pointer.is_volatile;
    bool src_is_volatile = (src_uncasted_type->id == ZigTypeIdPointer) &&
        src_uncasted_type->data.pointer.is_volatile;

    // Preserve each operand's alignment; non-pointer operands fall back to
    // u8's ABI alignment (they will fail the implicit cast below if invalid).
    uint32_t dest_align;
    if (dest_uncasted_type->id == ZigTypeIdPointer) {
        if ((err = resolve_ptr_align(ira, dest_uncasted_type, &dest_align)))
            return ira->codegen->invalid_inst_gen;
    } else {
        dest_align = get_abi_alignment(ira->codegen, u8);
    }

    uint32_t src_align;
    if (src_uncasted_type->id == ZigTypeIdPointer) {
        if ((err = resolve_ptr_align(ira, src_uncasted_type, &src_align)))
            return ira->codegen->invalid_inst_gen;
    } else {
        src_align = get_abi_alignment(ira->codegen, u8);
    }

    // Destination is a mutable many-pointer, source a const one.
    ZigType *usize = ira->codegen->builtin_types.entry_usize;
    ZigType *u8_ptr_mut = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile,
        PtrLenUnknown, dest_align, 0, 0, false);
    ZigType *u8_ptr_const = get_pointer_to_type_extra(ira->codegen, u8, true, src_is_volatile,
        PtrLenUnknown, src_align, 0, 0, false);

    IrInstGen *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr_mut);
    if (type_is_invalid(casted_dest_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_src_ptr = ir_implicit_cast(ira, src_ptr, u8_ptr_const);
    if (type_is_invalid(casted_src_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_count = ir_implicit_cast(ira, count_value, usize);
    if (type_is_invalid(casted_count->value->type))
        return ira->codegen->invalid_inst_gen;

    // TODO test this at comptime with u8 and non-u8 types
    // TODO test with dest ptr being a global runtime variable
    if (instr_is_comptime(casted_dest_ptr) &&
        instr_is_comptime(casted_src_ptr) &&
        instr_is_comptime(casted_count))
    {
        // None of the three operands may be undefined on the comptime path.
        ZigValue *dest_ptr_val = ir_resolve_const(ira, casted_dest_ptr, UndefBad);
        if (dest_ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *src_ptr_val = ir_resolve_const(ira, casted_src_ptr, UndefBad);
        if (src_ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *count_val = ir_resolve_const(ira, casted_count, UndefBad);
        if (count_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        if (dest_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
            size_t count = bigint_as_usize(&count_val->data.x_bigint);

            // Locate the destination element storage and the writable window
            // [dest_start, dest_end) within it.
            ZigValue *dest_elements;
            size_t dest_start;
            size_t dest_end;
            switch (dest_ptr_val->data.x_ptr.special) {
                case ConstPtrSpecialInvalid:
                case ConstPtrSpecialDiscard:
                    zig_unreachable();
                case ConstPtrSpecialRef:
                    // Pointer to a single value: one writable element.
                    dest_elements = dest_ptr_val->data.x_ptr.data.ref.pointee;
                    dest_start = 0;
                    dest_end = 1;
                    break;
                case ConstPtrSpecialSubArray:
                case ConstPtrSpecialBaseArray:
                    {
                        ZigValue *array_val = dest_ptr_val->data.x_ptr.data.base_array.array_val;
                        expand_undef_array(ira->codegen, array_val);
                        dest_elements = array_val->data.x_array.data.s_none.elements;
                        dest_start = dest_ptr_val->data.x_ptr.data.base_array.elem_index;
                        dest_end = array_val->type->data.array.len;
                        break;
                    }
                case ConstPtrSpecialBaseStruct:
                    zig_panic("TODO memcpy on const inner struct");
                case ConstPtrSpecialBaseErrorUnionCode:
                    zig_panic("TODO memcpy on const inner error union code");
                case ConstPtrSpecialBaseErrorUnionPayload:
                    zig_panic("TODO memcpy on const inner error union payload");
                case ConstPtrSpecialBaseOptionalPayload:
                    zig_panic("TODO memcpy on const inner optional payload");
                case ConstPtrSpecialHardCodedAddr:
                    // Excluded by the guard above.
                    zig_unreachable();
                case ConstPtrSpecialFunction:
                    zig_panic("TODO memcpy on ptr cast from function");
                case ConstPtrSpecialNull:
                    zig_panic("TODO memcpy on null ptr");
            }

            if (dest_start + count > dest_end) {
                ir_add_error(ira, &instruction->base.base, buf_sprintf("out of bounds pointer access"));
                return ira->codegen->invalid_inst_gen;
            }

            // Locate the source element storage and the readable window
            // [src_start, src_end) within it — same classification as above.
            ZigValue *src_elements;
            size_t src_start;
            size_t src_end;

            switch (src_ptr_val->data.x_ptr.special) {
                case ConstPtrSpecialInvalid:
                case ConstPtrSpecialDiscard:
                    zig_unreachable();
                case ConstPtrSpecialRef:
                    src_elements = src_ptr_val->data.x_ptr.data.ref.pointee;
                    src_start = 0;
                    src_end = 1;
                    break;
                case ConstPtrSpecialSubArray:
                case ConstPtrSpecialBaseArray:
                    {
                        ZigValue *array_val = src_ptr_val->data.x_ptr.data.base_array.array_val;
                        expand_undef_array(ira->codegen, array_val);
                        src_elements = array_val->data.x_array.data.s_none.elements;
                        src_start = src_ptr_val->data.x_ptr.data.base_array.elem_index;
                        src_end = array_val->type->data.array.len;
                        break;
                    }
                case ConstPtrSpecialBaseStruct:
                    zig_panic("TODO memcpy on const inner struct");
                case ConstPtrSpecialBaseErrorUnionCode:
                    zig_panic("TODO memcpy on const inner error union code");
                case ConstPtrSpecialBaseErrorUnionPayload:
                    zig_panic("TODO memcpy on const inner error union payload");
                case ConstPtrSpecialBaseOptionalPayload:
                    zig_panic("TODO memcpy on const inner optional payload");
                case ConstPtrSpecialHardCodedAddr:
                    // NOTE(review): only the destination was guarded against
                    // hard-coded addresses above — presumably a const source at
                    // a hard-coded address cannot reach here; confirm.
                    zig_unreachable();
                case ConstPtrSpecialFunction:
                    zig_panic("TODO memcpy on ptr cast from function");
                case ConstPtrSpecialNull:
                    zig_panic("TODO memcpy on null ptr");
            }

            if (src_start + count > src_end) {
                ir_add_error(ira, &instruction->base.base, buf_sprintf("out of bounds pointer access"));
                return ira->codegen->invalid_inst_gen;
            }

            // TODO check for noalias violations - this should be generalized to work for any function

            // Perform the element-wise copy on the comptime values.
            for (size_t i = 0; i < count; i += 1) {
                copy_const_val(ira->codegen, &dest_elements[dest_start + i], &src_elements[src_start + i]);
            }

            return ir_const_void(ira, &instruction->base.base);
        }
    }

    return ir_build_memcpy_gen(ira, &instruction->base.base, casted_dest_ptr, casted_src_ptr, casted_count);
}
|
|
|
|
// Returns the destination type imposed by a result location, or nullptr when
// no result location is given or it does not pin a type. Only an explicit
// cast result location (ResultLocIdCast) carries a concrete type.
static ZigType *get_result_loc_type(IrAnalyze *ira, ResultLoc *result_loc) {
    if (result_loc != nullptr && result_loc->id == ResultLocIdCast)
        return ir_resolve_type(ira, result_loc->source_instruction->child);
    return nullptr;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *instruction) {
|
|
Error err;
|
|
|
|
IrInstGen *ptr_ptr = instruction->ptr->child;
|
|
if (type_is_invalid(ptr_ptr->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
ZigType *ptr_ptr_type = ptr_ptr->value->type;
|
|
assert(ptr_ptr_type->id == ZigTypeIdPointer);
|
|
ZigType *array_type = ptr_ptr_type->data.pointer.child_type;
|
|
|
|
IrInstGen *start = instruction->start->child;
|
|
if (type_is_invalid(start->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
ZigType *usize = ira->codegen->builtin_types.entry_usize;
|
|
IrInstGen *casted_start = ir_implicit_cast(ira, start, usize);
|
|
if (type_is_invalid(casted_start->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
IrInstGen *end;
|
|
if (instruction->end) {
|
|
end = instruction->end->child;
|
|
if (type_is_invalid(end->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
end = ir_implicit_cast(ira, end, usize);
|
|
if (type_is_invalid(end->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
} else {
|
|
end = nullptr;
|
|
}
|
|
|
|
ZigValue *slice_sentinel_val = nullptr;
|
|
ZigType *non_sentinel_slice_ptr_type;
|
|
ZigType *elem_type;
|
|
|
|
bool generate_non_null_assert = false;
|
|
|
|
if (array_type->id == ZigTypeIdArray) {
|
|
elem_type = array_type->data.array.child_type;
|
|
non_sentinel_slice_ptr_type = get_pointer_to_type_extra(ira->codegen, elem_type,
|
|
ptr_ptr_type->data.pointer.is_const,
|
|
ptr_ptr_type->data.pointer.is_volatile,
|
|
PtrLenUnknown,
|
|
ptr_ptr_type->data.pointer.explicit_alignment, 0, 0, false);
|
|
} else if (array_type->id == ZigTypeIdPointer) {
|
|
if (array_type->data.pointer.ptr_len == PtrLenSingle) {
|
|
ZigType *main_type = array_type->data.pointer.child_type;
|
|
if (main_type->id == ZigTypeIdArray) {
|
|
elem_type = main_type->data.pointer.child_type;
|
|
non_sentinel_slice_ptr_type = get_pointer_to_type_extra(ira->codegen,
|
|
elem_type,
|
|
array_type->data.pointer.is_const, array_type->data.pointer.is_volatile,
|
|
PtrLenUnknown,
|
|
array_type->data.pointer.explicit_alignment, 0, 0, false);
|
|
} else {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice of single-item pointer"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
} else {
|
|
elem_type = array_type->data.pointer.child_type;
|
|
if (array_type->data.pointer.ptr_len == PtrLenC) {
|
|
array_type = adjust_ptr_len(ira->codegen, array_type, PtrLenUnknown);
|
|
|
|
// C pointers are allowzero by default.
|
|
// However, we want to be able to slice them without generating an allowzero slice (see issue #4401).
|
|
// To achieve this, we generate a runtime safety check and make the slice type non-allowzero.
|
|
if (array_type->data.pointer.allow_zero) {
|
|
array_type = adjust_ptr_allow_zero(ira->codegen, array_type, false);
|
|
generate_non_null_assert = true;
|
|
}
|
|
}
|
|
ZigType *maybe_sentineled_slice_ptr_type = array_type;
|
|
non_sentinel_slice_ptr_type = adjust_ptr_sentinel(ira->codegen, maybe_sentineled_slice_ptr_type, nullptr);
|
|
if (!end) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice of pointer must include end value"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
} else if (is_slice(array_type)) {
|
|
ZigType *maybe_sentineled_slice_ptr_type = array_type->data.structure.fields[slice_ptr_index]->type_entry;
|
|
slice_sentinel_val = maybe_sentineled_slice_ptr_type->data.pointer.sentinel;
|
|
non_sentinel_slice_ptr_type = adjust_ptr_sentinel(ira->codegen, maybe_sentineled_slice_ptr_type, nullptr);
|
|
elem_type = non_sentinel_slice_ptr_type->data.pointer.child_type;
|
|
} else {
|
|
ir_add_error(ira, &instruction->base.base,
|
|
buf_sprintf("slice of non-array type '%s'", buf_ptr(&array_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
ZigValue *sentinel_val = nullptr;
|
|
if (instruction->sentinel) {
|
|
IrInstGen *uncasted_sentinel = instruction->sentinel->child;
|
|
if (type_is_invalid(uncasted_sentinel->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
IrInstGen *sentinel = ir_implicit_cast(ira, uncasted_sentinel, elem_type);
|
|
if (type_is_invalid(sentinel->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
sentinel_val = ir_resolve_const(ira, sentinel, UndefBad);
|
|
if (sentinel_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
ZigType *child_array_type = (array_type->id == ZigTypeIdPointer &&
|
|
array_type->data.pointer.ptr_len == PtrLenSingle) ? array_type->data.pointer.child_type : array_type;
|
|
|
|
ZigType *return_type;
|
|
|
|
// If start index and end index are both comptime known, then the result type is a pointer to array
|
|
// not a slice. However, if the start or end index is a lazy value, and the result location is a slice,
|
|
// then the pointer-to-array would be casted to a slice anyway. So, we preserve the laziness of these
|
|
// values by making the return type a slice.
|
|
ZigType *res_loc_type = get_result_loc_type(ira, instruction->result_loc);
|
|
bool result_loc_is_slice = (res_loc_type != nullptr && is_slice(res_loc_type));
|
|
bool end_is_known = !result_loc_is_slice &&
|
|
((end != nullptr && value_is_comptime(end->value)) ||
|
|
(end == nullptr && child_array_type->id == ZigTypeIdArray));
|
|
|
|
ZigValue *array_sentinel = sentinel_val;
|
|
if (end_is_known) {
|
|
uint64_t end_scalar;
|
|
if (end != nullptr) {
|
|
ZigValue *end_val = ir_resolve_const(ira, end, UndefBad);
|
|
if (!end_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
end_scalar = bigint_as_u64(&end_val->data.x_bigint);
|
|
} else {
|
|
end_scalar = child_array_type->data.array.len;
|
|
}
|
|
array_sentinel = (child_array_type->id == ZigTypeIdArray && end_scalar == child_array_type->data.array.len)
|
|
? child_array_type->data.array.sentinel : sentinel_val;
|
|
|
|
if (value_is_comptime(casted_start->value)) {
|
|
ZigValue *start_val = ir_resolve_const(ira, casted_start, UndefBad);
|
|
if (!start_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
uint64_t start_scalar = bigint_as_u64(&start_val->data.x_bigint);
|
|
|
|
if (start_scalar > end_scalar) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("out of bounds slice"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
uint32_t base_ptr_align = non_sentinel_slice_ptr_type->data.pointer.explicit_alignment;
|
|
uint32_t ptr_byte_alignment = 0;
|
|
if (end_scalar > start_scalar) {
|
|
if ((err = compute_elem_align(ira, elem_type, base_ptr_align, start_scalar, &ptr_byte_alignment)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
ZigType *return_array_type = get_array_type(ira->codegen, elem_type, end_scalar - start_scalar,
|
|
array_sentinel);
|
|
return_type = get_pointer_to_type_extra(ira->codegen, return_array_type,
|
|
non_sentinel_slice_ptr_type->data.pointer.is_const,
|
|
non_sentinel_slice_ptr_type->data.pointer.is_volatile,
|
|
PtrLenSingle, ptr_byte_alignment, 0, 0, false);
|
|
goto done_with_return_type;
|
|
}
|
|
} else if (array_sentinel == nullptr && end == nullptr) {
|
|
array_sentinel = slice_sentinel_val;
|
|
}
|
|
if (array_sentinel != nullptr) {
|
|
// TODO deal with non-abi-alignment here
|
|
ZigType *slice_ptr_type = adjust_ptr_sentinel(ira->codegen, non_sentinel_slice_ptr_type, array_sentinel);
|
|
return_type = get_slice_type(ira->codegen, slice_ptr_type);
|
|
} else {
|
|
// TODO deal with non-abi-alignment here
|
|
return_type = get_slice_type(ira->codegen, non_sentinel_slice_ptr_type);
|
|
}
|
|
done_with_return_type:
|
|
|
|
if (instr_is_comptime(ptr_ptr) &&
|
|
value_is_comptime(casted_start->value) &&
|
|
(!end || value_is_comptime(end->value)))
|
|
{
|
|
ZigValue *array_val;
|
|
ZigValue *parent_ptr;
|
|
size_t abs_offset;
|
|
size_t rel_end;
|
|
bool ptr_is_undef = false;
|
|
if (child_array_type->id == ZigTypeIdArray) {
|
|
if (array_type->id == ZigTypeIdPointer) {
|
|
parent_ptr = const_ptr_pointee(ira, ira->codegen, ptr_ptr->value, instruction->base.base.source_node);
|
|
if (parent_ptr == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (parent_ptr->special == ConstValSpecialUndef) {
|
|
array_val = nullptr;
|
|
abs_offset = 0;
|
|
rel_end = SIZE_MAX;
|
|
ptr_is_undef = true;
|
|
} else if (parent_ptr->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
|
|
array_val = nullptr;
|
|
abs_offset = 0;
|
|
rel_end = SIZE_MAX;
|
|
} else {
|
|
array_val = const_ptr_pointee(ira, ira->codegen, parent_ptr, instruction->base.base.source_node);
|
|
if (array_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
rel_end = child_array_type->data.array.len;
|
|
abs_offset = 0;
|
|
}
|
|
} else {
|
|
array_val = const_ptr_pointee(ira, ira->codegen, ptr_ptr->value, instruction->base.base.source_node);
|
|
if (array_val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
rel_end = array_type->data.array.len;
|
|
parent_ptr = nullptr;
|
|
abs_offset = 0;
|
|
}
|
|
} else if (array_type->id == ZigTypeIdPointer) {
|
|
assert(array_type->data.pointer.ptr_len == PtrLenUnknown);
|
|
parent_ptr = const_ptr_pointee(ira, ira->codegen, ptr_ptr->value, instruction->base.base.source_node);
|
|
if (parent_ptr == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (parent_ptr->special == ConstValSpecialUndef) {
|
|
array_val = nullptr;
|
|
abs_offset = 0;
|
|
rel_end = SIZE_MAX;
|
|
ptr_is_undef = true;
|
|
} else switch (parent_ptr->data.x_ptr.special) {
|
|
case ConstPtrSpecialInvalid:
|
|
case ConstPtrSpecialDiscard:
|
|
zig_unreachable();
|
|
case ConstPtrSpecialRef:
|
|
if (parent_ptr->data.x_ptr.data.ref.pointee->type->id == ZigTypeIdArray) {
|
|
array_val = parent_ptr->data.x_ptr.data.ref.pointee;
|
|
abs_offset = 0;
|
|
rel_end = array_val->type->data.array.len;
|
|
} else {
|
|
array_val = nullptr;
|
|
abs_offset = SIZE_MAX;
|
|
rel_end = 1;
|
|
}
|
|
break;
|
|
case ConstPtrSpecialSubArray:
|
|
case ConstPtrSpecialBaseArray:
|
|
array_val = parent_ptr->data.x_ptr.data.base_array.array_val;
|
|
abs_offset = parent_ptr->data.x_ptr.data.base_array.elem_index;
|
|
rel_end = array_val->type->data.array.len - abs_offset;
|
|
break;
|
|
case ConstPtrSpecialBaseStruct:
|
|
zig_panic("TODO slice const inner struct");
|
|
case ConstPtrSpecialBaseErrorUnionCode:
|
|
zig_panic("TODO slice const inner error union code");
|
|
case ConstPtrSpecialBaseErrorUnionPayload:
|
|
zig_panic("TODO slice const inner error union payload");
|
|
case ConstPtrSpecialBaseOptionalPayload:
|
|
zig_panic("TODO slice const inner optional payload");
|
|
case ConstPtrSpecialHardCodedAddr:
|
|
array_val = nullptr;
|
|
abs_offset = 0;
|
|
rel_end = SIZE_MAX;
|
|
break;
|
|
case ConstPtrSpecialFunction:
|
|
zig_panic("TODO slice of ptr cast from function");
|
|
case ConstPtrSpecialNull:
|
|
zig_panic("TODO slice of null ptr");
|
|
}
|
|
} else if (is_slice(array_type)) {
|
|
ZigValue *slice_ptr = const_ptr_pointee(ira, ira->codegen, ptr_ptr->value, instruction->base.base.source_node);
|
|
if (slice_ptr == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (slice_ptr->special == ConstValSpecialUndef) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice of undefined"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
parent_ptr = slice_ptr->data.x_struct.fields[slice_ptr_index];
|
|
if (parent_ptr->special == ConstValSpecialUndef) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice of undefined"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
ZigValue *len_val = slice_ptr->data.x_struct.fields[slice_len_index];
|
|
|
|
switch (parent_ptr->data.x_ptr.special) {
|
|
case ConstPtrSpecialInvalid:
|
|
case ConstPtrSpecialDiscard:
|
|
zig_unreachable();
|
|
case ConstPtrSpecialRef:
|
|
array_val = nullptr;
|
|
abs_offset = SIZE_MAX;
|
|
rel_end = 1;
|
|
break;
|
|
case ConstPtrSpecialSubArray:
|
|
case ConstPtrSpecialBaseArray:
|
|
array_val = parent_ptr->data.x_ptr.data.base_array.array_val;
|
|
abs_offset = parent_ptr->data.x_ptr.data.base_array.elem_index;
|
|
rel_end = bigint_as_usize(&len_val->data.x_bigint);
|
|
break;
|
|
case ConstPtrSpecialBaseStruct:
|
|
zig_panic("TODO slice const inner struct");
|
|
case ConstPtrSpecialBaseErrorUnionCode:
|
|
zig_panic("TODO slice const inner error union code");
|
|
case ConstPtrSpecialBaseErrorUnionPayload:
|
|
zig_panic("TODO slice const inner error union payload");
|
|
case ConstPtrSpecialBaseOptionalPayload:
|
|
zig_panic("TODO slice const inner optional payload");
|
|
case ConstPtrSpecialHardCodedAddr:
|
|
array_val = nullptr;
|
|
abs_offset = 0;
|
|
rel_end = bigint_as_usize(&len_val->data.x_bigint);
|
|
break;
|
|
case ConstPtrSpecialFunction:
|
|
zig_panic("TODO slice of slice cast from function");
|
|
case ConstPtrSpecialNull:
|
|
zig_panic("TODO slice of null");
|
|
}
|
|
} else {
|
|
zig_unreachable();
|
|
}
|
|
|
|
ZigValue *start_val = ir_resolve_const(ira, casted_start, UndefBad);
|
|
if (!start_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
uint64_t start_scalar = bigint_as_u64(&start_val->data.x_bigint);
|
|
if (!ptr_is_undef && start_scalar > rel_end) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("out of bounds slice"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
uint64_t end_scalar = rel_end;
|
|
if (end) {
|
|
ZigValue *end_val = ir_resolve_const(ira, end, UndefBad);
|
|
if (!end_val)
|
|
return ira->codegen->invalid_inst_gen;
|
|
end_scalar = bigint_as_u64(&end_val->data.x_bigint);
|
|
}
|
|
if (!ptr_is_undef) {
|
|
if (end_scalar > rel_end) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("out of bounds slice"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
if (start_scalar > end_scalar) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice start is greater than end"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
if (ptr_is_undef && start_scalar != end_scalar) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("non-zero length slice of undefined pointer"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
// check sentinel when target is comptime-known
|
|
{
|
|
if (!sentinel_val)
|
|
goto exit_check_sentinel;
|
|
|
|
switch (ptr_ptr->value->data.x_ptr.mut) {
|
|
case ConstPtrMutComptimeConst:
|
|
case ConstPtrMutComptimeVar:
|
|
break;
|
|
case ConstPtrMutRuntimeVar:
|
|
case ConstPtrMutInfer:
|
|
goto exit_check_sentinel;
|
|
}
|
|
|
|
// prepare check parameters
|
|
ZigValue *target = const_ptr_pointee(ira, ira->codegen, ptr_ptr->value, instruction->base.base.source_node);
|
|
if (target == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
uint64_t target_len = 0;
|
|
ZigValue *target_sentinel = nullptr;
|
|
ZigValue *target_elements = nullptr;
|
|
|
|
for (;;) {
|
|
if (target->type->id == ZigTypeIdArray) {
|
|
// handle `[N]T`
|
|
target_len = target->type->data.array.len;
|
|
target_sentinel = target->type->data.array.sentinel;
|
|
target_elements = target->data.x_array.data.s_none.elements;
|
|
break;
|
|
} else if (target->type->id == ZigTypeIdPointer && target->type->data.pointer.child_type->id == ZigTypeIdArray) {
|
|
// handle `*[N]T`
|
|
target = const_ptr_pointee(ira, ira->codegen, target, instruction->base.base.source_node);
|
|
if (target == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
assert(target->type->id == ZigTypeIdArray);
|
|
continue;
|
|
} else if (target->type->id == ZigTypeIdPointer) {
|
|
// handle `[*]T`
|
|
// handle `[*c]T`
|
|
switch (target->data.x_ptr.special) {
|
|
case ConstPtrSpecialInvalid:
|
|
case ConstPtrSpecialDiscard:
|
|
zig_unreachable();
|
|
case ConstPtrSpecialRef:
|
|
target = target->data.x_ptr.data.ref.pointee;
|
|
assert(target->type->id == ZigTypeIdArray);
|
|
continue;
|
|
case ConstPtrSpecialBaseArray:
|
|
case ConstPtrSpecialSubArray:
|
|
target = target->data.x_ptr.data.base_array.array_val;
|
|
assert(target->type->id == ZigTypeIdArray);
|
|
continue;
|
|
case ConstPtrSpecialBaseStruct:
|
|
zig_panic("TODO slice const inner struct");
|
|
case ConstPtrSpecialBaseErrorUnionCode:
|
|
zig_panic("TODO slice const inner error union code");
|
|
case ConstPtrSpecialBaseErrorUnionPayload:
|
|
zig_panic("TODO slice const inner error union payload");
|
|
case ConstPtrSpecialBaseOptionalPayload:
|
|
zig_panic("TODO slice const inner optional payload");
|
|
case ConstPtrSpecialHardCodedAddr:
|
|
// skip check
|
|
goto exit_check_sentinel;
|
|
case ConstPtrSpecialFunction:
|
|
zig_panic("TODO slice of ptr cast from function");
|
|
case ConstPtrSpecialNull:
|
|
zig_panic("TODO slice of null ptr");
|
|
}
|
|
break;
|
|
} else if (is_slice(target->type)) {
|
|
// handle `[]T`
|
|
target = target->data.x_struct.fields[slice_ptr_index];
|
|
assert(target->type->id == ZigTypeIdPointer);
|
|
continue;
|
|
}
|
|
|
|
zig_unreachable();
|
|
}
|
|
|
|
// perform check
|
|
if (target_sentinel == nullptr) {
|
|
if (end_scalar >= target_len) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice-sentinel is out of bounds"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
if (!const_values_equal(ira->codegen, sentinel_val, &target_elements[end_scalar])) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice-sentinel does not match memory at target index"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
} else {
|
|
assert(end_scalar <= target_len);
|
|
if (end_scalar == target_len) {
|
|
if (!const_values_equal(ira->codegen, sentinel_val, target_sentinel)) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice-sentinel does not match target-sentinel"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
} else {
|
|
if (!const_values_equal(ira->codegen, sentinel_val, &target_elements[end_scalar])) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("slice-sentinel does not match memory at target index"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
exit_check_sentinel:
|
|
|
|
IrInstGen *result = ir_const(ira, &instruction->base.base, return_type);
|
|
|
|
ZigValue *ptr_val;
|
|
if (return_type->id == ZigTypeIdPointer) {
|
|
// pointer to array
|
|
ptr_val = result->value;
|
|
} else {
|
|
// slice
|
|
result->value->data.x_struct.fields = alloc_const_vals_ptrs(ira->codegen, 2);
|
|
|
|
ptr_val = result->value->data.x_struct.fields[slice_ptr_index];
|
|
|
|
ZigValue *len_val = result->value->data.x_struct.fields[slice_len_index];
|
|
init_const_usize(ira->codegen, len_val, end_scalar - start_scalar);
|
|
}
|
|
|
|
bool return_type_is_const = non_sentinel_slice_ptr_type->data.pointer.is_const;
|
|
if (array_val) {
|
|
size_t index = abs_offset + start_scalar;
|
|
init_const_ptr_array(ira->codegen, ptr_val, array_val, index, return_type_is_const, PtrLenUnknown);
|
|
if (return_type->id == ZigTypeIdPointer) {
|
|
ptr_val->data.x_ptr.special = ConstPtrSpecialSubArray;
|
|
}
|
|
if (array_type->id == ZigTypeIdArray) {
|
|
ptr_val->data.x_ptr.mut = ptr_ptr->value->data.x_ptr.mut;
|
|
} else if (is_slice(array_type)) {
|
|
ptr_val->data.x_ptr.mut = parent_ptr->data.x_ptr.mut;
|
|
} else if (array_type->id == ZigTypeIdPointer) {
|
|
ptr_val->data.x_ptr.mut = parent_ptr->data.x_ptr.mut;
|
|
}
|
|
} else if (ptr_is_undef) {
|
|
ptr_val->type = get_pointer_to_type(ira->codegen, parent_ptr->type->data.pointer.child_type,
|
|
return_type_is_const);
|
|
ptr_val->special = ConstValSpecialUndef;
|
|
} else switch (parent_ptr->data.x_ptr.special) {
|
|
case ConstPtrSpecialInvalid:
|
|
case ConstPtrSpecialDiscard:
|
|
zig_unreachable();
|
|
case ConstPtrSpecialRef:
|
|
init_const_ptr_ref(ira->codegen, ptr_val, parent_ptr->data.x_ptr.data.ref.pointee,
|
|
return_type_is_const);
|
|
break;
|
|
case ConstPtrSpecialSubArray:
|
|
case ConstPtrSpecialBaseArray:
|
|
zig_unreachable();
|
|
case ConstPtrSpecialBaseStruct:
|
|
zig_panic("TODO");
|
|
case ConstPtrSpecialBaseErrorUnionCode:
|
|
zig_panic("TODO");
|
|
case ConstPtrSpecialBaseErrorUnionPayload:
|
|
zig_panic("TODO");
|
|
case ConstPtrSpecialBaseOptionalPayload:
|
|
zig_panic("TODO");
|
|
case ConstPtrSpecialHardCodedAddr:
|
|
init_const_ptr_hard_coded_addr(ira->codegen, ptr_val,
|
|
parent_ptr->type->data.pointer.child_type,
|
|
parent_ptr->data.x_ptr.data.hard_coded_addr.addr + start_scalar,
|
|
return_type_is_const);
|
|
break;
|
|
case ConstPtrSpecialFunction:
|
|
zig_panic("TODO");
|
|
case ConstPtrSpecialNull:
|
|
zig_panic("TODO");
|
|
}
|
|
|
|
// In the case of pointer-to-array, we must restore this because above it overwrites ptr_val->type
|
|
result->value->type = return_type;
|
|
return result;
|
|
}
|
|
|
|
if (generate_non_null_assert) {
|
|
IrInstGen *ptr_val = ir_get_deref(ira, &instruction->base.base, ptr_ptr, nullptr);
|
|
|
|
if (type_is_invalid(ptr_val->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
ir_build_assert_non_null(ira, &instruction->base.base, ptr_val);
|
|
}
|
|
|
|
IrInstGen *result_loc = nullptr;
|
|
|
|
if (return_type->id != ZigTypeIdPointer) {
|
|
result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
|
|
return_type, nullptr, true, true);
|
|
if (result_loc != nullptr) {
|
|
if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
|
|
return result_loc;
|
|
}
|
|
|
|
ir_assert(result_loc->value->type->id == ZigTypeIdPointer, &instruction->base.base);
|
|
if (result_loc->value->type->data.pointer.is_const) {
|
|
ir_add_error(ira, &instruction->base.base, buf_sprintf("cannot assign to constant"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
IrInstGen *dummy_value = ir_const(ira, &instruction->base.base, return_type);
|
|
dummy_value->value->special = ConstValSpecialRuntime;
|
|
IrInstGen *dummy_result = ir_implicit_cast2(ira, &instruction->base.base,
|
|
dummy_value, result_loc->value->type->data.pointer.child_type);
|
|
if (type_is_invalid(dummy_result->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
|
|
return ir_build_slice_gen(ira, &instruction->base.base, return_type, ptr_ptr,
|
|
casted_start, end, instruction->safety_check_on, result_loc, sentinel_val);
|
|
}
|
|
|
|
// Analyze @hasField(ContainerType, "name"): comptime-answers whether the given
// struct/enum/union declares a field with that name. Any other container kind
// is a compile error.
static IrInstGen *ir_analyze_instruction_has_field(IrAnalyze *ira, IrInstSrcHasField *instruction) {
    Error err;

    ZigType *container_type = ir_resolve_type(ira, instruction->container_type->child);
    if (type_is_invalid(container_type))
        return ira->codegen->invalid_inst_gen;

    // Field tables are only populated once zero-bit analysis has completed.
    if ((err = type_resolve(ira->codegen, container_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    Buf *field_name = ir_resolve_str(ira, instruction->field_name->child);
    if (field_name == nullptr)
        return ira->codegen->invalid_inst_gen;

    bool has_field;
    switch (container_type->id) {
        case ZigTypeIdStruct:
            has_field = find_struct_type_field(container_type, field_name) != nullptr;
            break;
        case ZigTypeIdEnum:
            has_field = find_enum_type_field(container_type, field_name) != nullptr;
            break;
        case ZigTypeIdUnion:
            has_field = find_union_type_field(container_type, field_name) != nullptr;
            break;
        default:
            ir_add_error(ira, &instruction->container_type->base,
                buf_sprintf("type '%s' does not support @hasField", buf_ptr(&container_type->name)));
            return ira->codegen->invalid_inst_gen;
    }
    return ir_const_bool(ira, &instruction->base.base, has_field);
}
|
|
|
|
// Analyze @wasmMemorySize(index): only valid on wasm targets; the memory index
// operand is implicitly cast to u32 before lowering to the gen instruction.
static IrInstGen *ir_analyze_instruction_wasm_memory_size(IrAnalyze *ira, IrInstSrcWasmMemorySize *instruction) {
    // TODO generate compile error for target_arch different than 32bit
    if (!target_is_wasm(ira->codegen->zig_target)) {
        ir_add_error_node(ira, instruction->base.base.source_node,
                buf_sprintf("@wasmMemorySize is a wasm32 feature only"));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *mem_index = instruction->index->child;
    if (type_is_invalid(mem_index->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_index = ir_implicit_cast(ira, mem_index, ira->codegen->builtin_types.entry_u32);
    if (type_is_invalid(casted_index->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_build_wasm_memory_size_gen(ira, &instruction->base.base, casted_index);
}
|
|
|
|
// Analyze @wasmMemoryGrow(index, delta): only valid on wasm targets; both the
// memory index and the page delta are implicitly cast to u32.
static IrInstGen *ir_analyze_instruction_wasm_memory_grow(IrAnalyze *ira, IrInstSrcWasmMemoryGrow *instruction) {
    // TODO generate compile error for target_arch different than 32bit
    if (!target_is_wasm(ira->codegen->zig_target)) {
        ir_add_error_node(ira, instruction->base.base.source_node,
                buf_sprintf("@wasmMemoryGrow is a wasm32 feature only"));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *u32_type = ira->codegen->builtin_types.entry_u32;

    IrInstGen *mem_index = instruction->index->child;
    if (type_is_invalid(mem_index->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_index = ir_implicit_cast(ira, mem_index, u32_type);
    if (type_is_invalid(casted_index->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *page_delta = instruction->delta->child;
    if (type_is_invalid(page_delta->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_delta = ir_implicit_cast(ira, page_delta, u32_type);
    if (type_is_invalid(casted_delta->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_build_wasm_memory_grow_gen(ira, &instruction->base.base, casted_index, casted_delta);
}
|
|
|
|
// Analyze @breakpoint(): no comptime behavior; always lowers directly to the
// gen-IR breakpoint instruction.
static IrInstGen *ir_analyze_instruction_breakpoint(IrAnalyze *ira, IrInstSrcBreakpoint *instruction) {
    return ir_build_breakpoint_gen(ira, &instruction->base.base);
}
|
|
|
|
// Analyze @returnAddress(): purely a runtime query, so it lowers directly to
// the gen-IR return-address instruction.
static IrInstGen *ir_analyze_instruction_return_address(IrAnalyze *ira, IrInstSrcReturnAddress *instruction) {
    return ir_build_return_address_gen(ira, &instruction->base.base);
}
|
|
|
|
// Analyze @frameAddress(): purely a runtime query, so it lowers directly to
// the gen-IR frame-address instruction.
static IrInstGen *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrInstSrcFrameAddress *instruction) {
    return ir_build_frame_address_gen(ira, &instruction->base.base);
}
|
|
|
|
// Analyze @frame(): returns a pointer to the current function's async frame.
// Taking a frame handle marks the function as inferred-async if it was not
// already so marked.
static IrInstGen *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInstSrcFrameHandle *instruction) {
    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    ir_assert(fn_entry != nullptr, &instruction->base.base);

    // Remember the node that caused this function to become async (for diagnostics).
    if (fn_entry->inferred_async_node == nullptr)
        fn_entry->inferred_async_node = instruction->base.base.source_node;

    ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen,
            get_fn_frame_type(ira->codegen, fn_entry), false);
    return ir_build_handle_gen(ira, &instruction->base.base, ptr_frame_type);
}
|
|
|
|
// Analyze @Frame(fn): resolves to the frame type of a concrete (non-generic)
// function; generic functions have no single frame type and are rejected.
static IrInstGen *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstSrcFrameType *instruction) {
    ZigFn *target_fn = ir_resolve_fn(ira, instruction->fn->child);
    if (target_fn == nullptr)
        return ira->codegen->invalid_inst_gen;

    if (target_fn->type_entry->data.fn.is_generic) {
        ir_add_error(ira, &instruction->base.base,
                buf_sprintf("@Frame() of generic function"));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_const_type(ira, &instruction->base.base, get_fn_frame_type(ira->codegen, target_fn));
}
|
|
|
|
// Analyze @frameSize(fn): the operand must be a function value. The size is
// only known at runtime (via the frame-size prefix data emitted by codegen),
// so this always lowers to a gen instruction.
static IrInstGen *ir_analyze_instruction_frame_size(IrAnalyze *ira, IrInstSrcFrameSize *instruction) {
    IrInstGen *fn_inst = instruction->fn->child;
    if (type_is_invalid(fn_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    if (fn_inst->value->type->id != ZigTypeIdFn) {
        ir_add_error(ira, &fn_inst->base,
                buf_sprintf("expected function, found '%s'", buf_ptr(&fn_inst->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Codegen must emit frame-size prefix data for this query to read.
    ira->codegen->need_frame_size_prefix_data = true;

    return ir_build_frame_size_gen(ira, &instruction->base.base, fn_inst);
}
|
|
|
|
// Analyze @alignOf(T) by producing a lazy comptime-int value instead of
// resolving the type's alignment immediately. Deferring resolution avoids
// false positive dependency loops such as:
//     const Node = struct {
//         field: []align(@alignOf(Node)) Node,
//     };
static IrInstGen *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstSrcAlignOf *instruction) {
    IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_num_lit_int);
    result->value->special = ConstValSpecialLazy;

    LazyValueAlignOf *lazy_align_of = heap::c_allocator.create<LazyValueAlignOf>();
    lazy_align_of->base.id = LazyValueIdAlignOf;
    // The lazy value keeps the analysis context alive until it is resolved.
    lazy_align_of->ira = ira;
    ira_ref(ira);
    result->value->data.x_lazy = &lazy_align_of->base;

    lazy_align_of->target_type = instruction->type_value->child;
    if (ir_resolve_type_lazy(ira, lazy_align_of->target_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    return result;
}
|
|
|
|
// Analyze @addWithOverflow / @subWithOverflow / @mulWithOverflow / @shlWithOverflow.
// Validates that the destination type is an integer, casts the operands and the
// result pointer, and — when all three are comptime-known — performs the
// operation on BigInts, writing the (possibly truncated) result through the
// pointer and returning a comptime bool indicating overflow. Otherwise lowers
// to a runtime gen instruction.
static IrInstGen *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInstSrcOverflowOp *instruction) {
    Error err;

    IrInstGen *type_value = instruction->type_value->child;
    if (type_is_invalid(type_value->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *dest_type = ir_resolve_type(ira, type_value);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    if (dest_type->id != ZigTypeIdInt) {
        ir_add_error(ira, &type_value->base,
            buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *op1 = instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, dest_type);
    if (type_is_invalid(casted_op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op2;
    if (instruction->op == IrOverflowOpShl) {
        // Shift amounts use the log2-sized unsigned type, not dest_type itself.
        ZigType *shift_amt_type = get_smallest_unsigned_int_type(ira->codegen,
                dest_type->data.integral.bit_count - 1);
        casted_op2 = ir_implicit_cast(ira, op2, shift_amt_type);
    } else {
        casted_op2 = ir_implicit_cast(ira, op2, dest_type);
    }
    if (type_is_invalid(casted_op2->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *result_ptr = instruction->result_ptr->child;
    if (type_is_invalid(result_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // Build the pointer type we expect the result pointer to cast to,
    // preserving the caller's alignment and volatility when it is a pointer.
    ZigType *expected_ptr_type;
    if (result_ptr->value->type->id == ZigTypeIdPointer) {
        uint32_t alignment;
        if ((err = resolve_ptr_align(ira, result_ptr->value->type, &alignment)))
            return ira->codegen->invalid_inst_gen;
        expected_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_type,
                false, result_ptr->value->type->data.pointer.is_volatile,
                PtrLenSingle,
                alignment, 0, 0, false);
    } else {
        expected_ptr_type = get_pointer_to_type(ira->codegen, dest_type, false);
    }

    IrInstGen *casted_result_ptr = ir_implicit_cast(ira, result_ptr, expected_ptr_type);
    if (type_is_invalid(casted_result_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // Zero-bit integers can never overflow.
    // Don't write anything to the result pointer.
    if (dest_type->data.integral.bit_count == 0)
        return ir_const_bool(ira, &instruction->base.base, false);

    if (instr_is_comptime(casted_op1) &&
        instr_is_comptime(casted_op2) &&
        instr_is_comptime(casted_result_ptr))
    {
        // All inputs comptime-known: evaluate with arbitrary-precision ints.
        ZigValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *result_val = ir_resolve_const(ira, casted_result_ptr, UndefBad);
        if (result_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        BigInt *op1_bigint = &op1_val->data.x_bigint;
        BigInt *op2_bigint = &op2_val->data.x_bigint;
        // The operation writes directly into the pointed-at comptime value.
        ZigValue *pointee_val = const_ptr_pointee(ira, ira->codegen, result_val,
                casted_result_ptr->base.source_node);
        if (pointee_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        BigInt *dest_bigint = &pointee_val->data.x_bigint;
        switch (instruction->op) {
            case IrOverflowOpAdd:
                bigint_add(dest_bigint, op1_bigint, op2_bigint);
                break;
            case IrOverflowOpSub:
                bigint_sub(dest_bigint, op1_bigint, op2_bigint);
                break;
            case IrOverflowOpMul:
                bigint_mul(dest_bigint, op1_bigint, op2_bigint);
                break;
            case IrOverflowOpShl:
                bigint_shl(dest_bigint, op1_bigint, op2_bigint);
                break;
        }
        // Overflow happened iff the exact result does not fit in dest_type;
        // in that case wrap it by truncation, matching runtime semantics.
        bool result_bool = false;
        if (!bigint_fits_in_bits(dest_bigint, dest_type->data.integral.bit_count,
            dest_type->data.integral.is_signed))
        {
            result_bool = true;
            BigInt tmp_bigint;
            bigint_init_bigint(&tmp_bigint, dest_bigint);
            bigint_truncate(dest_bigint, &tmp_bigint, dest_type->data.integral.bit_count,
                    dest_type->data.integral.is_signed);
        }
        pointee_val->special = ConstValSpecialStatic;
        return ir_const_bool(ira, &instruction->base.base, result_bool);
    }

    return ir_build_overflow_op_gen(ira, &instruction->base.base, instruction->op,
            casted_op1, casted_op2, casted_result_ptr, dest_type);
}
|
|
|
|
// Comptime-evaluate @mulAdd for a single scalar: out_val = op1 * op2 + op3,
// with a single rounding (fused multiply-add) for the given float type.
// comptime_float uses 128-bit softfloat; fixed-width floats dispatch on bit
// count. Vectors are handled by the caller, one element at a time.
static void ir_eval_mul_add(IrAnalyze *ira, IrInstSrcMulAdd *source_instr, ZigType *float_type,
        ZigValue *op1, ZigValue *op2, ZigValue *op3, ZigValue *out_val) {
    if (float_type->id == ZigTypeIdComptimeFloat) {
        // BUG FIX: softfloat's f128M_mulAdd(a, b, c, z) takes the result
        // pointer LAST (z = a*b + c), exactly as the 128-bit case below uses
        // it. The previous code passed out_val first, which read the
        // uninitialized output as an operand and clobbered op3.
        f128M_mulAdd(&op1->data.x_bigfloat.value, &op2->data.x_bigfloat.value,
                &op3->data.x_bigfloat.value, &out_val->data.x_bigfloat.value);
    } else if (float_type->id == ZigTypeIdFloat) {
        switch (float_type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_mulAdd(op1->data.x_f16, op2->data.x_f16, op3->data.x_f16);
                break;
            case 32:
                out_val->data.x_f32 = fmaf(op1->data.x_f32, op2->data.x_f32, op3->data.x_f32);
                break;
            case 64:
                out_val->data.x_f64 = fma(op1->data.x_f64, op2->data.x_f64, op3->data.x_f64);
                break;
            case 128:
                f128M_mulAdd(&op1->data.x_f128, &op2->data.x_f128, &op3->data.x_f128, &out_val->data.x_f128);
                break;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}
|
|
|
|
// Analyze @mulAdd(T, a, b, c): fused multiply-add for floats and vectors of
// floats. All three operands are implicitly cast to the expression type. When
// every operand is comptime-known the result is computed here (element-wise
// for vectors); otherwise a runtime gen instruction is emitted.
static IrInstGen *ir_analyze_instruction_mul_add(IrAnalyze *ira, IrInstSrcMulAdd *instruction) {
    IrInstGen *type_value = instruction->type_value->child;
    if (type_is_invalid(type_value->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *expr_type = ir_resolve_type(ira, type_value);
    if (type_is_invalid(expr_type))
        return ira->codegen->invalid_inst_gen;

    // Only allow float types, and vectors of floats.
    ZigType *float_type = (expr_type->id == ZigTypeIdVector) ? expr_type->data.vector.elem_type : expr_type;
    if (float_type->id != ZigTypeIdFloat) {
        ir_add_error(ira, &type_value->base,
            buf_sprintf("expected float or vector of float type, found '%s'", buf_ptr(&float_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *op1 = instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, expr_type);
    if (type_is_invalid(casted_op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, expr_type);
    if (type_is_invalid(casted_op2->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op3 = instruction->op3->child;
    if (type_is_invalid(op3->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op3 = ir_implicit_cast(ira, op3, expr_type);
    if (type_is_invalid(casted_op3->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(casted_op1) &&
        instr_is_comptime(casted_op2) &&
        instr_is_comptime(casted_op3)) {
        ZigValue *op1_const = ir_resolve_const(ira, casted_op1, UndefBad);
        if (!op1_const)
            return ira->codegen->invalid_inst_gen;
        ZigValue *op2_const = ir_resolve_const(ira, casted_op2, UndefBad);
        if (!op2_const)
            return ira->codegen->invalid_inst_gen;
        ZigValue *op3_const = ir_resolve_const(ira, casted_op3, UndefBad);
        if (!op3_const)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, &instruction->base.base, expr_type);
        ZigValue *out_val = result->value;

        if (expr_type->id == ZigTypeIdVector) {
            // Materialize element arrays for the operands and the output.
            expand_undef_array(ira->codegen, op1_const);
            expand_undef_array(ira->codegen, op2_const);
            expand_undef_array(ira->codegen, op3_const);
            out_val->special = ConstValSpecialUndef;
            expand_undef_array(ira->codegen, out_val);
            size_t len = expr_type->data.vector.len;
            for (size_t i = 0; i < len; i += 1) {
                ZigValue *float_operand_op1 = &op1_const->data.x_array.data.s_none.elements[i];
                ZigValue *float_operand_op2 = &op2_const->data.x_array.data.s_none.elements[i];
                ZigValue *float_operand_op3 = &op3_const->data.x_array.data.s_none.elements[i];
                ZigValue *float_out_val = &out_val->data.x_array.data.s_none.elements[i];
                assert(float_operand_op1->type == float_type);
                assert(float_operand_op2->type == float_type);
                assert(float_operand_op3->type == float_type);
                assert(float_out_val->type == float_type);
                // BUG FIX: evaluate each lane from its own element values.
                // The previous code passed the whole-vector constants
                // (op1_const/op2_const/op3_const) here, ignoring element i.
                ir_eval_mul_add(ira, instruction, float_type,
                        float_operand_op1, float_operand_op2, float_operand_op3, float_out_val);
                float_out_val->type = float_type;
            }
            out_val->type = expr_type;
            out_val->special = ConstValSpecialStatic;
        } else {
            ir_eval_mul_add(ira, instruction, float_type, op1_const, op2_const, op3_const, out_val);
        }
        return result;
    }

    return ir_build_mul_add_gen(ira, &instruction->base.base, casted_op1, casted_op2, casted_op3, expr_type);
}
|
|
|
|
// Analyze the "is this an error" test (the condition of `if (x) |v| else |e|`,
// `catch`, `try`). For an error union operand the answer may fold to a
// comptime bool; for a plain error set it is always true; for any other type
// it is always false.
static IrInstGen *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstSrcTestErr *instruction) {
    IrInstGen *base_ptr = instruction->base_ptr->child;
    if (type_is_invalid(base_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // The operand arrives either as the value itself or as a pointer to it.
    IrInstGen *value;
    if (instruction->base_ptr_is_payload) {
        value = base_ptr;
    } else {
        value = ir_get_deref(ira, &instruction->base.base, base_ptr, nullptr);
    }

    ZigType *type_entry = value->value->type;
    if (type_is_invalid(type_entry))
        return ira->codegen->invalid_inst_gen;
    if (type_entry->id == ZigTypeIdErrorUnion) {
        // Comptime-known error union: answer is whether the error field is set.
        if (instr_is_comptime(value)) {
            ZigValue *err_union_val = ir_resolve_const(ira, value, UndefBad);
            if (!err_union_val)
                return ira->codegen->invalid_inst_gen;

            if (err_union_val->special != ConstValSpecialRuntime) {
                ErrorTableEntry *err = err_union_val->data.x_err_union.error_set->data.x_err_set;
                return ir_const_bool(ira, &instruction->base.base, (err != nullptr));
            }
        }

        if (instruction->resolve_err_set) {
            // An inferred error set must be resolved before its member count
            // can be inspected.
            ZigType *err_set_type = type_entry->data.error_union.err_set_type;
            if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.base.source_node)) {
                return ira->codegen->invalid_inst_gen;
            }
            // A non-global error set with zero members can never hold an
            // error, so the test folds to false even at runtime.
            if (!type_is_global_error_set(err_set_type) &&
                err_set_type->data.error_set.err_count == 0)
            {
                assert(!err_set_type->data.error_set.incomplete);
                return ir_const_bool(ira, &instruction->base.base, false);
            }
        }

        return ir_build_test_err_gen(ira, &instruction->base.base, value);
    } else if (type_entry->id == ZigTypeIdErrorSet) {
        // A bare error set value is always an error.
        return ir_const_bool(ira, &instruction->base.base, true);
    } else {
        // Non-error operand: never an error.
        return ir_const_bool(ira, &instruction->base.base, false);
    }
}
|
|
|
|
static IrInstGen *ir_analyze_unwrap_err_code(IrAnalyze *ira, IrInst* source_instr,
    IrInstGen *base_ptr, bool initializing)
{
    // Produces a pointer to the error-set component of an error union, given
    // a pointer to the error union. When `initializing` is true, the pointer
    // is about to be written through, so an undefined error union pointee is
    // materialized here first.
    ZigType *ptr_type = base_ptr->value->type;

    // This will be a pointer type because unwrap err payload IR instruction operates on a pointer to a thing.
    assert(ptr_type->id == ZigTypeIdPointer);

    ZigType *type_entry = ptr_type->data.pointer.child_type;
    if (type_is_invalid(type_entry))
        return ira->codegen->invalid_inst_gen;

    if (type_entry->id != ZigTypeIdErrorUnion) {
        ir_add_error(ira, &base_ptr->base,
            buf_sprintf("expected error union type, found '%s'", buf_ptr(&type_entry->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Result: single-item pointer to the error set, inheriting const/volatile
    // and explicit alignment from the operand pointer.
    ZigType *err_set_type = type_entry->data.error_union.err_set_type;
    ZigType *result_type = get_pointer_to_type_extra(ira->codegen, err_set_type,
            ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, PtrLenSingle,
            ptr_type->data.pointer.explicit_alignment, 0, 0, false);

    if (instr_is_comptime(base_ptr)) {
        ZigValue *ptr_val = ir_resolve_const(ira, base_ptr, UndefBad);
        if (!ptr_val)
            return ira->codegen->invalid_inst_gen;
        // Only fold when the pointee is accessible during analysis:
        // runtime-var pointers and hard-coded addresses cannot be
        // dereferenced at comptime.
        if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar &&
            ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr)
        {
            ZigValue *err_union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node);
            if (err_union_val == nullptr)
                return ira->codegen->invalid_inst_gen;

            if (initializing && err_union_val->special == ConstValSpecialUndef) {
                // First write to an undefined error union: allocate its two
                // components (error set + payload) and wire up parent links so
                // that writes through component pointers update this value.
                ZigValue *vals = ira->codegen->pass1_arena->allocate<ZigValue>(2);
                ZigValue *err_set_val = &vals[0];
                ZigValue *payload_val = &vals[1];

                err_set_val->special = ConstValSpecialUndef;
                err_set_val->type = err_set_type;
                err_set_val->parent.id = ConstParentIdErrUnionCode;
                err_set_val->parent.data.p_err_union_code.err_union_val = err_union_val;

                payload_val->special = ConstValSpecialUndef;
                payload_val->type = type_entry->data.error_union.payload_type;
                payload_val->parent.id = ConstParentIdErrUnionPayload;
                payload_val->parent.data.p_err_union_payload.err_union_val = err_union_val;

                err_union_val->special = ConstValSpecialStatic;
                err_union_val->data.x_err_union.error_set = err_set_val;
                err_union_val->data.x_err_union.payload = payload_val;
            }
            ir_assert(err_union_val->special != ConstValSpecialRuntime, source_instr);

            IrInstGen *result;
            if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
                // Inferred-comptime pointer: keep a real runtime instruction
                // around in case analysis later demotes the value to runtime,
                // but mark its value comptime-known for now.
                result = ir_build_unwrap_err_code_gen(ira, source_instr->scope,
                        source_instr->source_node, base_ptr, result_type);
                result->value->special = ConstValSpecialStatic;
            } else {
                result = ir_const(ira, source_instr, result_type);
            }
            // The folded result is a special pointer kind that points at the
            // error-code slot of the error union value.
            ZigValue *const_val = result->value;
            const_val->data.x_ptr.special = ConstPtrSpecialBaseErrorUnionCode;
            const_val->data.x_ptr.data.base_err_union_code.err_union_val = err_union_val;
            const_val->data.x_ptr.mut = ptr_val->data.x_ptr.mut;
            return result;
        }
    }

    // Runtime path: emit the unwrap-err-code instruction.
    return ir_build_unwrap_err_code_gen(ira, source_instr->scope, source_instr->source_node, base_ptr, result_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_unwrap_err_code(IrAnalyze *ira, IrInstSrcUnwrapErrCode *instruction) {
    // Thin wrapper: unwrap the error code of an error union pointer, not in
    // initialization mode.
    IrInstGen *err_union_ptr = instruction->err_union_ptr->child;
    if (type_is_invalid(err_union_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_unwrap_err_code(ira, &instruction->base.base, err_union_ptr, false);
}
|
|
|
|
static IrInstGen *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInst* source_instr,
    IrInstGen *base_ptr, bool safety_check_on, bool initializing)
{
    // Produces a pointer to the payload component of an error union, given a
    // pointer to the error union. At comptime, unwrapping a value that holds
    // an error is a compile error. `initializing` means the returned pointer
    // is about to be written through, so an undefined error union is
    // materialized first; `safety_check_on` controls the runtime check.
    ZigType *ptr_type = base_ptr->value->type;

    // This will be a pointer type because unwrap err payload IR instruction operates on a pointer to a thing.
    assert(ptr_type->id == ZigTypeIdPointer);

    ZigType *type_entry = ptr_type->data.pointer.child_type;
    if (type_is_invalid(type_entry))
        return ira->codegen->invalid_inst_gen;

    if (type_entry->id != ZigTypeIdErrorUnion) {
        ir_add_error(ira, &base_ptr->base,
            buf_sprintf("expected error union type, found '%s'", buf_ptr(&type_entry->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *payload_type = type_entry->data.error_union.payload_type;
    if (type_is_invalid(payload_type))
        return ira->codegen->invalid_inst_gen;

    // Result: single-item pointer to the payload, inheriting const/volatile
    // from the operand pointer.
    ZigType *result_type = get_pointer_to_type_extra(ira->codegen, payload_type,
            ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
            PtrLenSingle, 0, 0, 0, false);

    if (instr_is_comptime(base_ptr)) {
        ZigValue *ptr_val = ir_resolve_const(ira, base_ptr, UndefBad);
        if (!ptr_val)
            return ira->codegen->invalid_inst_gen;
        // NOTE(review): unlike ir_analyze_unwrap_err_code, this branch does
        // not also exclude ConstPtrSpecialHardCodedAddr before dereferencing —
        // confirm const_ptr_pointee handles hard-coded addresses safely.
        if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
            ZigValue *err_union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node);
            if (err_union_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (initializing && err_union_val->special == ConstValSpecialUndef) {
                // Materialize the undefined error union as "no error,
                // undefined payload" so the payload pointer has a pointee.
                ZigValue *vals = ira->codegen->pass1_arena->allocate<ZigValue>(2);
                ZigValue *err_set_val = &vals[0];
                ZigValue *payload_val = &vals[1];

                // x_err_set == nullptr encodes "no error" here (see the
                // err != nullptr check below).
                err_set_val->special = ConstValSpecialStatic;
                err_set_val->type = type_entry->data.error_union.err_set_type;
                err_set_val->data.x_err_set = nullptr;

                payload_val->special = ConstValSpecialUndef;
                payload_val->type = payload_type;

                err_union_val->special = ConstValSpecialStatic;
                err_union_val->data.x_err_union.error_set = err_set_val;
                err_union_val->data.x_err_union.payload = payload_val;
            }

            if (err_union_val->special != ConstValSpecialRuntime) {
                // Comptime-known error union: holding an error is a compile
                // error; otherwise fold to a ref pointing at the payload.
                ErrorTableEntry *err = err_union_val->data.x_err_union.error_set->data.x_err_set;
                if (err != nullptr) {
                    ir_add_error(ira, source_instr,
                        buf_sprintf("caught unexpected error '%s'", buf_ptr(&err->name)));
                    return ira->codegen->invalid_inst_gen;
                }

                IrInstGen *result;
                if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
                    // Inferred-comptime pointer: keep the runtime instruction
                    // in case the value is later demoted to runtime.
                    result = ir_build_unwrap_err_payload_gen(ira, source_instr->scope,
                            source_instr->source_node, base_ptr, safety_check_on, initializing, result_type);
                    result->value->special = ConstValSpecialStatic;
                } else {
                    result = ir_const(ira, source_instr, result_type);
                }
                result->value->data.x_ptr.special = ConstPtrSpecialRef;
                result->value->data.x_ptr.data.ref.pointee = err_union_val->data.x_err_union.payload;
                result->value->data.x_ptr.mut = ptr_val->data.x_ptr.mut;
                return result;
            }
        }
    }

    // Runtime path: emit the unwrap-err-payload instruction.
    return ir_build_unwrap_err_payload_gen(ira, source_instr->scope, source_instr->source_node,
            base_ptr, safety_check_on, initializing, result_type);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira,
    IrInstSrcUnwrapErrPayload *instruction)
{
    // Thin wrapper: unwrap the payload of an error union pointer, not in
    // initialization mode; the safety-check flag comes from the source
    // instruction.
    assert(instruction->value->child);
    IrInstGen *err_union_ptr = instruction->value->child;
    if (type_is_invalid(err_union_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_unwrap_error_payload(ira, &instruction->base.base, err_union_ptr,
            instruction->safety_check_on, false);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstSrcFnProto *instruction) {
    // Builds a lazy function-type value for a fn proto expression. The actual
    // ZigType is only computed when something forces the lazy value, which
    // avoids resolving parameter/return types prematurely.
    AstNode *proto_node = instruction->base.base.source_node;
    assert(proto_node->type == NodeTypeFnProto);

    IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_type);
    result->value->special = ConstValSpecialLazy;

    LazyValueFnType *lazy_fn_type = heap::c_allocator.create<LazyValueFnType>();
    lazy_fn_type->ira = ira; ira_ref(ira);
    result->value->data.x_lazy = &lazy_fn_type->base;
    lazy_fn_type->base.id = LazyValueIdFnType;

    // NOTE(review): on the error returns below, lazy_fn_type and the ira
    // reference taken above are not released — presumably tolerated by
    // stage1's allocation strategy; confirm.
    if (proto_node->data.fn_proto.auto_err_set) {
        ir_add_error(ira, &instruction->base.base,
            buf_sprintf("inferring error set of return type valid only for function definitions"));
        return ira->codegen->invalid_inst_gen;
    }

    // Calling convention comes from the proto syntax unless an explicit
    // callconv expression overrides it; that expression must be a
    // comptime-known CallingConvention enum value.
    lazy_fn_type->cc = cc_from_fn_proto(&proto_node->data.fn_proto);
    if (instruction->callconv_value != nullptr) {
        ZigType *cc_enum_type = get_builtin_type(ira->codegen, "CallingConvention");

        IrInstGen *casted_value = ir_implicit_cast(ira, instruction->callconv_value->child, cc_enum_type);
        if (type_is_invalid(casted_value->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigValue *const_value = ir_resolve_const(ira, casted_value, UndefBad);
        if (const_value == nullptr)
            return ira->codegen->invalid_inst_gen;

        lazy_fn_type->cc = (CallingConvention)bigint_as_u32(&const_value->data.x_enum_tag);
    }

    size_t param_count = proto_node->data.fn_proto.params.length;
    lazy_fn_type->proto_node = proto_node;
    lazy_fn_type->param_types = heap::c_allocator.allocate<IrInstGen *>(param_count);

    for (size_t param_index = 0; param_index < param_count; param_index += 1) {
        AstNode *param_node = proto_node->data.fn_proto.params.at(param_index);
        assert(param_node->type == NodeTypeParamDecl);

        bool param_is_var_args = param_node->data.param_decl.is_var_args;
        if (param_is_var_args) {
            const CallingConvention cc = lazy_fn_type->cc;

            if (cc == CallingConventionC) {
                // C varargs terminate the parameter list; nothing follows.
                break;
            } else {
                ir_add_error(ira, &instruction->base.base,
                    buf_sprintf("var args only allowed in functions with C calling convention"));
                return ira->codegen->invalid_inst_gen;
            }
        }

        // A missing param type instruction marks the proto as generic
        // (presumably an `anytype` parameter — confirm); a generic fn type
        // cannot be fully described, so stop collecting parameter types.
        if (instruction->param_types[param_index] == nullptr) {
            lazy_fn_type->is_generic = true;
            return result;
        }

        IrInstGen *param_type_value = instruction->param_types[param_index]->child;
        if (type_is_invalid(param_type_value->value->type))
            return ira->codegen->invalid_inst_gen;
        // LazyOk: parameter types may themselves still be lazy values.
        if (ir_resolve_const(ira, param_type_value, LazyOk) == nullptr)
            return ira->codegen->invalid_inst_gen;
        lazy_fn_type->param_types[param_index] = param_type_value;
    }

    if (instruction->align_value != nullptr) {
        lazy_fn_type->align_inst = instruction->align_value->child;
        if (ir_resolve_const(ira, lazy_fn_type->align_inst, LazyOk) == nullptr)
            return ira->codegen->invalid_inst_gen;
    }

    lazy_fn_type->return_type = instruction->return_type->child;
    if (ir_resolve_const(ira, lazy_fn_type->return_type, LazyOk) == nullptr)
        return ira->codegen->invalid_inst_gen;

    return result;
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_test_comptime(IrAnalyze *ira, IrInstSrcTestComptime *instruction) {
    // Folds to true iff the operand's value is known during analysis.
    IrInstGen *operand = instruction->value->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    bool known_at_comptime = instr_is_comptime(operand);
    return ir_const_bool(ira, &instruction->base.base, known_at_comptime);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira,
    IrInstSrcCheckSwitchProngs *instruction)
{
    // Validates switch prongs against the target type: duplicate values,
    // unhandled values, missing/unreachable `else`, and misuse of the `_`
    // (non-exhaustive enum) prong. Most diagnostics are reported without
    // aborting, so several problems can be surfaced in one pass; the result
    // is void.
    IrInstGen *target_value = instruction->target_value->child;
    ZigType *switch_type = target_value->value->type;
    if (type_is_invalid(switch_type))
        return ira->codegen->invalid_inst_gen;

    // Look through the switch-target lowering to see whether the user
    // actually switched on a tagged union (whose tag enum is switch_type).
    ZigValue *original_value = ((IrInstSrcSwitchTarget *)(instruction->target_value))->target_value_ptr->child->value;
    bool target_is_originally_union = original_value->type->id == ZigTypeIdPointer &&
        original_value->type->data.pointer.child_type->id == ZigTypeIdUnion;

    if (switch_type->id == ZigTypeIdEnum) {
        // Maps enum tag value -> source node of the prong that first used it,
        // for duplicate detection and exhaustiveness checking.
        // NOTE(review): this HashMap is init'd but never deinit'd on any path
        // out of this branch — looks like a leak; confirm whether stage1
        // tolerates it (the ErrorSet and MetaType branches do clean up).
        HashMap<BigInt, AstNode *, bigint_hash, bigint_eql> field_prev_uses = {};
        field_prev_uses.init(switch_type->data.enumeration.src_field_count);

        for (size_t range_i = 0; range_i < instruction->range_count; range_i += 1) {
            IrInstSrcCheckSwitchProngsRange *range = &instruction->ranges[range_i];

            IrInstGen *start_value_uncasted = range->start->child;
            if (type_is_invalid(start_value_uncasted->value->type))
                return ira->codegen->invalid_inst_gen;
            IrInstGen *start_value = ir_implicit_cast(ira, start_value_uncasted, switch_type);
            if (type_is_invalid(start_value->value->type))
                return ira->codegen->invalid_inst_gen;

            IrInstGen *end_value_uncasted = range->end->child;
            if (type_is_invalid(end_value_uncasted->value->type))
                return ira->codegen->invalid_inst_gen;
            IrInstGen *end_value = ir_implicit_cast(ira, end_value_uncasted, switch_type);
            if (type_is_invalid(end_value->value->type))
                return ira->codegen->invalid_inst_gen;

            assert(start_value->value->type->id == ZigTypeIdEnum);
            BigInt start_index;
            bigint_init_bigint(&start_index, &start_value->value->data.x_enum_tag);

            assert(end_value->value->type->id == ZigTypeIdEnum);
            BigInt end_index;
            bigint_init_bigint(&end_index, &end_value->value->data.x_enum_tag);

            if (bigint_cmp(&start_index, &end_index) == CmpGT) {
                ir_add_error(ira, &start_value->base,
                    buf_sprintf("range start value is greater than the end value"));
            }

            // Record every tag in [start, end], reporting duplicates.
            BigInt field_index;
            bigint_init_bigint(&field_index, &start_index);
            for (;;) {
                Cmp cmp = bigint_cmp(&field_index, &end_index);
                if (cmp == CmpGT) {
                    break;
                }
                auto entry = field_prev_uses.put_unique(field_index, start_value->base.source_node);
                if (entry) {
                    AstNode *prev_node = entry->value;
                    TypeEnumField *enum_field = find_enum_field_by_tag(switch_type, &field_index);
                    assert(enum_field != nullptr);
                    ErrorMsg *msg = ir_add_error(ira, &start_value->base,
                        buf_sprintf("duplicate switch value: '%s.%s'", buf_ptr(&switch_type->name),
                            buf_ptr(enum_field->name)));
                    add_error_note(ira->codegen, msg, prev_node, buf_sprintf("other value is here"));
                }
                bigint_incr(&field_index);
            }
        }
        if (instruction->have_underscore_prong) {
            // `_` prong: only valid on a non-exhaustive enum, and not when
            // the switch target was a tagged union.
            if (!switch_type->data.enumeration.non_exhaustive) {
                ir_add_error(ira, &instruction->base.base,
                    buf_sprintf("switch on exhaustive enum has `_` prong"));
            } else if (target_is_originally_union) {
                ir_add_error(ira, &instruction->base.base,
                    buf_sprintf("`_` prong not allowed when switching on tagged union"));
            }
            // With a `_` prong, every named field must still be handled.
            for (uint32_t i = 0; i < switch_type->data.enumeration.src_field_count; i += 1) {
                TypeEnumField *enum_field = &switch_type->data.enumeration.fields[i];
                // "_" is the placeholder field of a non-exhaustive enum, not
                // a real tag the user must handle.
                if (buf_eql_str(enum_field->name, "_"))
                    continue;

                auto entry = field_prev_uses.maybe_get(enum_field->value);
                if (!entry) {
                    ir_add_error(ira, &instruction->base.base,
                        buf_sprintf("enumeration value '%s.%s' not handled in switch", buf_ptr(&switch_type->name),
                            buf_ptr(enum_field->name)));
                }
            }
        } else if (instruction->else_prong == nullptr) {
            // No `else` and no `_`: the switch must be exhaustive over all
            // named fields.
            if (switch_type->data.enumeration.non_exhaustive && !target_is_originally_union) {
                ir_add_error(ira, &instruction->base.base,
                    buf_sprintf("switch on non-exhaustive enum must include `else` or `_` prong"));
            }
            for (uint32_t i = 0; i < switch_type->data.enumeration.src_field_count; i += 1) {
                TypeEnumField *enum_field = &switch_type->data.enumeration.fields[i];

                auto entry = field_prev_uses.maybe_get(enum_field->value);
                if (!entry) {
                    ir_add_error(ira, &instruction->base.base,
                        buf_sprintf("enumeration value '%s.%s' not handled in switch", buf_ptr(&switch_type->name),
                            buf_ptr(enum_field->name)));
                }
            }
        } else if(!switch_type->data.enumeration.non_exhaustive && switch_type->data.enumeration.src_field_count == instruction->range_count) {
            // Every field is listed explicitly, so the `else` can never fire.
            ir_add_error_node(ira, instruction->else_prong,
                buf_sprintf("unreachable else prong, all cases already handled"));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (switch_type->id == ZigTypeIdErrorSet) {
        if (!resolve_inferred_error_set(ira->codegen, switch_type, target_value->base.source_node)) {
            return ira->codegen->invalid_inst_gen;
        }

        // Indexed by global error value; entry is the source node of the
        // prong that first handled that error. allocate() is assumed to
        // zero-initialize (nullptr entries) — confirm against heap.hpp.
        // NOTE(review): this array is deallocated at the end of the branch
        // but leaks on the early error returns below.
        size_t field_prev_uses_count = ira->codegen->errors_by_index.length;
        AstNode **field_prev_uses = heap::c_allocator.allocate<AstNode *>(field_prev_uses_count);

        for (size_t range_i = 0; range_i < instruction->range_count; range_i += 1) {
            IrInstSrcCheckSwitchProngsRange *range = &instruction->ranges[range_i];

            IrInstGen *start_value_uncasted = range->start->child;
            if (type_is_invalid(start_value_uncasted->value->type))
                return ira->codegen->invalid_inst_gen;
            IrInstGen *start_value = ir_implicit_cast(ira, start_value_uncasted, switch_type);
            if (type_is_invalid(start_value->value->type))
                return ira->codegen->invalid_inst_gen;

            IrInstGen *end_value_uncasted = range->end->child;
            if (type_is_invalid(end_value_uncasted->value->type))
                return ira->codegen->invalid_inst_gen;
            IrInstGen *end_value = ir_implicit_cast(ira, end_value_uncasted, switch_type);
            if (type_is_invalid(end_value->value->type))
                return ira->codegen->invalid_inst_gen;

            ir_assert(start_value->value->type->id == ZigTypeIdErrorSet, &instruction->base.base);
            uint32_t start_index = start_value->value->data.x_err_set->value;

            ir_assert(end_value->value->type->id == ZigTypeIdErrorSet, &instruction->base.base);
            uint32_t end_index = end_value->value->data.x_err_set->value;

            // Errors have no meaningful ordering, so a prong must be a
            // single error, never a range.
            if (start_index != end_index) {
                ir_add_error(ira, &end_value->base, buf_sprintf("ranges not allowed when switching on errors"));
                return ira->codegen->invalid_inst_gen;
            }

            AstNode *prev_node = field_prev_uses[start_index];
            if (prev_node != nullptr) {
                Buf *err_name = &ira->codegen->errors_by_index.at(start_index)->name;
                ErrorMsg *msg = ir_add_error(ira, &start_value->base,
                    buf_sprintf("duplicate switch value: '%s.%s'", buf_ptr(&switch_type->name), buf_ptr(err_name)));
                add_error_note(ira->codegen, msg, prev_node, buf_sprintf("other value is here"));
            }
            field_prev_uses[start_index] = start_value->base.source_node;
        }
        if (instruction->else_prong == nullptr) {
            if (type_is_global_error_set(switch_type)) {
                // anyerror is open-ended; it can never be handled exhaustively.
                ir_add_error(ira, &instruction->base.base,
                    buf_sprintf("else prong required when switching on type 'anyerror'"));
                return ira->codegen->invalid_inst_gen;
            } else {
                // Every member of the error set must have a prong.
                for (uint32_t i = 0; i < switch_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *err_entry = switch_type->data.error_set.errors[i];

                    AstNode *prev_node = field_prev_uses[err_entry->value];
                    if (prev_node == nullptr) {
                        ir_add_error(ira, &instruction->base.base,
                            buf_sprintf("error.%s not handled in switch", buf_ptr(&err_entry->name)));
                    }
                }
            }
        }

        heap::c_allocator.deallocate(field_prev_uses, field_prev_uses_count);
    } else if (switch_type->id == ZigTypeIdInt) {
        // Accumulate all prong ranges, then check they span the full domain
        // of the integer type (or that an `else` prong exists).
        RangeSet rs = {0};
        for (size_t range_i = 0; range_i < instruction->range_count; range_i += 1) {
            IrInstSrcCheckSwitchProngsRange *range = &instruction->ranges[range_i];

            IrInstGen *start_value = range->start->child;
            if (type_is_invalid(start_value->value->type))
                return ira->codegen->invalid_inst_gen;
            IrInstGen *casted_start_value = ir_implicit_cast(ira, start_value, switch_type);
            if (type_is_invalid(casted_start_value->value->type))
                return ira->codegen->invalid_inst_gen;

            IrInstGen *end_value = range->end->child;
            if (type_is_invalid(end_value->value->type))
                return ira->codegen->invalid_inst_gen;
            IrInstGen *casted_end_value = ir_implicit_cast(ira, end_value, switch_type);
            if (type_is_invalid(casted_end_value->value->type))
                return ira->codegen->invalid_inst_gen;

            ZigValue *start_val = ir_resolve_const(ira, casted_start_value, UndefBad);
            if (!start_val)
                return ira->codegen->invalid_inst_gen;

            ZigValue *end_val = ir_resolve_const(ira, casted_end_value, UndefBad);
            if (!end_val)
                return ira->codegen->invalid_inst_gen;

            assert(start_val->type->id == ZigTypeIdInt || start_val->type->id == ZigTypeIdComptimeInt);
            assert(end_val->type->id == ZigTypeIdInt || end_val->type->id == ZigTypeIdComptimeInt);

            if (bigint_cmp(&start_val->data.x_bigint, &end_val->data.x_bigint) == CmpGT) {
                ir_add_error(ira, &start_value->base,
                    buf_sprintf("range start value is greater than the end value"));
            }

            AstNode *prev_node = rangeset_add_range(&rs, &start_val->data.x_bigint, &end_val->data.x_bigint,
                start_value->base.source_node);
            if (prev_node != nullptr) {
                ErrorMsg *msg = ir_add_error(ira, &start_value->base, buf_sprintf("duplicate switch value"));
                add_error_note(ira->codegen, msg, prev_node, buf_sprintf("previous value is here"));
                return ira->codegen->invalid_inst_gen;
            }
        }

        BigInt min_val;
        eval_min_max_value_int(ira->codegen, switch_type, &min_val, false);
        BigInt max_val;
        eval_min_max_value_int(ira->codegen, switch_type, &max_val, true);
        bool handles_all_cases = rangeset_spans(&rs, &min_val, &max_val);
        if (!handles_all_cases && instruction->else_prong == nullptr) {
            ir_add_error(ira, &instruction->base.base, buf_sprintf("switch must handle all possibilities"));
            return ira->codegen->invalid_inst_gen;
        } else if(handles_all_cases && instruction->else_prong != nullptr) {
            ir_add_error_node(ira, instruction->else_prong,
                buf_sprintf("unreachable else prong, all cases already handled"));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (switch_type->id == ZigTypeIdBool) {
        // Count occurrences of each bool value; >1 of either is a duplicate,
        // and both present makes `else` unreachable.
        int seenTrue = 0;
        int seenFalse = 0;
        for (size_t range_i = 0; range_i < instruction->range_count; range_i += 1) {
            IrInstSrcCheckSwitchProngsRange *range = &instruction->ranges[range_i];

            IrInstGen *value = range->start->child;

            IrInstGen *casted_value = ir_implicit_cast(ira, value, switch_type);
            if (type_is_invalid(casted_value->value->type))
                return ira->codegen->invalid_inst_gen;

            ZigValue *const_expr_val = ir_resolve_const(ira, casted_value, UndefBad);
            if (!const_expr_val)
                return ira->codegen->invalid_inst_gen;

            assert(const_expr_val->type->id == ZigTypeIdBool);

            if (const_expr_val->data.x_bool == true) {
                seenTrue += 1;
            } else {
                seenFalse += 1;
            }

            if ((seenTrue > 1) || (seenFalse > 1)) {
                ir_add_error(ira, &value->base, buf_sprintf("duplicate switch value"));
                return ira->codegen->invalid_inst_gen;
            }
        }
        if (((seenTrue < 1) || (seenFalse < 1)) && instruction->else_prong == nullptr) {
            ir_add_error(ira, &instruction->base.base, buf_sprintf("switch must handle all possibilities"));
            return ira->codegen->invalid_inst_gen;
        }

        if(seenTrue == 1 && seenFalse == 1 && instruction->else_prong != nullptr) {
            ir_add_error_node(ira, instruction->else_prong,
                buf_sprintf("unreachable else prong, all cases already handled"));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (instruction->else_prong == nullptr) {
        // Any other type cannot be proven exhaustive, so `else` is required.
        // (Note this check deliberately precedes the MetaType branch below:
        // type switches are only validated further when `else` is present.)
        ir_add_error(ira, &instruction->base.base,
            buf_sprintf("else prong required when switching on type '%s'", buf_ptr(&switch_type->name)));
        return ira->codegen->invalid_inst_gen;
    } else if(switch_type->id == ZigTypeIdMetaType) {
        // Switching on types: only duplicates can be diagnosed.
        HashMap<const ZigType*, IrInstGen*, type_ptr_hash, type_ptr_eql> prevs;
        // HashMap doubles capacity when reaching 60% capacity,
        // because we know the size at init we can avoid reallocation by doubling it here
        prevs.init(instruction->range_count * 2);
        for (size_t range_i = 0; range_i < instruction->range_count; range_i += 1) {
            IrInstSrcCheckSwitchProngsRange *range = &instruction->ranges[range_i];

            IrInstGen *value = range->start->child;
            IrInstGen *casted_value = ir_implicit_cast(ira, value, switch_type);
            if (type_is_invalid(casted_value->value->type)) {
                prevs.deinit();
                return ira->codegen->invalid_inst_gen;
            }

            ZigValue *const_expr_val = ir_resolve_const(ira, casted_value, UndefBad);
            if (!const_expr_val) {
                prevs.deinit();
                return ira->codegen->invalid_inst_gen;
            }

            auto entry = prevs.put_unique(const_expr_val->data.x_type, value);
            if(entry != nullptr) {
                ErrorMsg *msg = ir_add_error(ira, &value->base, buf_sprintf("duplicate switch value"));
                add_error_note(ira->codegen, msg, entry->value->base.source_node, buf_sprintf("previous value is here"));
                prevs.deinit();
                return ira->codegen->invalid_inst_gen;
            }
        }
        prevs.deinit();
    }
    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_check_statement_is_void(IrAnalyze *ira,
    IrInstSrcCheckStatementIsVoid *instruction)
{
    // A statement used for its side effects must have type void (or
    // noreturn); anything else means a value is being silently discarded,
    // which gets a diagnostic but does not abort analysis.
    IrInstGen *statement_value = instruction->statement_value->child;
    ZigType *statement_type = statement_value->value->type;
    if (type_is_invalid(statement_type))
        return ira->codegen->invalid_inst_gen;

    bool is_void_like = statement_type->id == ZigTypeIdVoid ||
        statement_type->id == ZigTypeIdUnreachable;
    if (!is_void_like) {
        // Dropped errors get a more specific message than other values.
        bool is_error_like = statement_type->id == ZigTypeIdErrorUnion ||
            statement_type->id == ZigTypeIdErrorSet;
        if (is_error_like) {
            ir_add_error(ira, &instruction->base.base, buf_sprintf("error is ignored. consider using `try`, `catch`, or `if`"));
        } else {
            ir_add_error(ira, &instruction->base.base, buf_sprintf("expression value is ignored"));
        }
    }

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
static IrInstGen *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstSrcPanic *instruction) {
    // Analyzes @panic(msg): the message must coerce to a []const u8 slice,
    // and reaching a @panic during comptime execution is itself a compile
    // error. All failure paths yield an unreachable-typed error result.
    IrInstGen *msg_operand = instruction->msg->child;
    if (type_is_invalid(msg_operand->value->type))
        return ir_unreach_error(ira);

    if (ir_should_inline(ira->old_irb.exec, instruction->base.base.scope)) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("encountered @panic at compile-time"));
        return ir_unreach_error(ira);
    }

    // Build the []const u8 slice type and coerce the message into it.
    ZigType *const_u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
            true, false, PtrLenUnknown, 0, 0, 0, false);
    ZigType *msg_slice_type = get_slice_type(ira->codegen, const_u8_ptr);
    IrInstGen *coerced_msg = ir_implicit_cast(ira, msg_operand, msg_slice_type);
    if (type_is_invalid(coerced_msg->value->type))
        return ir_unreach_error(ira);

    IrInstGen *panic_inst = ir_build_panic_gen(ira, &instruction->base.base, coerced_msg);
    return ir_finish_anal(ira, panic_inst);
}
|
|
|
|
static IrInstGen *ir_align_cast(IrAnalyze *ira, IrInstGen *target, uint32_t align_bytes, bool safety_check_on) {
    // Implements @alignCast for pointers, fn pointers, anyframe, optional
    // pointers/fns, and slices: produces the same value with the requested
    // alignment encoded in its type. Comptime-known hard-coded addresses are
    // verified here; otherwise a runtime safety check is emitted when the
    // alignment is being increased.
    Error err;

    ZigType *target_type = target->value->type;
    assert(!type_is_invalid(target_type));

    ZigType *result_type;
    // Alignment currently guaranteed by the operand's type.
    // Fix: initialize it — previously the ZigTypeIdAnyFrame branch left it
    // unassigned, and it was then read in the safety-check condition at the
    // bottom of this function (read of an indeterminate value: UB).
    uint32_t old_align_bytes = 0;

    // Look through optionals and slices to the pointer whose zero-bit-ness
    // decides whether adjusting alignment is meaningful at all.
    ZigType *actual_ptr = target_type;
    if (actual_ptr->id == ZigTypeIdOptional) {
        actual_ptr = actual_ptr->data.maybe.child_type;
    } else if (is_slice(actual_ptr)) {
        actual_ptr = actual_ptr->data.structure.fields[slice_ptr_index]->type_entry;
    }

    if (safety_check_on && !type_has_bits(ira->codegen, actual_ptr)) {
        ir_add_error(ira, &target->base,
            buf_sprintf("cannot adjust alignment of zero sized type '%s'", buf_ptr(&target_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (target_type->id == ZigTypeIdPointer) {
        result_type = adjust_ptr_align(ira->codegen, target_type, align_bytes);
        if ((err = resolve_ptr_align(ira, target_type, &old_align_bytes)))
            return ira->codegen->invalid_inst_gen;
    } else if (target_type->id == ZigTypeIdFn) {
        FnTypeId fn_type_id = target_type->data.fn.fn_type_id;
        old_align_bytes = fn_type_id.alignment;
        fn_type_id.alignment = align_bytes;
        result_type = get_fn_type(ira->codegen, &fn_type_id);
    } else if (target_type->id == ZigTypeIdAnyFrame) {
        // anyframe values are guaranteed the target's function alignment;
        // requesting at least that much leaves the type unchanged.
        old_align_bytes = target_fn_align(ira->codegen->zig_target);
        if (align_bytes >= old_align_bytes) {
            result_type = target_type;
        } else {
            ir_add_error(ira, &target->base, buf_sprintf("sub-aligned anyframe not allowed"));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (target_type->id == ZigTypeIdOptional &&
        target_type->data.maybe.child_type->id == ZigTypeIdPointer)
    {
        ZigType *ptr_type = target_type->data.maybe.child_type;
        if ((err = resolve_ptr_align(ira, ptr_type, &old_align_bytes)))
            return ira->codegen->invalid_inst_gen;
        ZigType *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes);

        result_type = get_optional_type(ira->codegen, better_ptr_type);
    } else if (target_type->id == ZigTypeIdOptional &&
        target_type->data.maybe.child_type->id == ZigTypeIdFn)
    {
        FnTypeId fn_type_id = target_type->data.maybe.child_type->data.fn.fn_type_id;
        old_align_bytes = fn_type_id.alignment;
        fn_type_id.alignment = align_bytes;
        ZigType *fn_type = get_fn_type(ira->codegen, &fn_type_id);
        result_type = get_optional_type(ira->codegen, fn_type);
    } else if (is_slice(target_type)) {
        ZigType *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index]->type_entry;
        if ((err = resolve_ptr_align(ira, slice_ptr_type, &old_align_bytes)))
            return ira->codegen->invalid_inst_gen;
        ZigType *result_ptr_type = adjust_ptr_align(ira->codegen, slice_ptr_type, align_bytes);
        result_type = get_slice_type(ira->codegen, result_ptr_type);
    } else {
        ir_add_error(ira, &target->base,
            buf_sprintf("expected pointer or slice, found '%s'", buf_ptr(&target_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        // A comptime-known hard-coded address can be checked right now
        // instead of at runtime.
        if (val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
            val->data.x_ptr.data.hard_coded_addr.addr % align_bytes != 0)
        {
            ir_add_error(ira, &target->base,
                buf_sprintf("pointer address 0x%" ZIG_PRI_x64 " is not aligned to %" PRIu32 " bytes",
                    val->data.x_ptr.data.hard_coded_addr.addr, align_bytes));
            return ira->codegen->invalid_inst_gen;
        }

        IrInstGen *result = ir_const(ira, &target->base, result_type);
        copy_const_val(ira->codegen, result->value, val);
        result->value->type = result_type;
        return result;
    }

    // Only increasing the alignment can fail at runtime, so only then emit
    // the safety-checked cast (alignment of 1 is always satisfied).
    if (safety_check_on && align_bytes > old_align_bytes && align_bytes != 1) {
        return ir_build_align_cast_gen(ira, target->base.scope, target->base.source_node, target, result_type);
    } else {
        return ir_build_cast(ira, &target->base, result_type, target, CastOpNoop);
    }
}
|
|
|
|
static IrInstGen *ir_analyze_ptr_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *ptr,
|
|
IrInst *ptr_src, ZigType *dest_type, IrInst *dest_type_src, bool safety_check_on,
|
|
bool keep_bigger_alignment)
|
|
{
|
|
Error err;
|
|
|
|
ZigType *src_type = ptr->value->type;
|
|
assert(!type_is_invalid(src_type));
|
|
|
|
if (src_type == dest_type) {
|
|
return ptr;
|
|
}
|
|
|
|
// We have a check for zero bits later so we use get_src_ptr_type to
|
|
// validate src_type and dest_type.
|
|
|
|
ZigType *if_slice_ptr_type;
|
|
if (is_slice(src_type)) {
|
|
TypeStructField *ptr_field = src_type->data.structure.fields[slice_ptr_index];
|
|
if_slice_ptr_type = resolve_struct_field_type(ira->codegen, ptr_field);
|
|
} else {
|
|
if_slice_ptr_type = src_type;
|
|
|
|
ZigType *src_ptr_type = get_src_ptr_type(src_type);
|
|
if (src_ptr_type == nullptr) {
|
|
ir_add_error(ira, ptr_src, buf_sprintf("expected pointer, found '%s'", buf_ptr(&src_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
|
|
ZigType *dest_ptr_type = get_src_ptr_type(dest_type);
|
|
if (dest_ptr_type == nullptr) {
|
|
ir_add_error(ira, dest_type_src,
|
|
buf_sprintf("expected pointer, found '%s'", buf_ptr(&dest_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
if (get_ptr_const(ira->codegen, src_type) && !get_ptr_const(ira->codegen, dest_type)) {
|
|
ir_add_error(ira, source_instr, buf_sprintf("cast discards const qualifier"));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
uint32_t dest_align_bytes;
|
|
if ((err = resolve_ptr_align(ira, dest_type, &dest_align_bytes)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
uint32_t src_align_bytes = 0;
|
|
if (keep_bigger_alignment || dest_align_bytes != 1) {
|
|
if ((err = resolve_ptr_align(ira, src_type, &src_align_bytes)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
if ((err = type_resolve(ira->codegen, dest_type, ResolveStatusZeroBitsKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if ((err = type_resolve(ira->codegen, src_type, ResolveStatusZeroBitsKnown)))
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (safety_check_on &&
|
|
type_has_bits(ira->codegen, dest_type) &&
|
|
!type_has_bits(ira->codegen, if_slice_ptr_type))
|
|
{
|
|
ErrorMsg *msg = ir_add_error(ira, source_instr,
|
|
buf_sprintf("'%s' and '%s' do not have the same in-memory representation",
|
|
buf_ptr(&src_type->name), buf_ptr(&dest_type->name)));
|
|
add_error_note(ira->codegen, msg, ptr_src->source_node,
|
|
buf_sprintf("'%s' has no in-memory bits", buf_ptr(&src_type->name)));
|
|
add_error_note(ira->codegen, msg, dest_type_src->source_node,
|
|
buf_sprintf("'%s' has in-memory bits", buf_ptr(&dest_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
// For slices, follow the `ptr` field.
|
|
if (is_slice(src_type)) {
|
|
TypeStructField *ptr_field = src_type->data.structure.fields[slice_ptr_index];
|
|
IrInstGen *ptr_ref = ir_get_ref(ira, source_instr, ptr, true, false);
|
|
IrInstGen *ptr_ptr = ir_analyze_struct_field_ptr(ira, source_instr, ptr_field, ptr_ref, src_type, false);
|
|
ptr = ir_get_deref(ira, source_instr, ptr_ptr, nullptr);
|
|
}
|
|
|
|
if (instr_is_comptime(ptr)) {
|
|
bool dest_allows_addr_zero = ptr_allows_addr_zero(dest_type);
|
|
UndefAllowed is_undef_allowed = dest_allows_addr_zero ? UndefOk : UndefBad;
|
|
ZigValue *val = ir_resolve_const(ira, ptr, is_undef_allowed);
|
|
if (val == nullptr)
|
|
return ira->codegen->invalid_inst_gen;
|
|
|
|
if (value_is_comptime(val) && val->special != ConstValSpecialUndef) {
|
|
bool is_addr_zero = val->data.x_ptr.special == ConstPtrSpecialNull ||
|
|
(val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
|
|
val->data.x_ptr.data.hard_coded_addr.addr == 0);
|
|
if (is_addr_zero && !dest_allows_addr_zero) {
|
|
ir_add_error(ira, source_instr,
|
|
buf_sprintf("null pointer casted to type '%s'", buf_ptr(&dest_type->name)));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
}
|
|
|
|
IrInstGen *result;
|
|
if (val->data.x_ptr.mut == ConstPtrMutInfer) {
|
|
result = ir_build_ptr_cast_gen(ira, source_instr, dest_type, ptr, safety_check_on);
|
|
} else {
|
|
result = ir_const(ira, source_instr, dest_type);
|
|
}
|
|
InferredStructField *isf = (val->type->id == ZigTypeIdPointer) ?
|
|
val->type->data.pointer.inferred_struct_field : nullptr;
|
|
if (isf == nullptr) {
|
|
copy_const_val(ira->codegen, result->value, val);
|
|
} else {
|
|
// The destination value should have x_ptr struct pointing to underlying struct value
|
|
result->value->data.x_ptr.mut = val->data.x_ptr.mut;
|
|
TypeStructField *field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
|
|
assert(field != nullptr);
|
|
if (field->is_comptime) {
|
|
result->value->data.x_ptr.special = ConstPtrSpecialRef;
|
|
result->value->data.x_ptr.data.ref.pointee = field->init_val;
|
|
} else {
|
|
assert(val->data.x_ptr.special == ConstPtrSpecialRef);
|
|
result->value->data.x_ptr.special = ConstPtrSpecialBaseStruct;
|
|
result->value->data.x_ptr.data.base_struct.struct_val = val->data.x_ptr.data.ref.pointee;
|
|
result->value->data.x_ptr.data.base_struct.field_index = field->src_index;
|
|
}
|
|
result->value->special = ConstValSpecialStatic;
|
|
}
|
|
result->value->type = dest_type;
|
|
|
|
// Keep the bigger alignment, it can only help- unless the target is zero bits.
|
|
if (keep_bigger_alignment && src_align_bytes > dest_align_bytes && type_has_bits(ira->codegen, dest_type)) {
|
|
result = ir_align_cast(ira, result, src_align_bytes, false);
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
if (src_align_bytes != 0 && dest_align_bytes > src_align_bytes) {
|
|
ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
|
|
add_error_note(ira->codegen, msg, ptr_src->source_node,
|
|
buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&src_type->name), src_align_bytes));
|
|
add_error_note(ira->codegen, msg, dest_type_src->source_node,
|
|
buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&dest_type->name), dest_align_bytes));
|
|
return ira->codegen->invalid_inst_gen;
|
|
}
|
|
|
|
IrInstGen *casted_ptr = ir_build_ptr_cast_gen(ira, source_instr, dest_type, ptr, safety_check_on);
|
|
|
|
// Keep the bigger alignment, it can only help- unless the target is zero bits.
|
|
IrInstGen *result;
|
|
if (keep_bigger_alignment && src_align_bytes > dest_align_bytes && type_has_bits(ira->codegen, dest_type)) {
|
|
result = ir_align_cast(ira, casted_ptr, src_align_bytes, false);
|
|
if (type_is_invalid(result->value->type))
|
|
return ira->codegen->invalid_inst_gen;
|
|
} else {
|
|
result = casted_ptr;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
// Analyze a source-level @ptrCast instruction: resolve the destination type
// and the pointer operand, then delegate the real work to ir_analyze_ptr_cast.
static IrInstGen *ir_analyze_instruction_ptr_cast(IrAnalyze *ira, IrInstSrcPtrCast *instruction) {
    IrInstGen *dest_type_inst = instruction->dest_type->child;
    ZigType *dest_type = ir_resolve_type(ira, dest_type_inst);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *operand = instruction->ptr->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    // @ptrCast keeps the larger of the source/destination alignments.
    const bool keep_bigger_alignment = true;
    return ir_analyze_ptr_cast(ira, &instruction->base.base, operand, &instruction->ptr->base,
            dest_type, &dest_type_inst->base, instruction->safety_check_on, keep_bigger_alignment);
}
|
|
|
|
// Serialize `len` elements of the comptime array/vector value `val` into
// `buf`, tightly packed, followed by the sentinel value when the array type
// declares one.
static void buf_write_value_bytes_array(CodeGen *codegen, uint8_t *buf, ZigValue *val, size_t len) {
    size_t buf_i = 0;
    // TODO optimize the buf case
    expand_undef_array(codegen, val);
    // Use the caller-supplied element count instead of reading
    // val->type->data.array.len: for vector values the length lives in
    // data.vector.len, so reading the array union member here is wrong.
    for (size_t elem_i = 0; elem_i < len; elem_i += 1) {
        ZigValue *elem = &val->data.x_array.data.s_none.elements[elem_i];
        buf_write_value_bytes(codegen, &buf[buf_i], elem);
        buf_i += type_size(codegen, elem->type);
    }
    // Arrays may carry a sentinel element after the payload; vectors never do.
    if (val->type->id == ZigTypeIdArray && val->type->data.array.sentinel != nullptr) {
        buf_write_value_bytes(codegen, &buf[buf_i], val->type->data.array.sentinel);
    }
}
|
|
|
|
// Serialize the comptime value `val` into `buf` using the target's in-memory
// representation (respecting target endianness). This is the "write" half of
// the comptime @bitCast machinery; buf_read_value_bytes is its inverse.
// The caller guarantees `buf` is at least type_size(codegen, val->type) bytes.
static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ZigValue *val) {
    if (val->special == ConstValSpecialUndef) {
        // Materialize undef into concrete field/element values so each piece
        // can be written out below.
        expand_undef_struct(codegen, val);
        val->special = ConstValSpecialStatic;
    }
    assert(val->special == ConstValSpecialStatic);
    switch (val->type->id) {
        // Types with no byte representation, or that are rejected before we
        // can get here.
        case ZigTypeIdInvalid:
        case ZigTypeIdMetaType:
        case ZigTypeIdOpaque:
        case ZigTypeIdBoundFn:
        case ZigTypeIdUnreachable:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdErrorUnion:
        case ZigTypeIdErrorSet:
            zig_unreachable();
        case ZigTypeIdVoid:
            // Zero-sized; nothing to write.
            return;
        case ZigTypeIdBool:
            buf[0] = val->data.x_bool ? 1 : 0;
            return;
        case ZigTypeIdInt:
            bigint_write_twos_complement(&val->data.x_bigint, buf, val->type->data.integral.bit_count,
                    codegen->is_big_endian);
            return;
        case ZigTypeIdEnum:
            // An enum is represented by its integer tag value.
            bigint_write_twos_complement(&val->data.x_enum_tag, buf,
                    val->type->data.enumeration.tag_int_type->data.integral.bit_count,
                    codegen->is_big_endian);
            return;
        case ZigTypeIdFloat:
            float_write_ieee597(val, buf, codegen->is_big_endian);
            return;
        case ZigTypeIdPointer:
            // Only pointers with a known integer address can be serialized.
            if (val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
                BigInt bn;
                bigint_init_unsigned(&bn, val->data.x_ptr.data.hard_coded_addr.addr);
                bigint_write_twos_complement(&bn, buf, codegen->builtin_types.entry_usize->data.integral.bit_count, codegen->is_big_endian);
                return;
            } else {
                zig_unreachable();
            }
        case ZigTypeIdArray:
            return buf_write_value_bytes_array(codegen, buf, val, val->type->data.array.len);
        case ZigTypeIdVector:
            return buf_write_value_bytes_array(codegen, buf, val, val->type->data.vector.len);
        case ZigTypeIdStruct:
            switch (val->type->data.structure.layout) {
                case ContainerLayoutAuto:
                    // Auto-layout structs have no defined byte layout and are
                    // rejected before serialization.
                    zig_unreachable();
                case ContainerLayoutExtern: {
                    // Extern layout: write each field at its ABI offset.
                    size_t src_field_count = val->type->data.structure.src_field_count;
                    for (size_t field_i = 0; field_i < src_field_count; field_i += 1) {
                        TypeStructField *struct_field = val->type->data.structure.fields[field_i];
                        // gen_index == SIZE_MAX means the field is zero-bit
                        // and occupies no storage.
                        if (struct_field->gen_index == SIZE_MAX)
                            continue;
                        ZigValue *field_val = val->data.x_struct.fields[field_i];
                        size_t offset = struct_field->offset;
                        buf_write_value_bytes(codegen, buf + offset, field_val);
                    }
                    return;
                }
                case ContainerLayoutPacked: {
                    // Packed layout: source fields are grouped into "host
                    // integers" (one per gen field); each group is assembled
                    // bit-by-bit into a BigInt and then written out whole.
                    size_t src_field_count = val->type->data.structure.src_field_count;
                    size_t gen_field_count = val->type->data.structure.gen_field_count;
                    size_t gen_i = 0;
                    size_t src_i = 0;
                    size_t offset = 0;
                    bool is_big_endian = codegen->is_big_endian;
                    // Scratch buffer for serializing one field at a time;
                    // grown from the heap only when a host integer exceeds
                    // the preallocated 16 bytes.
                    uint8_t child_buf_prealloc[16];
                    size_t child_buf_len = 16;
                    uint8_t *child_buf = child_buf_prealloc;
                    while (gen_i < gen_field_count) {
                        size_t big_int_byte_count = val->type->data.structure.host_int_bytes[gen_i];
                        if (big_int_byte_count > child_buf_len) {
                            child_buf = heap::c_allocator.allocate_nonzero<uint8_t>(big_int_byte_count);
                            child_buf_len = big_int_byte_count;
                        }
                        BigInt big_int;
                        bigint_init_unsigned(&big_int, 0);
                        size_t used_bits = 0;
                        // Fold every source field belonging to this host
                        // integer into big_int.
                        while (src_i < src_field_count) {
                            TypeStructField *field = val->type->data.structure.fields[src_i];
                            assert(field->gen_index != SIZE_MAX);
                            if (field->gen_index != gen_i)
                                break;
                            uint32_t packed_bits_size = type_size_bits(codegen, field->type_entry);
                            // Serialize the field value, then re-read it as an
                            // unsigned integer of its packed bit width.
                            buf_write_value_bytes(codegen, child_buf, val->data.x_struct.fields[src_i]);
                            BigInt child_val;
                            bigint_read_twos_complement(&child_val, child_buf, packed_bits_size, is_big_endian,
                                    false);
                            if (is_big_endian) {
                                // Big endian: shift the accumulator left and
                                // append the new field at the low end.
                                BigInt shift_amt;
                                bigint_init_unsigned(&shift_amt, packed_bits_size);
                                BigInt shifted;
                                bigint_shl(&shifted, &big_int, &shift_amt);
                                bigint_or(&big_int, &shifted, &child_val);
                            } else {
                                // Little endian: place the new field above the
                                // bits already consumed.
                                BigInt shift_amt;
                                bigint_init_unsigned(&shift_amt, used_bits);
                                BigInt child_val_shifted;
                                bigint_shl(&child_val_shifted, &child_val, &shift_amt);
                                BigInt tmp;
                                bigint_or(&tmp, &big_int, &child_val_shifted);
                                big_int = tmp;
                                used_bits += packed_bits_size;
                            }
                            src_i += 1;
                        }
                        bigint_write_twos_complement(&big_int, buf + offset, big_int_byte_count * 8, is_big_endian);
                        offset += big_int_byte_count;
                        gen_i += 1;
                    }
                    return;
                }
            }
            zig_unreachable();
        case ZigTypeIdOptional:
            zig_panic("TODO buf_write_value_bytes maybe type");
        case ZigTypeIdFn:
            zig_panic("TODO buf_write_value_bytes fn type");
        case ZigTypeIdUnion:
            zig_panic("TODO buf_write_value_bytes union type");
        case ZigTypeIdFnFrame:
            zig_panic("TODO buf_write_value_bytes async fn frame type");
        case ZigTypeIdAnyFrame:
            zig_panic("TODO buf_write_value_bytes anyframe type");
    }
    zig_unreachable();
}
|
|
|
|
// Deserialize `len` consecutive elements of type `elem_type` from `buf` into
// the array/vector value `val`, allocating the element storage from the
// pass1 arena. Returns the first error produced by an element read.
static Error buf_read_value_bytes_array(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node, uint8_t *buf,
        ZigValue *val, ZigType *elem_type, size_t len)
{
    Error err;
    const uint64_t stride = type_size(codegen, elem_type);

    switch (val->data.x_array.special) {
        case ConstArraySpecialNone: {
            ZigValue *elems = codegen->pass1_arena->allocate<ZigValue>(len);
            val->data.x_array.data.s_none.elements = elems;
            uint8_t *cursor = buf;
            for (size_t idx = 0; idx < len; idx += 1) {
                elems[idx].special = ConstValSpecialStatic;
                elems[idx].type = elem_type;
                if ((err = buf_read_value_bytes(ira, codegen, source_node, cursor, &elems[idx])))
                    return err;
                cursor += stride;
            }
            return ErrorNone;
        }
        case ConstArraySpecialUndef:
            zig_panic("TODO buf_read_value_bytes ConstArraySpecialUndef array type");
        case ConstArraySpecialBuf:
            zig_panic("TODO buf_read_value_bytes ConstArraySpecialBuf array type");
    }
    zig_unreachable();
}
|
|
|
|
// Deserialize bytes from `buf` into the comptime value `val`, whose `type`
// has already been set by the caller. This is the "read" half of the
// comptime @bitCast machinery and the inverse of buf_write_value_bytes.
// Returns ErrorSemanticAnalyzeFail (after emitting a diagnostic) for layouts
// that cannot be reinterpreted.
static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node, uint8_t *buf, ZigValue *val) {
    Error err;
    src_assert(val->special == ConstValSpecialStatic, source_node);
    switch (val->type->id) {
        // Types with no byte representation cannot reach this point.
        case ZigTypeIdInvalid:
        case ZigTypeIdMetaType:
        case ZigTypeIdOpaque:
        case ZigTypeIdBoundFn:
        case ZigTypeIdUnreachable:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
            zig_unreachable();
        case ZigTypeIdVoid:
            // Zero-sized; nothing to read.
            return ErrorNone;
        case ZigTypeIdBool:
            val->data.x_bool = (buf[0] != 0);
            return ErrorNone;
        case ZigTypeIdInt:
            bigint_read_twos_complement(&val->data.x_bigint, buf, val->type->data.integral.bit_count,
                    codegen->is_big_endian, val->type->data.integral.is_signed);
            return ErrorNone;
        case ZigTypeIdFloat:
            float_read_ieee597(val, buf, codegen->is_big_endian);
            return ErrorNone;
        case ZigTypeIdPointer:
            {
                // A pointer read back from raw bytes becomes a hard-coded
                // integer address.
                val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
                BigInt bn;
                bigint_read_twos_complement(&bn, buf, codegen->builtin_types.entry_usize->data.integral.bit_count,
                        codegen->is_big_endian, false);
                val->data.x_ptr.data.hard_coded_addr.addr = bigint_as_usize(&bn);
                return ErrorNone;
            }
        case ZigTypeIdArray:
            return buf_read_value_bytes_array(ira, codegen, source_node, buf, val, val->type->data.array.child_type,
                    val->type->data.array.len);
        case ZigTypeIdVector:
            return buf_read_value_bytes_array(ira, codegen, source_node, buf, val, val->type->data.vector.elem_type,
                    val->type->data.vector.len);
        case ZigTypeIdEnum:
            switch (val->type->data.enumeration.layout) {
                case ContainerLayoutAuto:
                    zig_panic("TODO buf_read_value_bytes enum auto");
                case ContainerLayoutPacked:
                    zig_panic("TODO buf_read_value_bytes enum packed");
                case ContainerLayoutExtern: {
                    // Extern enum: read the integer tag using the tag type's
                    // width and signedness.
                    ZigType *tag_int_type = val->type->data.enumeration.tag_int_type;
                    src_assert(tag_int_type->id == ZigTypeIdInt, source_node);
                    bigint_read_twos_complement(&val->data.x_enum_tag, buf, tag_int_type->data.integral.bit_count,
                            codegen->is_big_endian, tag_int_type->data.integral.is_signed);
                    return ErrorNone;
                }
            }
            zig_unreachable();
        case ZigTypeIdStruct:
            switch (val->type->data.structure.layout) {
                case ContainerLayoutAuto: {
                    // Auto-layout structs (and slices) have no defined byte
                    // layout; emit a diagnostic and fail.
                    switch(val->type->data.structure.special){
                        case StructSpecialNone:
                        case StructSpecialInferredTuple:
                        case StructSpecialInferredStruct: {
                            ErrorMsg *msg = opt_ir_add_error_node(ira, codegen, source_node,
                                buf_sprintf("non-extern, non-packed struct '%s' cannot have its bytes reinterpreted",
                                    buf_ptr(&val->type->name)));
                            add_error_note(codegen, msg, val->type->data.structure.decl_node,
                                    buf_sprintf("declared here"));
                            break;
                        }
                        case StructSpecialSlice: {
                            opt_ir_add_error_node(ira, codegen, source_node,
                                buf_sprintf("slice '%s' cannot have its bytes reinterpreted",
                                    buf_ptr(&val->type->name)));
                            break;
                        }
                    }
                    return ErrorSemanticAnalyzeFail;
                }
                case ContainerLayoutExtern: {
                    // Extern layout: read each field from its ABI offset.
                    size_t src_field_count = val->type->data.structure.src_field_count;
                    val->data.x_struct.fields = alloc_const_vals_ptrs(codegen, src_field_count);
                    for (size_t field_i = 0; field_i < src_field_count; field_i += 1) {
                        ZigValue *field_val = val->data.x_struct.fields[field_i];
                        field_val->special = ConstValSpecialStatic;
                        TypeStructField *struct_field = val->type->data.structure.fields[field_i];
                        field_val->type = struct_field->type_entry;
                        // Zero-bit fields occupy no storage; leave them as-is.
                        if (struct_field->gen_index == SIZE_MAX)
                            continue;
                        size_t offset = struct_field->offset;
                        uint8_t *new_buf = buf + offset;
                        if ((err = buf_read_value_bytes(ira, codegen, source_node, new_buf, field_val)))
                            return err;
                    }
                    return ErrorNone;
                }
                case ContainerLayoutPacked: {
                    // Packed layout: each "host integer" (gen field) is read
                    // as a BigInt, then its bits are split back into the
                    // source fields that were packed into it.
                    size_t src_field_count = val->type->data.structure.src_field_count;
                    val->data.x_struct.fields = alloc_const_vals_ptrs(codegen, src_field_count);
                    size_t gen_field_count = val->type->data.structure.gen_field_count;
                    size_t gen_i = 0;
                    size_t src_i = 0;
                    size_t offset = 0;
                    bool is_big_endian = codegen->is_big_endian;
                    // Scratch buffer for one field's bytes; grown from the
                    // heap only when a host integer exceeds 16 bytes.
                    uint8_t child_buf_prealloc[16];
                    size_t child_buf_len = 16;
                    uint8_t *child_buf = child_buf_prealloc;
                    while (gen_i < gen_field_count) {
                        size_t big_int_byte_count = val->type->data.structure.host_int_bytes[gen_i];
                        if (big_int_byte_count > child_buf_len) {
                            child_buf = heap::c_allocator.allocate_nonzero<uint8_t>(big_int_byte_count);
                            child_buf_len = big_int_byte_count;
                        }
                        BigInt big_int;
                        bigint_read_twos_complement(&big_int, buf + offset, big_int_byte_count * 8, is_big_endian, false);
                        uint64_t bit_offset = 0;
                        // Extract every source field belonging to this host
                        // integer.
                        while (src_i < src_field_count) {
                            TypeStructField *field = val->type->data.structure.fields[src_i];
                            src_assert(field->gen_index != SIZE_MAX, source_node);
                            if (field->gen_index != gen_i)
                                break;
                            ZigValue *field_val = val->data.x_struct.fields[src_i];
                            field_val->special = ConstValSpecialStatic;
                            field_val->type = field->type_entry;
                            uint32_t packed_bits_size = type_size_bits(codegen, field->type_entry);

                            BigInt child_val;
                            if (is_big_endian) {
                                // Big endian: the current field occupies the
                                // highest remaining bits; shift them down and
                                // truncate to the field width.
                                BigInt packed_bits_size_bi;
                                bigint_init_unsigned(&packed_bits_size_bi, big_int_byte_count * 8 - packed_bits_size - bit_offset);
                                BigInt tmp;
                                bigint_shr(&tmp, &big_int, &packed_bits_size_bi);
                                bigint_truncate(&child_val, &tmp, packed_bits_size, false);
                            } else {
                                // Little endian: the current field is the low
                                // bits; truncate it out, then shift the
                                // accumulator down past it.
                                BigInt packed_bits_size_bi;
                                bigint_init_unsigned(&packed_bits_size_bi, packed_bits_size);
                                bigint_truncate(&child_val, &big_int, packed_bits_size, false);
                                BigInt tmp;
                                bigint_shr(&tmp, &big_int, &packed_bits_size_bi);
                                big_int = tmp;
                            }

                            // Round-trip the extracted bits through bytes so
                            // the recursive call can decode the field type.
                            bigint_write_twos_complement(&child_val, child_buf, packed_bits_size, is_big_endian);
                            if ((err = buf_read_value_bytes(ira, codegen, source_node, child_buf, field_val))) {
                                return err;
                            }

                            bit_offset += packed_bits_size;
                            src_i += 1;
                        }
                        offset += big_int_byte_count;
                        gen_i += 1;
                    }
                    return ErrorNone;
                }
            }
            zig_unreachable();
        case ZigTypeIdOptional:
            zig_panic("TODO buf_read_value_bytes maybe type");
        case ZigTypeIdErrorUnion:
            zig_panic("TODO buf_read_value_bytes error union");
        case ZigTypeIdErrorSet:
            zig_panic("TODO buf_read_value_bytes pure error type");
        case ZigTypeIdFn:
            zig_panic("TODO buf_read_value_bytes fn type");
        case ZigTypeIdUnion:
            zig_panic("TODO buf_read_value_bytes union type");
        case ZigTypeIdFnFrame:
            zig_panic("TODO buf_read_value_bytes async fn frame type");
        case ZigTypeIdAnyFrame:
            zig_panic("TODO buf_read_value_bytes anyframe type");
    }
    zig_unreachable();
}
|
|
|
|
// Analyze @bitCast between two types of identical size. Comptime values are
// reinterpreted by serializing the source value to a byte buffer and reading
// it back with the destination type's layout; runtime values lower to a
// bitcast instruction (spilling scalars to memory when the destination is
// handled by pointer).
static IrInstGen *ir_analyze_bit_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *dest_type)
{
    Error err;

    ZigType *src_type = value->value->type;
    ir_assert(type_can_bit_cast(src_type), source_instr);
    ir_assert(type_can_bit_cast(dest_type), source_instr);

    if (dest_type->id == ZigTypeIdEnum) {
        ErrorMsg *msg = ir_add_error_node(ira, source_instr->source_node,
            buf_sprintf("cannot cast a value of type '%s'", buf_ptr(&dest_type->name)));
        add_error_note(ira->codegen, msg, source_instr->source_node,
            buf_sprintf("use @intToEnum for type coercion"));
        return ira->codegen->invalid_inst_gen;
    }

    if ((err = type_resolve(ira->codegen, dest_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    if ((err = type_resolve(ira->codegen, src_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    const bool src_is_ptr = handle_is_ptr(ira->codegen, src_type);
    const bool dest_is_ptr = handle_is_ptr(ira->codegen, dest_type);

    // Both the byte size and the bit size must agree; e.g. two types can
    // share an ABI size while having different bit counts.
    const uint64_t dest_size_bytes = type_size(ira->codegen, dest_type);
    const uint64_t src_size_bytes = type_size(ira->codegen, src_type);
    if (dest_size_bytes != src_size_bytes) {
        ir_add_error(ira, source_instr,
            buf_sprintf("destination type '%s' has size %" ZIG_PRI_u64 " but source type '%s' has size %" ZIG_PRI_u64,
                buf_ptr(&dest_type->name), dest_size_bytes,
                buf_ptr(&src_type->name), src_size_bytes));
        return ira->codegen->invalid_inst_gen;
    }

    const uint64_t dest_size_bits = type_size_bits(ira->codegen, dest_type);
    const uint64_t src_size_bits = type_size_bits(ira->codegen, src_type);
    if (dest_size_bits != src_size_bits) {
        ir_add_error(ira, source_instr,
            buf_sprintf("destination type '%s' has %" ZIG_PRI_u64 " bits but source type '%s' has %" ZIG_PRI_u64 " bits",
                buf_ptr(&dest_type->name), dest_size_bits,
                buf_ptr(&src_type->name), src_size_bits));
        return ira->codegen->invalid_inst_gen;
    }

    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, source_instr, dest_type);
        uint8_t *buf = heap::c_allocator.allocate_nonzero<uint8_t>(src_size_bytes);
        buf_write_value_bytes(ira->codegen, buf, val);
        err = buf_read_value_bytes(ira, ira->codegen, source_instr->source_node, buf, result->value);
        // Free the scratch buffer on both paths; the previous early return
        // on a read failure leaked it.
        heap::c_allocator.deallocate(buf, src_size_bytes);
        if (err != ErrorNone)
            return ira->codegen->invalid_inst_gen;
        return result;
    }

    if (dest_is_ptr && !src_is_ptr) {
        // Spill the scalar into a local memory location and take its address
        value = ir_get_ref(ira, source_instr, value, false, false);
    }

    return ir_build_bit_cast_gen(ira, source_instr, value, dest_type);
}
|
|
|
|
// Convert an integer to a pointer of `ptr_type`. At comptime the address is
// validated (non-zero unless the pointer allows address zero, and correctly
// aligned) and folded into a constant pointer; at runtime an int-to-ptr
// instruction is emitted. Callers have already ensured `ptr_type` is a
// pointer-like type with at least one bit.
static IrInstGen *ir_analyze_int_to_ptr(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *ptr_type)
{
    Error err;

    ir_assert(get_src_ptr_type(ptr_type) != nullptr, source_instr);
    ir_assert(type_has_bits(ira->codegen, ptr_type), source_instr);

    // The operand must coerce to usize, the pointer-sized integer.
    IrInstGen *casted_int = ir_implicit_cast(ira, target, ira->codegen->builtin_types.entry_usize);
    if (type_is_invalid(casted_int->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(casted_int)) {
        ZigValue *val = ir_resolve_const(ira, casted_int, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        uint64_t addr = bigint_as_u64(&val->data.x_bigint);
        if (!ptr_allows_addr_zero(ptr_type) && addr == 0) {
            ir_add_error(ira, source_instr,
                    buf_sprintf("pointer type '%s' does not allow address zero", buf_ptr(&ptr_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        uint32_t align_bytes;
        if ((err = resolve_ptr_align(ira, ptr_type, &align_bytes)))
            return ira->codegen->invalid_inst_gen;

        // Address zero is exempt from the alignment check: it is only legal
        // when the type allows it (checked above) and then represents null.
        if (addr != 0 && addr % align_bytes != 0) {
            ir_add_error(ira, source_instr,
                    buf_sprintf("pointer type '%s' requires aligned address",
                        buf_ptr(&ptr_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        IrInstGen *result = ir_const(ira, source_instr, ptr_type);
        if (ptr_type->id == ZigTypeIdOptional && addr == 0) {
            // Address zero of an optional pointer is the null value.
            result->value->data.x_ptr.special = ConstPtrSpecialNull;
            result->value->data.x_ptr.mut = ConstPtrMutComptimeConst;
        } else {
            // Otherwise keep the literal address; mark it runtime-mutable
            // since the pointee contents are not comptime-known.
            result->value->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
            result->value->data.x_ptr.mut = ConstPtrMutRuntimeVar;
            result->value->data.x_ptr.data.hard_coded_addr.addr = addr;
        }

        return result;
    }

    return ir_build_int_to_ptr_gen(ira, source_instr->scope, source_instr->source_node, casted_int, ptr_type);
}
|
|
|
|
// Analyze @intToPtr: validate that the destination is a pointer-like type
// with a runtime representation, then delegate to ir_analyze_int_to_ptr.
static IrInstGen *ir_analyze_instruction_int_to_ptr(IrAnalyze *ira, IrInstSrcIntToPtr *instruction) {
    Error err;
    IrInstGen *dest_type_inst = instruction->dest_type->child;
    ZigType *dest_type = ir_resolve_type(ira, dest_type_inst);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    // We explicitly check for the size, so we can use get_src_ptr_type
    if (get_src_ptr_type(dest_type) == nullptr) {
        ir_add_error(ira, &dest_type_inst->base, buf_sprintf("expected pointer, found '%s'", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    bool dest_has_bits;
    if ((err = type_has_bits2(ira->codegen, dest_type, &dest_has_bits)))
        return ira->codegen->invalid_inst_gen;
    if (!dest_has_bits) {
        ir_add_error(ira, &dest_type_inst->base,
                buf_sprintf("type '%s' has 0 bits and cannot store information", buf_ptr(&dest_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *int_operand = instruction->target->child;
    if (type_is_invalid(int_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_int_to_ptr(ira, &instruction->base.base, int_operand, dest_type);
}
|
|
|
|
// Analyze a reference to a declaration. In pointer-like lvalue contexts
// (taking the address, or assigning through it) the pointer itself is the
// result; otherwise the referenced value is loaded.
static IrInstGen *ir_analyze_instruction_decl_ref(IrAnalyze *ira, IrInstSrcDeclRef *instruction) {
    IrInstGen *decl_ptr = ir_analyze_decl_ref(ira, &instruction->base.base, instruction->tld);
    if (type_is_invalid(decl_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    const bool want_address = (instruction->lval == LValPtr) || (instruction->lval == LValAssign);
    if (want_address)
        return decl_ptr;
    return ir_get_deref(ira, &instruction->base.base, decl_ptr, nullptr);
}
|
|
|
|
// Analyze @ptrToInt: produce the usize address of a pointer value. Comptime
// pointers with a known address (hard-coded or null) fold to a constant;
// anything else lowers to a runtime ptr-to-int instruction.
static IrInstGen *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstSrcPtrToInt *instruction) {
    Error err;
    IrInstGen *target = instruction->target->child;
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *usize = ira->codegen->builtin_types.entry_usize;

    // Reject non-pointer operands.
    ZigType *src_ptr_type = get_src_ptr_type(target->value->type);
    if (src_ptr_type == nullptr) {
        ir_add_error(ira, &target->base,
                buf_sprintf("expected pointer, found '%s'", buf_ptr(&target->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // A pointer to a zero-bit type carries no address at runtime.
    bool has_bits;
    if ((err = type_has_bits2(ira->codegen, src_ptr_type, &has_bits)))
        return ira->codegen->invalid_inst_gen;

    if (!has_bits) {
        ir_add_error(ira, &target->base,
                buf_sprintf("pointer to size 0 type has no address"));
        return ira->codegen->invalid_inst_gen;
    }

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        // Since we've already run this type through get_src_ptr_type it is
        // safe to access the x_ptr fields
        if (val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
            IrInstGen *result = ir_const(ira, &instruction->base.base, usize);
            bigint_init_unsigned(&result->value->data.x_bigint, val->data.x_ptr.data.hard_coded_addr.addr);
            result->value->type = usize;
            return result;
        } else if (val->data.x_ptr.special == ConstPtrSpecialNull) {
            // A null pointer converts to address zero.
            IrInstGen *result = ir_const(ira, &instruction->base.base, usize);
            bigint_init_unsigned(&result->value->data.x_bigint, 0);
            result->value->type = usize;
            return result;
        }
        // Other comptime pointers (e.g. to comptime values) have no stable
        // address; fall through to the runtime instruction.
    }

    return ir_build_ptr_to_int_gen(ira, &instruction->base.base, target);
}
|
|
|
|
// Analyze a pointer-type expression (e.g. `*align(4) const T`). The result
// is a lazy `type` value: the component expressions (sentinel, element type,
// alignment) are captured now and the concrete ZigType is built later when
// the lazy value is resolved.
static IrInstGen *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstSrcPtrType *instruction) {
    IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_type);
    result->value->special = ConstValSpecialLazy;

    LazyValuePtrType *lazy_ptr_type = heap::c_allocator.create<LazyValuePtrType>();
    // The lazy value keeps a reference to the analysis context alive until
    // it is resolved.
    lazy_ptr_type->ira = ira; ira_ref(ira);
    result->value->data.x_lazy = &lazy_ptr_type->base;
    lazy_ptr_type->base.id = LazyValueIdPtrType;

    if (instruction->sentinel != nullptr) {
        // Sentinels only make sense for [*:s]T pointers.
        if (instruction->ptr_len != PtrLenUnknown) {
            ir_add_error(ira, &instruction->base.base,
                buf_sprintf("sentinels are only allowed on unknown-length pointers"));
            return ira->codegen->invalid_inst_gen;
        }

        lazy_ptr_type->sentinel = instruction->sentinel->child;
        // LazyOk: the sentinel may itself be a lazy value at this point.
        if (ir_resolve_const(ira, lazy_ptr_type->sentinel, LazyOk) == nullptr)
            return ira->codegen->invalid_inst_gen;
    }

    lazy_ptr_type->elem_type = instruction->child_type->child;
    if (ir_resolve_type_lazy(ira, lazy_ptr_type->elem_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    if (instruction->align_value != nullptr) {
        lazy_ptr_type->align_inst = instruction->align_value->child;
        if (ir_resolve_const(ira, lazy_ptr_type->align_inst, LazyOk) == nullptr)
            return ira->codegen->invalid_inst_gen;
    }

    // Copy the remaining pointer attributes verbatim; they are already
    // concrete on the source instruction.
    lazy_ptr_type->ptr_len = instruction->ptr_len;
    lazy_ptr_type->is_const = instruction->is_const;
    lazy_ptr_type->is_volatile = instruction->is_volatile;
    lazy_ptr_type->is_allowzero = instruction->is_allow_zero;
    lazy_ptr_type->bit_offset_in_host = instruction->bit_offset_start;
    lazy_ptr_type->host_int_bytes = instruction->host_int_bytes;

    return result;
}
|
|
|
|
// Analyze @alignCast: resolve the requested alignment (validated against the
// pointee type when the target is a pointer or slice) and re-type the target
// with that alignment via ir_align_cast, with safety checks enabled.
static IrInstGen *ir_analyze_instruction_align_cast(IrAnalyze *ira, IrInstSrcAlignCast *instruction) {
    IrInstGen *target = instruction->target->child;
    ZigType *target_type = target->value->type;
    if (type_is_invalid(target_type))
        return ira->codegen->invalid_inst_gen;

    // Find the pointee type so the alignment operand can be checked against
    // it; targets that are neither slices nor pointers leave it null.
    ZigType *pointee_type = nullptr;
    if (is_slice(target_type)) {
        ZigType *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index]->type_entry;
        pointee_type = slice_ptr_type->data.pointer.child_type;
    } else if (target_type->id == ZigTypeIdPointer) {
        pointee_type = target_type->data.pointer.child_type;
    }

    uint32_t align_bytes;
    if (!ir_resolve_align(ira, instruction->align_bytes->child, pointee_type, &align_bytes))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted = ir_align_cast(ira, target, align_bytes, true);
    if (type_is_invalid(casted->value->type))
        return ira->codegen->invalid_inst_gen;

    return casted;
}
|
|
|
|
// Analyze @setAlignStack: validate the requested alignment and the calling
// context, then record the alignment on the enclosing function. It may only
// appear once per function, inside a non-naked, non-always-inline function.
static IrInstGen *ir_analyze_instruction_set_align_stack(IrAnalyze *ira, IrInstSrcSetAlignStack *instruction) {
    uint32_t requested_align;
    if (!ir_resolve_align(ira, instruction->align_bytes->child, nullptr, &requested_align))
        return ira->codegen->invalid_inst_gen;

    if (requested_align > 256) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("attempt to @setAlignStack(%" PRIu32 "); maximum is 256", requested_align));
        return ira->codegen->invalid_inst_gen;
    }

    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    if (fn_entry == nullptr) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("@setAlignStack outside function"));
        return ira->codegen->invalid_inst_gen;
    }
    if (fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionNaked) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("@setAlignStack in naked function"));
        return ira->codegen->invalid_inst_gen;
    }
    if (fn_entry->fn_inline == FnInlineAlways) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("@setAlignStack in inline function"));
        return ira->codegen->invalid_inst_gen;
    }
    if (fn_entry->set_alignstack_node != nullptr) {
        ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
                buf_sprintf("alignstack set twice"));
        add_error_note(ira->codegen, msg, fn_entry->set_alignstack_node, buf_sprintf("first set here"));
        return ira->codegen->invalid_inst_gen;
    }

    // Remember where it was set so a second use can point back here.
    fn_entry->set_alignstack_node = instruction->base.base.source_node;
    fn_entry->alignstack_value = requested_align;

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Analyze @ArgType(fn_type, index): produce the type of the index'th
// parameter of the given function type. With allow_var, out-of-range or
// generic-unresolved parameters yield `anytype` instead of an error.
static IrInstGen *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstSrcArgType *instruction) {
    IrInstGen *fn_type_inst = instruction->fn_type->child;
    ZigType *fn_type = ir_resolve_type(ira, fn_type_inst);
    if (type_is_invalid(fn_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *arg_index_inst = instruction->arg_index->child;
    uint64_t arg_index;
    if (!ir_resolve_usize(ira, arg_index_inst, &arg_index))
        return ira->codegen->invalid_inst_gen;

    // A bound function has an implicit first argument; index past it.
    if (fn_type->id == ZigTypeIdBoundFn) {
        fn_type = fn_type->data.bound_fn.fn_type;
        arg_index += 1;
    }
    if (fn_type->id != ZigTypeIdFn) {
        ir_add_error(ira, &fn_type_inst->base, buf_sprintf("expected function, found '%s'", buf_ptr(&fn_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
    if (arg_index >= fn_type_id->param_count) {
        if (instruction->allow_var) {
            // TODO remove this with var args
            return ir_const_type(ira, &instruction->base.base, ira->codegen->builtin_types.entry_anytype);
        }
        ir_add_error(ira, &arg_index_inst->base,
            buf_sprintf("arg index %" ZIG_PRI_u64 " out of bounds; '%s' has %" ZIG_PRI_usize " argument(s)",
                arg_index, buf_ptr(&fn_type->name), fn_type_id->param_count));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *param_type = fn_type_id->param_info[arg_index].type;
    if (param_type != nullptr)
        return ir_const_type(ira, &instruction->base.base, param_type);

    // Args are only unresolved if our function is generic.
    ir_assert(fn_type->data.fn.is_generic, &instruction->base.base);

    if (instruction->allow_var)
        return ir_const_type(ira, &instruction->base.base, ira->codegen->builtin_types.entry_anytype);

    ir_add_error(ira, &arg_index_inst->base,
        buf_sprintf("@ArgType could not resolve the type of arg %" ZIG_PRI_u64 " because '%s' is generic",
            arg_index, buf_ptr(&fn_type->name)));
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
// Analyzes @TagType(T): for an enum, yields its integer tag type; for a
// union, yields the enum tag type. Any other type is a compile error.
static IrInstGen *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstSrcTagType *instruction) {
    Error err;
    IrInstGen *target_inst = instruction->target->child;
    ZigType *target_type = ir_resolve_type(ira, target_inst);
    if (type_is_invalid(target_type))
        return ira->codegen->invalid_inst_gen;

    switch (target_type->id) {
        case ZigTypeIdEnum:
            // The integer tag type is only available once the enum's size
            // has been resolved.
            if ((err = type_resolve(ira->codegen, target_type, ResolveStatusSizeKnown)))
                return ira->codegen->invalid_inst_gen;

            return ir_const_type(ira, &instruction->base.base,
                    target_type->data.enumeration.tag_int_type);
        case ZigTypeIdUnion: {
            ZigType *union_tag_type = ir_resolve_union_tag_type(ira,
                    instruction->target->base.source_node, target_type);
            if (type_is_invalid(union_tag_type))
                return ira->codegen->invalid_inst_gen;
            return ir_const_type(ira, &instruction->base.base, union_tag_type);
        }
        default:
            ir_add_error(ira, &target_inst->base, buf_sprintf("expected enum or union, found '%s'",
                buf_ptr(&target_type->name)));
            return ira->codegen->invalid_inst_gen;
    }
}
|
|
|
|
// Resolves and validates the type operand of the @atomic* builtins.
// Accepted: integers and enums (checked via the enum's integer tag type)
// no wider than the target's largest atomic width, floats within the same
// width limit, bool, and pointer-like types. Returns the resolved type, or
// builtin_types.entry_invalid after reporting an error.
static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
    ZigType *operand_type = ir_resolve_type(ira, op);
    if (type_is_invalid(operand_type))
        return ira->codegen->builtin_types.entry_invalid;

    if (operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdEnum) {
        ZigType *int_type;
        if (operand_type->id == ZigTypeIdEnum) {
            // An enum's atomic width is that of its integer tag type.
            int_type = operand_type->data.enumeration.tag_int_type;
        } else {
            int_type = operand_type;
        }
        auto bit_count = int_type->data.integral.bit_count;
        uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);

        if (bit_count > max_atomic_bits) {
            ir_add_error(ira, &op->base,
                buf_sprintf("expected %" PRIu32 "-bit integer type or smaller, found %" PRIu32 "-bit integer type",
                    max_atomic_bits, bit_count));
            return ira->codegen->builtin_types.entry_invalid;
        }
    } else if (operand_type->id == ZigTypeIdFloat) {
        uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
        if (operand_type->data.floating.bit_count > max_atomic_bits) {
            ir_add_error(ira, &op->base,
                buf_sprintf("expected %" PRIu32 "-bit float or smaller, found %" PRIu32 "-bit float",
                    max_atomic_bits, (uint32_t) operand_type->data.floating.bit_count));
            return ira->codegen->builtin_types.entry_invalid;
        }
    } else if (operand_type->id == ZigTypeIdBool) {
        // will be treated as u8
    } else {
        // Anything else must be (or wrap) a pointer type to be atomically
        // operable; otherwise report the full list of accepted kinds.
        Error err;
        ZigType *operand_ptr_type;
        if ((err = get_codegen_ptr_type(ira->codegen, operand_type, &operand_ptr_type)))
            return ira->codegen->builtin_types.entry_invalid;
        if (operand_ptr_type == nullptr) {
            ir_add_error(ira, &op->base,
                buf_sprintf("expected bool, integer, float, enum or pointer type, found '%s'",
                    buf_ptr(&operand_type->name)));
            return ira->codegen->builtin_types.entry_invalid;
        }
    }

    return operand_type;
}
|
|
|
|
// Analyzes @atomicRmw(T, ptr, op, operand, ordering).
// Validates the operand-type/op combination and the ordering, then either
// folds the read-modify-write at comptime (when the pointer targets a
// comptime-mutable variable) or emits the runtime atomic-rmw gen instruction.
// The result is always the value the pointee held before the modification.
static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAtomicRmw *instruction) {
    ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *ptr_inst = instruction->ptr->child;
    if (type_is_invalid(ptr_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // TODO let this be volatile
    ZigType *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
    IrInstGen *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
    if (type_is_invalid(casted_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    AtomicRmwOp op;
    if (!ir_resolve_atomic_rmw_op(ira, instruction->op->child, &op)) {
        return ira->codegen->invalid_inst_gen;
    }

    // Restrict the op set per operand type: enums and bools only support
    // exchange; floats support exchange/add/sub.
    // NOTE(review): the float check relies on the declaration order of
    // AtomicRmwOp (everything after _sub being integer-only) — confirm this
    // invariant if ops are ever added or reordered.
    if (operand_type->id == ZigTypeIdEnum && op != AtomicRmwOp_xchg) {
        ir_add_error(ira, &instruction->op->base,
            buf_sprintf("@atomicRmw with enum only allowed with .Xchg"));
        return ira->codegen->invalid_inst_gen;
    } else if (operand_type->id == ZigTypeIdBool && op != AtomicRmwOp_xchg) {
        ir_add_error(ira, &instruction->op->base,
            buf_sprintf("@atomicRmw with bool only allowed with .Xchg"));
        return ira->codegen->invalid_inst_gen;
    } else if (operand_type->id == ZigTypeIdFloat && op > AtomicRmwOp_sub) {
        ir_add_error(ira, &instruction->op->base,
            buf_sprintf("@atomicRmw with float only allowed with .Xchg, .Add and .Sub"));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *operand = instruction->operand->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_operand = ir_implicit_cast(ira, operand, operand_type);
    if (type_is_invalid(casted_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    AtomicOrder ordering;
    if (!ir_resolve_atomic_order(ira, instruction->ordering->child, &ordering))
        return ira->codegen->invalid_inst_gen;
    if (ordering == AtomicOrderUnordered) {
        ir_add_error(ira, &instruction->ordering->base,
            buf_sprintf("@atomicRmw atomic ordering must not be Unordered"));
        return ira->codegen->invalid_inst_gen;
    }

    // special case zero bit types
    switch (type_has_one_possible_value(ira->codegen, operand_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            return ir_const_move(ira, &instruction->base.base, get_the_one_possible_value(ira->codegen, operand_type));
        case OnePossibleValueNo:
            break;
    }

    IrInst *source_inst = &instruction->base.base;
    // Comptime path: requires the pointer to refer to a comptime-mutable
    // variable so the pointee can be updated in place.
    if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar) {
        ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // op1_val is the current pointee value and also serves as the
        // destination of the in-place update below.
        ZigValue *op1_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *op2_val = ir_resolve_const(ira, casted_operand, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // Snapshot the previous value first; it is the instruction's result.
        IrInstGen *result = ir_const(ira, source_inst, operand_type);
        copy_const_val(ira->codegen, result->value, op1_val);
        if (op == AtomicRmwOp_xchg) {
            copy_const_val(ira->codegen, op1_val, op2_val);
            return result;
        }

        if (operand_type->id == ZigTypeIdPointer || operand_type->id == ZigTypeIdOptional) {
            ir_add_error(ira, &instruction->ordering->base,
                buf_sprintf("TODO comptime @atomicRmw with pointers other than .Xchg"));
            return ira->codegen->invalid_inst_gen;
        }

        ErrorMsg *msg;
        if (op == AtomicRmwOp_min || op == AtomicRmwOp_max) {
            // min/max: compare, then conditionally overwrite the pointee.
            IrBinOp bin_op;
            if (op == AtomicRmwOp_min)
                // store op2 if op2 < op1
                bin_op = IrBinOpCmpGreaterThan;
            else
                // store op2 if op2 > op1
                bin_op = IrBinOpCmpLessThan;

            // dummy_value receives the boolean comparison result.
            IrInstGen *dummy_value = ir_const(ira, source_inst, operand_type);
            msg = ir_eval_bin_op_cmp_scalar(ira, source_inst, op1_val, bin_op, op2_val, dummy_value->value);
            if (msg != nullptr) {
                return ira->codegen->invalid_inst_gen;
            }
            if (dummy_value->value->data.x_bool)
                copy_const_val(ira->codegen, op1_val, op2_val);
        } else {
            // Remaining ops map onto a scalar binary op, evaluated directly
            // into the pointee (op1_val doubles as the output operand).
            IrBinOp bin_op;
            switch (op) {
                case AtomicRmwOp_xchg:
                case AtomicRmwOp_max:
                case AtomicRmwOp_min:
                    zig_unreachable();
                case AtomicRmwOp_add:
                    // Integer add/sub use wrapping arithmetic, matching the
                    // runtime atomic semantics; floats use plain add/sub.
                    if (operand_type->id == ZigTypeIdFloat)
                        bin_op = IrBinOpAdd;
                    else
                        bin_op = IrBinOpAddWrap;
                    break;
                case AtomicRmwOp_sub:
                    if (operand_type->id == ZigTypeIdFloat)
                        bin_op = IrBinOpSub;
                    else
                        bin_op = IrBinOpSubWrap;
                    break;
                case AtomicRmwOp_and:
                case AtomicRmwOp_nand:
                    // nand = not(and): computed as AND here, negated below.
                    bin_op = IrBinOpBinAnd;
                    break;
                case AtomicRmwOp_or:
                    bin_op = IrBinOpBinOr;
                    break;
                case AtomicRmwOp_xor:
                    bin_op = IrBinOpBinXor;
                    break;
            }
            msg = ir_eval_math_op_scalar(ira, source_inst, operand_type, op1_val, bin_op, op2_val, op1_val);
            if (msg != nullptr) {
                return ira->codegen->invalid_inst_gen;
            }
            if (op == AtomicRmwOp_nand) {
                bigint_not(&op1_val->data.x_bigint, &op1_val->data.x_bigint,
                        operand_type->data.integral.bit_count, operand_type->data.integral.is_signed);
            }
        }
        return result;
    }

    return ir_build_atomic_rmw_gen(ira, source_inst, casted_ptr, casted_operand, op,
            ordering, operand_type);
}
|
|
|
|
// Analyzes @atomicLoad(T, ptr, ordering): validates the ordering (Release
// and AcqRel are rejected for loads), folds the load when the pointer is
// comptime-known, otherwise emits the runtime atomic-load gen instruction.
static IrInstGen *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstSrcAtomicLoad *instruction) {
    ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *uncasted_ptr = instruction->ptr->child;
    if (type_is_invalid(uncasted_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // A load only needs a const pointer to the operand type.
    ZigType *const_ptr_type = get_pointer_to_type(ira->codegen, operand_type, true);
    IrInstGen *ptr = ir_implicit_cast(ira, uncasted_ptr, const_ptr_type);
    if (type_is_invalid(ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    AtomicOrder ordering;
    if (!ir_resolve_atomic_order(ira, instruction->ordering->child, &ordering))
        return ira->codegen->invalid_inst_gen;

    switch (ordering) {
        case AtomicOrderRelease:
        case AtomicOrderAcqRel:
            // Release semantics do not apply to a pure load.
            ir_assert(instruction->ordering != nullptr, &instruction->base.base);
            ir_add_error(ira, &instruction->ordering->base,
                    buf_sprintf("@atomicLoad atomic ordering must not be Release or AcqRel"));
            return ira->codegen->invalid_inst_gen;
        default:
            break;
    }

    if (instr_is_comptime(ptr)) {
        // Comptime fold: an atomic load of a comptime-known pointer is just
        // a dereference.
        IrInstGen *loaded = ir_get_deref(ira, &instruction->base.base, ptr, nullptr);
        ir_assert(loaded->value->type != nullptr, &instruction->base.base);
        return loaded;
    }

    return ir_build_atomic_load_gen(ira, &instruction->base.base, ptr, ordering, operand_type);
}
|
|
|
|
// Analyzes @atomicStore(T, ptr, value, ordering): validates the ordering
// (Acquire and AcqRel are rejected for stores), short-circuits zero-bit
// types, folds the store when both pointer and value are comptime-known,
// otherwise emits the runtime atomic-store gen instruction. Result is void.
static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcAtomicStore *instruction) {
    ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *ptr_inst = instruction->ptr->child;
    if (type_is_invalid(ptr_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // A store requires a mutable pointer to the operand type.
    ZigType *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
    IrInstGen *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
    if (type_is_invalid(casted_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *value = instruction->value->child;
    if (type_is_invalid(value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_value = ir_implicit_cast(ira, value, operand_type);
    if (type_is_invalid(casted_value->value->type))
        return ira->codegen->invalid_inst_gen;

    AtomicOrder ordering;
    if (!ir_resolve_atomic_order(ira, instruction->ordering->child, &ordering))
        return ira->codegen->invalid_inst_gen;

    // Acquire semantics do not apply to a pure store.
    if (ordering == AtomicOrderAcquire || ordering == AtomicOrderAcqRel) {
        ir_assert(instruction->ordering != nullptr, &instruction->base.base);
        ir_add_error(ira, &instruction->ordering->base,
            buf_sprintf("@atomicStore atomic ordering must not be Acquire or AcqRel"));
        return ira->codegen->invalid_inst_gen;
    }

    // special case zero bit types
    switch (type_has_one_possible_value(ira->codegen, operand_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            // Storing the only possible value is a no-op.
            return ir_const_void(ira, &instruction->base.base);
        case OnePossibleValueNo:
            break;
    }

    if (instr_is_comptime(casted_value) && instr_is_comptime(casted_ptr)) {
        // NOTE(review): this passes the uncasted `value` rather than
        // `casted_value`; presumably ir_analyze_store_ptr performs the
        // coercion itself — confirm before changing.
        IrInstGen *result = ir_analyze_store_ptr(ira, &instruction->base.base, casted_ptr, value, false);
        result->value->type = ira->codegen->builtin_types.entry_void;
        return result;
    }

    return ir_build_atomic_store_gen(ira, &instruction->base.base, casted_ptr, casted_value, ordering);
}
|
|
|
|
// Lowers the save-err-ret-addr src instruction directly to its gen
// counterpart; there is nothing to validate or fold at comptime.
static IrInstGen *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstSrcSaveErrRetAddr *instruction) {
    return ir_build_save_err_ret_addr_gen(ira, &instruction->base.base);
}
|
|
|
|
// Comptime-evaluates a unary float builtin (@sqrt, @sin, @cos, @exp, @exp2,
// @log, @log10, @log2, @fabs, @floor, @ceil, @trunc, @nearbyInt, @round)
// on a single scalar `op`, writing the result into `out_val`.
// Returns nullptr on success, or the reported ErrorMsg for bit widths / op
// combinations that are not yet implemented (f80; most transcendental ops
// on 128-bit floats — see ziglang/zig#4026).
static ErrorMsg *ir_eval_float_op(IrAnalyze *ira, IrInst* source_instr, BuiltinFnId fop, ZigType *float_type,
        ZigValue *op, ZigValue *out_val)
{
    assert(ira && source_instr && float_type && out_val && op);
    assert(float_type->id == ZigTypeIdFloat ||
           float_type->id == ZigTypeIdComptimeFloat);

    unsigned bits;

    // comptime_float is represented as a 128-bit value here.
    switch (float_type->id) {
        case ZigTypeIdComptimeFloat:
            bits = 128;
            break;
        case ZigTypeIdFloat:
            bits = float_type->data.floating.bit_count;
            break;
        default:
            zig_unreachable();
    }

    switch (bits) {
        case 16: {
            // f16: sqrt goes through softfloat; everything else round-trips
            // through double and the C math library.
            // NOTE(review): the double round-trip may not be correctly
            // rounded for f16 in every case — confirm if exactness matters.
            switch (fop) {
                case BuiltinFnIdSqrt:
                    out_val->data.x_f16 = f16_sqrt(op->data.x_f16);
                    break;
                case BuiltinFnIdSin:
                    out_val->data.x_f16 = zig_double_to_f16(sin(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdCos:
                    out_val->data.x_f16 = zig_double_to_f16(cos(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdExp:
                    out_val->data.x_f16 = zig_double_to_f16(exp(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdExp2:
                    out_val->data.x_f16 = zig_double_to_f16(exp2(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdLog:
                    out_val->data.x_f16 = zig_double_to_f16(log(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdLog10:
                    out_val->data.x_f16 = zig_double_to_f16(log10(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdLog2:
                    out_val->data.x_f16 = zig_double_to_f16(log2(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdFabs:
                    out_val->data.x_f16 = zig_double_to_f16(fabs(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdFloor:
                    out_val->data.x_f16 = zig_double_to_f16(floor(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdCeil:
                    out_val->data.x_f16 = zig_double_to_f16(ceil(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdTrunc:
                    out_val->data.x_f16 = zig_double_to_f16(trunc(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdNearbyInt:
                    out_val->data.x_f16 = zig_double_to_f16(nearbyint(zig_f16_to_double(op->data.x_f16)));
                    break;
                case BuiltinFnIdRound:
                    out_val->data.x_f16 = zig_double_to_f16(round(zig_f16_to_double(op->data.x_f16)));
                    break;
                default:
                    zig_unreachable();
            };
            break;
        }
        case 32: {
            // f32: single-precision libm entry points.
            switch (fop) {
                case BuiltinFnIdSqrt:
                    out_val->data.x_f32 = sqrtf(op->data.x_f32);
                    break;
                case BuiltinFnIdSin:
                    out_val->data.x_f32 = sinf(op->data.x_f32);
                    break;
                case BuiltinFnIdCos:
                    out_val->data.x_f32 = cosf(op->data.x_f32);
                    break;
                case BuiltinFnIdExp:
                    out_val->data.x_f32 = expf(op->data.x_f32);
                    break;
                case BuiltinFnIdExp2:
                    out_val->data.x_f32 = exp2f(op->data.x_f32);
                    break;
                case BuiltinFnIdLog:
                    out_val->data.x_f32 = logf(op->data.x_f32);
                    break;
                case BuiltinFnIdLog10:
                    out_val->data.x_f32 = log10f(op->data.x_f32);
                    break;
                case BuiltinFnIdLog2:
                    out_val->data.x_f32 = log2f(op->data.x_f32);
                    break;
                case BuiltinFnIdFabs:
                    out_val->data.x_f32 = fabsf(op->data.x_f32);
                    break;
                case BuiltinFnIdFloor:
                    out_val->data.x_f32 = floorf(op->data.x_f32);
                    break;
                case BuiltinFnIdCeil:
                    out_val->data.x_f32 = ceilf(op->data.x_f32);
                    break;
                case BuiltinFnIdTrunc:
                    out_val->data.x_f32 = truncf(op->data.x_f32);
                    break;
                case BuiltinFnIdNearbyInt:
                    out_val->data.x_f32 = nearbyintf(op->data.x_f32);
                    break;
                case BuiltinFnIdRound:
                    out_val->data.x_f32 = roundf(op->data.x_f32);
                    break;
                default:
                    zig_unreachable();
            };
            break;
        }
        case 64: {
            // f64: double-precision libm entry points.
            switch (fop) {
                case BuiltinFnIdSqrt:
                    out_val->data.x_f64 = sqrt(op->data.x_f64);
                    break;
                case BuiltinFnIdSin:
                    out_val->data.x_f64 = sin(op->data.x_f64);
                    break;
                case BuiltinFnIdCos:
                    out_val->data.x_f64 = cos(op->data.x_f64);
                    break;
                case BuiltinFnIdExp:
                    out_val->data.x_f64 = exp(op->data.x_f64);
                    break;
                case BuiltinFnIdExp2:
                    out_val->data.x_f64 = exp2(op->data.x_f64);
                    break;
                case BuiltinFnIdLog:
                    out_val->data.x_f64 = log(op->data.x_f64);
                    break;
                case BuiltinFnIdLog10:
                    out_val->data.x_f64 = log10(op->data.x_f64);
                    break;
                case BuiltinFnIdLog2:
                    out_val->data.x_f64 = log2(op->data.x_f64);
                    break;
                case BuiltinFnIdFabs:
                    out_val->data.x_f64 = fabs(op->data.x_f64);
                    break;
                case BuiltinFnIdFloor:
                    out_val->data.x_f64 = floor(op->data.x_f64);
                    break;
                case BuiltinFnIdCeil:
                    out_val->data.x_f64 = ceil(op->data.x_f64);
                    break;
                case BuiltinFnIdTrunc:
                    out_val->data.x_f64 = trunc(op->data.x_f64);
                    break;
                case BuiltinFnIdNearbyInt:
                    out_val->data.x_f64 = nearbyint(op->data.x_f64);
                    break;
                case BuiltinFnIdRound:
                    out_val->data.x_f64 = round(op->data.x_f64);
                    break;
                default:
                    zig_unreachable();
            }
            break;
        }
        case 80:
            // f80 comptime evaluation is unimplemented.
            return ir_add_error(ira, source_instr,
                buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. See https://github.com/ziglang/zig/issues/4026",
                    float_op_to_name(fop), buf_ptr(&float_type->name)));
        case 128: {
            // f128 and comptime_float use the softfloat library; only the
            // ops it provides (sqrt, abs, and the rounding family) are
            // implemented.
            float128_t *out, *in;
            if (float_type->id == ZigTypeIdComptimeFloat) {
                out = &out_val->data.x_bigfloat.value;
                in = &op->data.x_bigfloat.value;
            } else {
                out = &out_val->data.x_f128;
                in = &op->data.x_f128;
            }
            switch (fop) {
                case BuiltinFnIdSqrt:
                    f128M_sqrt(in, out);
                    break;
                case BuiltinFnIdFabs:
                    f128M_abs(in, out);
                    break;
                case BuiltinFnIdFloor:
                    // floor = round toward -inf.
                    f128M_roundToInt(in, softfloat_round_min, false, out);
                    break;
                case BuiltinFnIdCeil:
                    // ceil = round toward +inf.
                    f128M_roundToInt(in, softfloat_round_max, false, out);
                    break;
                case BuiltinFnIdTrunc:
                    f128M_trunc(in, out);
                    break;
                case BuiltinFnIdRound:
                    // round = round to nearest, ties away from zero.
                    f128M_roundToInt(in, softfloat_round_near_maxMag, false, out);
                    break;
                case BuiltinFnIdNearbyInt:
                case BuiltinFnIdSin:
                case BuiltinFnIdCos:
                case BuiltinFnIdExp:
                case BuiltinFnIdExp2:
                case BuiltinFnIdLog:
                case BuiltinFnIdLog10:
                case BuiltinFnIdLog2:
                    return ir_add_error(ira, source_instr,
                        buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. See https://github.com/ziglang/zig/issues/4026",
                            float_op_to_name(fop), buf_ptr(&float_type->name)));
                default:
                    zig_unreachable();
            }
            break;
        }
        default:
            zig_unreachable();
    }
    out_val->special = ConstValSpecialStatic;
    return nullptr;
}
|
|
|
|
// Analyzes a unary float builtin (@sqrt, @sin, ...). Accepts floats,
// comptime_float, and vectors of floats. Comptime-known operands are folded
// via ir_eval_float_op (element-wise for vectors); otherwise a runtime
// float-op gen instruction is emitted.
static IrInstGen *ir_analyze_instruction_float_op(IrAnalyze *ira, IrInstSrcFloatOp *instruction) {
    IrInstGen *operand = instruction->operand->child;
    ZigType *operand_type = operand->value->type;
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_inst_gen;

    // This instruction accepts floats and vectors of floats.
    ZigType *scalar_type = (operand_type->id == ZigTypeIdVector) ?
        operand_type->data.vector.elem_type : operand_type;

    if (scalar_type->id != ZigTypeIdFloat && scalar_type->id != ZigTypeIdComptimeFloat) {
        ir_add_error(ira, &operand->base,
            buf_sprintf("expected float type, found '%s'", buf_ptr(&scalar_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (instr_is_comptime(operand)) {
        ZigValue *operand_val = ir_resolve_const(ira, operand, UndefOk);
        if (operand_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        // An undef operand yields an undef result of the same type.
        if (operand_val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, &instruction->base.base, operand_type);

        IrInstGen *result = ir_const(ira, &instruction->base.base, operand_type);
        ZigValue *out_val = result->value;

        if (operand_type->id == ZigTypeIdVector) {
            // Materialize both arrays element-by-element, then evaluate the
            // scalar op per element.
            expand_undef_array(ira->codegen, operand_val);
            out_val->special = ConstValSpecialUndef;
            expand_undef_array(ira->codegen, out_val);
            size_t len = operand_type->data.vector.len;
            for (size_t i = 0; i < len; i += 1) {
                ZigValue *elem_operand = &operand_val->data.x_array.data.s_none.elements[i];
                ZigValue *float_out_val = &out_val->data.x_array.data.s_none.elements[i];
                ir_assert(elem_operand->type == scalar_type, &instruction->base.base);
                ir_assert(float_out_val->type == scalar_type, &instruction->base.base);
                ErrorMsg *msg = ir_eval_float_op(ira, &instruction->base.base, instruction->fn_id, scalar_type,
                        elem_operand, float_out_val);
                if (msg != nullptr) {
                    // Point at the failing lane for better diagnostics.
                    add_error_note(ira->codegen, msg, instruction->base.base.source_node,
                        buf_sprintf("when computing vector element at index %" ZIG_PRI_usize, i));
                    return ira->codegen->invalid_inst_gen;
                }
                float_out_val->type = scalar_type;
            }
            out_val->type = operand_type;
            out_val->special = ConstValSpecialStatic;
        } else {
            if (ir_eval_float_op(ira, &instruction->base.base, instruction->fn_id, scalar_type,
                    operand_val, out_val) != nullptr)
            {
                return ira->codegen->invalid_inst_gen;
            }
        }
        return result;
    }

    // Runtime path: comptime_float operands are always comptime-known, so
    // only concrete float types reach here.
    ir_assert(scalar_type->id == ZigTypeIdFloat, &instruction->base.base);

    return ir_build_float_op_gen(ira, &instruction->base.base, operand, instruction->fn_id, operand_type);
}
|
|
|
|
// Analyzes @byteSwap(T, operand). T must be an integer whose bit width is
// divisible by 8; the operand may also be a vector of T (or an array
// coercible to one), in which case the swap is applied element-wise.
// Comptime-known operands are folded; otherwise a bswap gen instruction
// is emitted.
static IrInstGen *ir_analyze_instruction_bswap(IrAnalyze *ira, IrInstSrcBswap *instruction) {
    Error err;

    ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
    if (type_is_invalid(int_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *uncasted_op = instruction->op->child;
    if (type_is_invalid(uncasted_op->value->type))
        return ira->codegen->invalid_inst_gen;

    // Determine whether the operand is (or can become) a vector, and of
    // what length.
    uint32_t vector_len = UINT32_MAX; // means not a vector
    if (uncasted_op->value->type->id == ZigTypeIdArray) {
        bool can_be_vec_elem;
        if ((err = is_valid_vector_elem_type(ira->codegen, uncasted_op->value->type->data.array.child_type,
                &can_be_vec_elem)))
        {
            return ira->codegen->invalid_inst_gen;
        }
        if (can_be_vec_elem) {
            vector_len = uncasted_op->value->type->data.array.len;
        }
    } else if (uncasted_op->value->type->id == ZigTypeIdVector) {
        vector_len = uncasted_op->value->type->data.vector.len;
    }

    bool is_vector = (vector_len != UINT32_MAX);
    ZigType *op_type = is_vector ? get_vector_type(ira->codegen, vector_len, int_type) : int_type;

    IrInstGen *op = ir_implicit_cast(ira, uncasted_op, op_type);
    if (type_is_invalid(op->value->type))
        return ira->codegen->invalid_inst_gen;

    // Swapping a single byte (or zero bits) is a no-op.
    if (int_type->data.integral.bit_count == 8 || int_type->data.integral.bit_count == 0)
        return op;

    if (int_type->data.integral.bit_count % 8 != 0) {
        ir_add_error(ira, &instruction->op->base,
            buf_sprintf("@byteSwap integer type '%s' has %" PRIu32 " bits which is not evenly divisible by 8",
                buf_ptr(&int_type->name), int_type->data.integral.bit_count));
        return ira->codegen->invalid_inst_gen;
    }

    if (instr_is_comptime(op)) {
        ZigValue *val = ir_resolve_const(ira, op, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, &instruction->base.base, op_type);

        IrInstGen *result = ir_const(ira, &instruction->base.base, op_type);
        // Scratch buffer holding one element's bytes; the swap is performed
        // by writing and re-reading the two's-complement bytes with opposite
        // endianness flags.
        const size_t buf_size = int_type->data.integral.bit_count / 8;
        uint8_t *buf = heap::c_allocator.allocate_nonzero<uint8_t>(buf_size);
        if (is_vector) {
            expand_undef_array(ira->codegen, val);
            result->value->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(op_type->data.vector.len);
            for (unsigned i = 0; i < op_type->data.vector.len; i += 1) {
                ZigValue *op_elem_val = &val->data.x_array.data.s_none.elements[i];
                if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, instruction->base.base.source_node,
                        op_elem_val, UndefOk)))
                {
                    return ira->codegen->invalid_inst_gen;
                }
                ZigValue *result_elem_val = &result->value->data.x_array.data.s_none.elements[i];
                result_elem_val->type = int_type;
                // Undef lanes stay undef in the result.
                result_elem_val->special = op_elem_val->special;
                if (op_elem_val->special == ConstValSpecialUndef)
                    continue;

                bigint_write_twos_complement(&op_elem_val->data.x_bigint, buf, int_type->data.integral.bit_count, true);
                bigint_read_twos_complement(&result->value->data.x_array.data.s_none.elements[i].data.x_bigint,
                        buf, int_type->data.integral.bit_count, false,
                        int_type->data.integral.is_signed);
            }
        } else {
            bigint_write_twos_complement(&val->data.x_bigint, buf, int_type->data.integral.bit_count, true);
            bigint_read_twos_complement(&result->value->data.x_bigint, buf, int_type->data.integral.bit_count, false,
                    int_type->data.integral.is_signed);
        }
        heap::c_allocator.deallocate(buf, buf_size);
        return result;
    }

    return ir_build_bswap_gen(ira, &instruction->base.base, op_type, op);
}
|
|
|
|
// Analyzes @bitReverse(T, operand) for an integer type T. A zero-bit
// integer trivially reverses to 0; comptime-known operands are folded by
// reversing the bits through a byte buffer; otherwise a bit-reverse gen
// instruction is emitted.
static IrInstGen *ir_analyze_instruction_bit_reverse(IrAnalyze *ira, IrInstSrcBitReverse *instruction) {
    ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
    if (type_is_invalid(int_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op = ir_implicit_cast(ira, instruction->op->child, int_type);
    if (type_is_invalid(op->value->type))
        return ira->codegen->invalid_inst_gen;

    if (int_type->data.integral.bit_count == 0) {
        IrInstGen *result = ir_const(ira, &instruction->base.base, int_type);
        bigint_init_unsigned(&result->value->data.x_bigint, 0);
        return result;
    }

    if (instr_is_comptime(op)) {
        ZigValue *val = ir_resolve_const(ira, op, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, &instruction->base.base, int_type);

        IrInstGen *result = ir_const(ira, &instruction->base.base, int_type);
        // Serialize the value into a zeroed byte buffer, copy each set bit
        // to its mirrored position in a second buffer, then read the result
        // back. Both write and read use the host's endianness so the bit
        // positions line up.
        size_t num_bits = int_type->data.integral.bit_count;
        size_t buf_size = (num_bits + 7) / 8;
        uint8_t *comptime_buf = heap::c_allocator.allocate_nonzero<uint8_t>(buf_size);
        uint8_t *result_buf = heap::c_allocator.allocate_nonzero<uint8_t>(buf_size);
        memset(comptime_buf,0,buf_size);
        memset(result_buf,0,buf_size);

        bigint_write_twos_complement(&val->data.x_bigint,comptime_buf,num_bits,ira->codegen->is_big_endian);

        // Walk the bits from both ends simultaneously.
        size_t bit_i = 0;
        size_t bit_rev_i = num_bits - 1;
        for (; bit_i < num_bits; bit_i++, bit_rev_i--) {
            if (comptime_buf[bit_i / 8] & (1 << (bit_i % 8))) {
                result_buf[bit_rev_i / 8] |= (1 << (bit_rev_i % 8));
            }
        }

        bigint_read_twos_complement(&result->value->data.x_bigint,
                result_buf,
                int_type->data.integral.bit_count,
                ira->codegen->is_big_endian,
                int_type->data.integral.is_signed);

        heap::c_allocator.deallocate(comptime_buf, buf_size);
        heap::c_allocator.deallocate(result_buf, buf_size);
        return result;
    }

    return ir_build_bit_reverse_gen(ira, &instruction->base.base, int_type, op);
}
|
|
|
|
|
|
// Analyzes @enumToInt(value): validates the operand and delegates the
// actual conversion to ir_analyze_enum_to_int.
static IrInstGen *ir_analyze_instruction_enum_to_int(IrAnalyze *ira, IrInstSrcEnumToInt *instruction) {
    IrInstGen *enum_operand = instruction->target->child;
    if (type_is_invalid(enum_operand->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_enum_to_int(ira, &instruction->base.base, enum_operand);
}
|
|
|
|
// Analyzes @intToEnum(DestEnum, int_value): verifies the destination is an
// enum, coerces the operand to the enum's integer tag type, and delegates
// the conversion (including valid-tag checking) to ir_analyze_int_to_enum.
static IrInstGen *ir_analyze_instruction_int_to_enum(IrAnalyze *ira, IrInstSrcIntToEnum *instruction) {
    Error err;
    IrInstGen *dest_type_value = instruction->dest_type->child;
    ZigType *enum_type = ir_resolve_type(ira, dest_type_value);
    if (type_is_invalid(enum_type))
        return ira->codegen->invalid_inst_gen;

    if (enum_type->id != ZigTypeIdEnum) {
        ir_add_error(ira, &instruction->dest_type->base,
            buf_sprintf("expected enum, found type '%s'", buf_ptr(&enum_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    // Resolve to zero-bits-known before reading the integer tag type.
    if ((err = type_resolve(ira->codegen, enum_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *uncasted_target = instruction->target->child;
    if (type_is_invalid(uncasted_target->value->type))
        return ira->codegen->invalid_inst_gen;

    // Coerce the integer operand to the enum's tag integer type first.
    IrInstGen *casted_target = ir_implicit_cast(ira, uncasted_target,
            enum_type->data.enumeration.tag_int_type);
    if (type_is_invalid(casted_target->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_int_to_enum(ira, &instruction->base.base, casted_target, enum_type);
}
|
|
|
|
// Checks that comptime control flow does not escape a runtime block.
// Both operands are comptime-resolvable bools; the only failure mode is
// comptime flow (is_comptime) inside a non-comptime scope.
static IrInstGen *ir_analyze_instruction_check_runtime_scope(IrAnalyze *ira, IrInstSrcCheckRuntimeScope *instruction) {
    IrInstGen *scope_flag_inst = instruction->scope_is_comptime->child;
    bool block_is_comptime;
    if (!ir_resolve_bool(ira, scope_flag_inst, &block_is_comptime))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *flow_flag_inst = instruction->is_comptime->child;
    bool flow_is_comptime;
    if (!ir_resolve_bool(ira, flow_flag_inst, &flow_is_comptime))
        return ira->codegen->invalid_inst_gen;

    if (flow_is_comptime && !block_is_comptime) {
        ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
            buf_sprintf("comptime control flow inside runtime block"));
        // Point the note at where the runtime block was introduced.
        add_error_note(ira->codegen, msg, scope_flag_inst->base.source_node,
            buf_sprintf("runtime block created here"));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Analyzes @hasDecl(Container, "name"): comptime-true iff the container
// declares `name` and the declaration is visible from the call site
// (public, or private within the same import).
static IrInstGen *ir_analyze_instruction_has_decl(IrAnalyze *ira, IrInstSrcHasDecl *instruction) {
    ZigType *container_type = ir_resolve_type(ira, instruction->container->child);
    if (type_is_invalid(container_type))
        return ira->codegen->invalid_inst_gen;

    Buf *decl_name = ir_resolve_str(ira, instruction->name->child);
    if (decl_name == nullptr)
        return ira->codegen->invalid_inst_gen;

    if (!is_container(container_type)) {
        ir_add_error(ira, &instruction->container->base,
            buf_sprintf("expected struct, enum, or union; found '%s'", buf_ptr(&container_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    Tld *tld = find_container_decl(ira->codegen, get_container_scope(container_type), decl_name);
    // The decl counts only when it exists and is visible from this scope:
    // private decls are visible only within their own import.
    bool decl_is_visible = tld != nullptr &&
        (tld->visib_mod != VisibModPrivate ||
            tld->import == get_scope_import(instruction->base.base.scope));
    return ir_const_bool(ira, &instruction->base.base, decl_is_visible);
}
|
|
|
|
// Report an undeclared identifier, once.
static IrInstGen *ir_analyze_instruction_undeclared_ident(IrAnalyze *ira, IrInstSrcUndeclaredIdent *instruction) {
    Scope *scope = instruction->base.base.scope;
    AstNode *source_node = instruction->base.base.source_node;
    // Register a poisoned (invalid-typed) variable under the same name in the
    // global scope so that later references to this identifier resolve to it
    // instead of repeating the "undeclared identifier" error.
    populate_invalid_variable_in_scope(ira->codegen, scope, source_node, instruction->name);
    ir_add_error(ira, &instruction->base.base,
        buf_sprintf("use of undeclared identifier '%s'", buf_ptr(instruction->name)));
    return ira->codegen->invalid_inst_gen;
}
|
|
|
|
// Finish analysis of an expression: store the analyzed value into its result
// location (unless it was already written), and propagate comptime-ness into
// inferred result pointers. Produces a void constant on success.
static IrInstGen *ir_analyze_instruction_end_expr(IrAnalyze *ira, IrInstSrcEndExpr *instruction) {
    IrInstGen *value = instruction->value->child;
    if (type_is_invalid(value->value->type))
        return ira->codegen->invalid_inst_gen;

    // NOTE(review): captured before ir_resolve_result — presumably resolving
    // the result location can itself mark it written; confirm.
    bool was_written = instruction->result_loc->written;
    IrInstGen *result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
            value->value->type, value, false, true);
    if (result_loc != nullptr) {
        if (type_is_invalid(result_loc->value->type))
            return ira->codegen->invalid_inst_gen;
        if (result_loc->value->type->id == ZigTypeIdUnreachable)
            return result_loc;

        // Peer result locations are always stored through, even if written.
        if (!was_written || instruction->result_loc->id == ResultLocIdPeer) {
            IrInstGen *store_ptr = ir_analyze_store_ptr(ira, &instruction->base.base, result_loc, value,
                    instruction->result_loc->allow_write_through_const);
            if (type_is_invalid(store_ptr->value->type)) {
                // Special-case diagnostics: returning an error (union/set)
                // from a function whose return type cannot hold errors.
                if (instruction->result_loc->id == ResultLocIdReturn &&
                    (value->value->type->id == ZigTypeIdErrorUnion || value->value->type->id == ZigTypeIdErrorSet) &&
                    ira->explicit_return_type->id != ZigTypeIdErrorUnion && ira->explicit_return_type->id != ZigTypeIdErrorSet)
                {
                    add_error_note(ira->codegen, ira->new_irb.exec->first_err_trace_msg,
                        ira->explicit_return_type_source_node, buf_create_from_str("function cannot return an error"));
                }
                return ira->codegen->invalid_inst_gen;
            }
        }

        // For inferred-mutability pointers (non-peer), finalize: a comptime
        // value makes the location comptime-const, otherwise it is runtime.
        if (result_loc->value->data.x_ptr.mut == ConstPtrMutInfer &&
            instruction->result_loc->id != ResultLocIdPeer)
        {
            if (instr_is_comptime(value)) {
                result_loc->value->data.x_ptr.mut = ConstPtrMutComptimeConst;
            } else {
                result_loc->value->special = ConstValSpecialRuntime;
            }
        }
    }

    return ir_const_void(ira, &instruction->base.base);
}
|
|
|
|
// Analyze an implicit cast: coerce the operand to the destination type
// recorded in the cast's result location.
static IrInstGen *ir_analyze_instruction_implicit_cast(IrAnalyze *ira, IrInstSrcImplicitCast *instruction) {
    IrInstGen *operand = instruction->operand->child;
    if (type_is_invalid(operand->value->type))
        return operand;

    // The destination type is carried by the result location's source
    // instruction.
    IrInstGen *dest_type_inst = instruction->result_loc_cast->base.source_instruction->child;
    ZigType *dest_type = ir_resolve_type(ira, dest_type_inst);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;

    return ir_implicit_cast2(ira, &instruction->base.base, operand, dest_type);
}
|
|
|
|
// Analyze @bitCast: forward the operand into its result location, then
// perform the bit cast to the destination type recorded on that location.
static IrInstGen *ir_analyze_instruction_bit_cast_src(IrAnalyze *ira, IrInstSrcBitCast *instruction) {
    IrInstGen *operand = instruction->operand->child;
    if (type_is_invalid(operand->value->type))
        return operand;

    IrInstGen *result_loc = ir_resolve_result(ira, &instruction->base.base,
            &instruction->result_loc_bit_cast->base, operand->value->type, operand, false, true);
    // An invalid or unreachable result location short-circuits the analysis.
    if (result_loc != nullptr) {
        bool short_circuit = type_is_invalid(result_loc->value->type) ||
            result_loc->value->type->id == ZigTypeIdUnreachable;
        if (short_circuit)
            return result_loc;
    }

    ZigType *dest_type = ir_resolve_type(ira,
            instruction->result_loc_bit_cast->base.source_instruction->child);
    if (type_is_invalid(dest_type))
        return ira->codegen->invalid_inst_gen;
    return ir_analyze_bit_cast(ira, &instruction->base.base, operand, dest_type);
}
|
|
|
|
// Implements @unionInit: resolve the union type, field name, and the two
// result locations, then delegate to ir_analyze_union_init.
static IrInstGen *ir_analyze_instruction_union_init_named_field(IrAnalyze *ira,
        IrInstSrcUnionInitNamedField *instruction)
{
    ZigType *union_type = ir_resolve_type(ira, instruction->union_type->child);
    if (type_is_invalid(union_type))
        return ira->codegen->invalid_inst_gen;

    if (union_type->id != ZigTypeIdUnion) {
        ir_add_error(ira, &instruction->union_type->base,
            buf_sprintf("non-union type '%s' passed to @unionInit", buf_ptr(&union_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    Buf *field_name = ir_resolve_str(ira, instruction->field_name->child);
    if (field_name == nullptr)
        return ira->codegen->invalid_inst_gen;

    // Both the field's result location and the union's result location must
    // have analyzed cleanly before initializing.
    IrInstGen *field_result_loc = instruction->field_result_loc->child;
    IrInstGen *result_loc = instruction->result_loc->child;
    if (type_is_invalid(field_result_loc->value->type) || type_is_invalid(result_loc->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_union_init(ira, &instruction->base.base, instruction->base.base.source_node,
            union_type, field_name, field_result_loc, result_loc);
}
|
|
|
|
// Analyze the start of a `suspend` block: lowers directly to the gen
// instruction; async bookkeeping happens in the matching suspend-finish.
static IrInstGen *ir_analyze_instruction_suspend_begin(IrAnalyze *ira, IrInstSrcSuspendBegin *instruction) {
    return ir_build_suspend_begin_gen(ira, &instruction->base.base);
}
|
|
|
|
// Analyze the end of a `suspend` block, pairing it with the analyzed
// suspend-begin instruction and marking the function as inferred-async.
static IrInstGen *ir_analyze_instruction_suspend_finish(IrAnalyze *ira, IrInstSrcSuspendFinish *instruction) {
    IrInstGen *begin_inst = instruction->begin->base.child;
    if (type_is_invalid(begin_inst->value->type))
        return ira->codegen->invalid_inst_gen;
    ir_assert(begin_inst->id == IrInstGenIdSuspendBegin, &instruction->base.base);
    IrInstGenSuspendBegin *begin = reinterpret_cast<IrInstGenSuspendBegin *>(begin_inst);

    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    ir_assert(fn_entry != nullptr, &instruction->base.base);

    // A suspend point makes the containing function async; remember the
    // first node that caused the inference.
    if (fn_entry->inferred_async_node == nullptr)
        fn_entry->inferred_async_node = instruction->base.base.source_node;

    return ir_build_suspend_finish_gen(ira, &instruction->base.base, begin);
}
|
|
|
|
// Coerce a frame operand to `anyframe->T`, where T is the frame's result
// type. Accepts either a *@Frame(func) pointer directly, a pointer that
// dereferences to *@Frame(func), or an anyframe->T value. On success,
// *target_fn is set to the concrete function when the operand identified one
// (the @Frame(func) cases), otherwise it is left null.
static IrInstGen *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *frame_ptr, ZigFn **target_fn)
{
    if (type_is_invalid(frame_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    *target_fn = nullptr;

    ZigType *result_type;
    IrInstGen *frame;
    // Case 1: the operand is already a single-item pointer to @Frame(func);
    // the result type is func's declared return type.
    if (frame_ptr->value->type->id == ZigTypeIdPointer &&
        frame_ptr->value->type->data.pointer.ptr_len == PtrLenSingle &&
        frame_ptr->value->type->data.pointer.child_type->id == ZigTypeIdFnFrame)
    {
        ZigFn *func = frame_ptr->value->type->data.pointer.child_type->data.frame.fn;
        result_type = func->type_entry->data.fn.fn_type_id.return_type;
        *target_fn = func;
        frame = frame_ptr;
    } else {
        // Otherwise dereference once and re-check.
        frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr);
        // Case 2: the dereferenced value is a *@Frame(func).
        if (frame->value->type->id == ZigTypeIdPointer &&
            frame->value->type->data.pointer.ptr_len == PtrLenSingle &&
            frame->value->type->data.pointer.child_type->id == ZigTypeIdFnFrame)
        {
            ZigFn *func = frame->value->type->data.pointer.child_type->data.frame.fn;
            result_type = func->type_entry->data.fn.fn_type_id.return_type;
            *target_fn = func;
        } else if (frame->value->type->id != ZigTypeIdAnyFrame ||
            frame->value->type->data.any_frame.result_type == nullptr)
        {
            // Neither a function frame nor an anyframe->T (a plain `anyframe`
            // with no result type is also rejected).
            ir_add_error(ira, source_instr,
                buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value->type->name)));
            return ira->codegen->invalid_inst_gen;
        } else {
            // Case 3: already anyframe->T.
            result_type = frame->value->type->data.any_frame.result_type;
        }
    }

    // Unify all cases by casting to anyframe->result_type.
    ZigType *any_frame_type = get_any_frame_type(ira->codegen, result_type);
    IrInstGen *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
    if (type_is_invalid(casted_frame->value->type))
        return ira->codegen->invalid_inst_gen;

    return casted_frame;
}
|
|
|
|
// Analyze `await` (and `nosuspend await`): coerce the operand to
// anyframe->T, update the containing function's async/error bookkeeping,
// resolve a result location for T if it has bits, and emit the await gen
// instruction.
static IrInstGen *ir_analyze_instruction_await(IrAnalyze *ira, IrInstSrcAwait *instruction) {
    IrInstGen *operand = instruction->frame->child;
    if (type_is_invalid(operand->value->type))
        return ira->codegen->invalid_inst_gen;
    ZigFn *target_fn;
    IrInstGen *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base.base, operand, &target_fn);
    if (type_is_invalid(frame->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *result_type = frame->value->type->data.any_frame.result_type;

    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    ir_assert(fn_entry != nullptr, &instruction->base.base);

    // If it's not @Frame(func) then it's definitely a suspend point
    if (target_fn == nullptr && !instruction->is_nosuspend) {
        if (fn_entry->inferred_async_node == nullptr) {
            fn_entry->inferred_async_node = instruction->base.base.source_node;
        }
    }

    // Awaiting a fallible result participates in error return tracing.
    if (type_can_fail(result_type)) {
        fn_entry->calls_or_awaits_errorable_fn = true;
    }

    // Only allocate a result location when the awaited value has bits.
    IrInstGen *result_loc;
    if (type_has_bits(ira->codegen, result_type)) {
        result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
                result_type, nullptr, true, true);
        if (result_loc != nullptr &&
            (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable))
        {
            return result_loc;
        }
    } else {
        result_loc = nullptr;
    }

    IrInstGenAwait *result = ir_build_await_gen(ira, &instruction->base.base, frame, result_type, result_loc,
            instruction->is_nosuspend);
    result->target_fn = target_fn;
    // Track every await of this function; NOTE(review): presumably consumed
    // by later codegen of the async frame — confirm.
    fn_entry->await_list.append(result);
    return ir_finish_anal(ira, &result->base);
}
|
|
|
|
// Analyze `resume`: obtain the frame value, coerce it to plain `anyframe`,
// and emit the resume gen instruction.
static IrInstGen *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstSrcResume *instruction) {
    IrInstGen *frame_ptr = instruction->frame->child;
    if (type_is_invalid(frame_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    // A single-item pointer to @Frame(func) is used as-is; any other operand
    // is dereferenced once to obtain the frame.
    ZigType *operand_type = frame_ptr->value->type;
    bool is_frame_pointer = operand_type->id == ZigTypeIdPointer &&
        operand_type->data.pointer.ptr_len == PtrLenSingle &&
        operand_type->data.pointer.child_type->id == ZigTypeIdFnFrame;
    IrInstGen *frame = is_frame_pointer
        ? frame_ptr
        : ir_get_deref(ira, &instruction->base.base, frame_ptr, nullptr);

    // Coerce to `anyframe` (no result type) before emitting the resume.
    ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
    IrInstGen *casted_frame = ir_implicit_cast2(ira, &instruction->frame->base, frame, any_frame_type);
    if (type_is_invalid(casted_frame->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_build_resume_gen(ira, &instruction->base.base, casted_frame);
}
|
|
|
|
// Analyze spill-begin: record that a value must survive across a suspend
// point. Becomes a no-op for comptime execution and zero-bit values.
static IrInstGen *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstSrcSpillBegin *instruction) {
    // Comptime (inlined) execution never needs spills.
    if (ir_should_inline(ira->old_irb.exec, instruction->base.base.scope))
        return ir_const_void(ira, &instruction->base.base);

    IrInstGen *spilled_value = instruction->operand->child;
    if (type_is_invalid(spilled_value->value->type))
        return ira->codegen->invalid_inst_gen;

    // Zero-bit values have nothing to spill.
    if (!type_has_bits(ira->codegen, spilled_value->value->type))
        return ir_const_void(ira, &instruction->base.base);

    switch (instruction->spill_id) {
        case SpillIdInvalid:
            zig_unreachable();
        case SpillIdRetErrCode:
            // The executable needs a slot for the error return code.
            ira->new_irb.exec->need_err_code_spill = true;
            break;
    }

    return ir_build_spill_begin_gen(ira, &instruction->base.base, spilled_value, instruction->spill_id);
}
|
|
|
|
// Analyze spill-end: reload a value previously saved by spill-begin. When no
// spill actually happened, the original operand is used directly.
static IrInstGen *ir_analyze_instruction_spill_end(IrAnalyze *ira, IrInstSrcSpillEnd *instruction) {
    IrInstGen *spilled_value = instruction->begin->operand->child;
    if (type_is_invalid(spilled_value->value->type))
        return ira->codegen->invalid_inst_gen;

    // Mirror the conditions under which spill-begin emitted nothing:
    // comptime execution, zero-bit type, or a comptime-known operand.
    bool spill_elided = ir_should_inline(ira->old_irb.exec, instruction->base.base.scope) ||
        !type_has_bits(ira->codegen, spilled_value->value->type) ||
        instr_is_comptime(spilled_value);
    if (spill_elided)
        return spilled_value;

    IrInstGen *begin_inst = instruction->begin->base.child;
    ir_assert(begin_inst->id == IrInstGenIdSpillBegin, &instruction->base.base);
    IrInstGenSpillBegin *begin = reinterpret_cast<IrInstGenSpillBegin *>(begin_inst);

    return ir_build_spill_end_gen(ira, &instruction->base.base, begin, spilled_value->value->type);
}
|
|
|
|
// Implements @src(): builds a comptime constant of the builtin
// `SourceLocation` struct {file, fn_name, line, column} describing the call
// site. Only valid inside a function.
static IrInstGen *ir_analyze_instruction_src(IrAnalyze *ira, IrInstSrcSrc *instruction) {
    ZigFn *fn_entry = scope_fn_entry(instruction->base.base.scope);
    if (fn_entry == nullptr) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("@src outside function"));
        return ira->codegen->invalid_inst_gen;
    }

    // Slice type for the two string fields ([:0]const u8: sentinel-terminated
    // via the interned zero byte).
    ZigType *u8_ptr = get_pointer_to_type_extra2(
        ira->codegen, ira->codegen->builtin_types.entry_u8,
        true, false, PtrLenUnknown,
        0, 0, 0, false, VECTOR_INDEX_NONE, nullptr, ira->codegen->intern.for_zero_byte());
    ZigType *u8_slice = get_slice_type(ira->codegen, u8_ptr);

    ZigType *source_location_type = get_builtin_type(ira->codegen, "SourceLocation");
    // The builtin type is defined by the std lib; failing to resolve its size
    // is a compiler bug, not a user error.
    if (type_resolve(ira->codegen, source_location_type, ResolveStatusSizeKnown)) {
        zig_unreachable();
    }

    ZigValue *result = ira->codegen->pass1_arena->create<ZigValue>();
    result->special = ConstValSpecialStatic;
    result->type = source_location_type;

    ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 4);
    result->data.x_struct.fields = fields;

    // file: [:0]const u8
    ensure_field_index(source_location_type, "file", 0);
    fields[0]->special = ConstValSpecialStatic;

    ZigType *import = instruction->base.base.source_node->owner;
    Buf *path = import->data.structure.root_struct->path;
    ZigValue *file_name = create_const_str_lit(ira->codegen, path)->data.x_ptr.data.ref.pointee;
    init_const_slice(ira->codegen, fields[0], file_name, 0, buf_len(path), true);
    fields[0]->type = u8_slice;

    // fn_name: [:0]const u8
    ensure_field_index(source_location_type, "fn_name", 1);
    fields[1]->special = ConstValSpecialStatic;

    ZigValue *fn_name = create_const_str_lit(ira->codegen, &fn_entry->symbol_name)->data.x_ptr.data.ref.pointee;
    init_const_slice(ira->codegen, fields[1], fn_name, 0, buf_len(&fn_entry->symbol_name), true);
    fields[1]->type = u8_slice;

    // line: u32 (the +1 converts the AST's 0-based position to 1-based)
    ensure_field_index(source_location_type, "line", 2);
    fields[2]->special = ConstValSpecialStatic;
    fields[2]->type = ira->codegen->builtin_types.entry_u32;
    bigint_init_unsigned(&fields[2]->data.x_bigint, instruction->base.base.source_node->line + 1);

    // column: u32 (same 0-based to 1-based conversion)
    ensure_field_index(source_location_type, "column", 3);
    fields[3]->special = ConstValSpecialStatic;
    fields[3]->type = ira->codegen->builtin_types.entry_u32;
    bigint_init_unsigned(&fields[3]->data.x_bigint, instruction->base.base.source_node->column + 1);

    return ir_const_move(ira, &instruction->base.base, result);
}
|
|
|
|
// Central dispatch: route a source IR instruction to its analysis routine,
// producing the corresponding gen instruction (or invalid_inst_gen on error).
// Every IrInstSrcId must have a case here; the switch has no default so the
// compiler flags any newly added id that is not handled.
static IrInstGen *ir_analyze_instruction_base(IrAnalyze *ira, IrInstSrc *instruction) {
    switch (instruction->id) {
        case IrInstSrcIdInvalid:
            zig_unreachable();

        case IrInstSrcIdReturn:
            return ir_analyze_instruction_return(ira, (IrInstSrcReturn *)instruction);
        case IrInstSrcIdConst:
            return ir_analyze_instruction_const(ira, (IrInstSrcConst *)instruction);
        case IrInstSrcIdUnOp:
            return ir_analyze_instruction_un_op(ira, (IrInstSrcUnOp *)instruction);
        case IrInstSrcIdBinOp:
            return ir_analyze_instruction_bin_op(ira, (IrInstSrcBinOp *)instruction);
        case IrInstSrcIdMergeErrSets:
            return ir_analyze_instruction_merge_err_sets(ira, (IrInstSrcMergeErrSets *)instruction);
        case IrInstSrcIdDeclVar:
            return ir_analyze_instruction_decl_var(ira, (IrInstSrcDeclVar *)instruction);
        case IrInstSrcIdLoadPtr:
            return ir_analyze_instruction_load_ptr(ira, (IrInstSrcLoadPtr *)instruction);
        case IrInstSrcIdStorePtr:
            return ir_analyze_instruction_store_ptr(ira, (IrInstSrcStorePtr *)instruction);
        case IrInstSrcIdElemPtr:
            return ir_analyze_instruction_elem_ptr(ira, (IrInstSrcElemPtr *)instruction);
        case IrInstSrcIdVarPtr:
            return ir_analyze_instruction_var_ptr(ira, (IrInstSrcVarPtr *)instruction);
        case IrInstSrcIdFieldPtr:
            return ir_analyze_instruction_field_ptr(ira, (IrInstSrcFieldPtr *)instruction);
        case IrInstSrcIdCall:
            return ir_analyze_instruction_call(ira, (IrInstSrcCall *)instruction);
        case IrInstSrcIdCallArgs:
            return ir_analyze_instruction_call_args(ira, (IrInstSrcCallArgs *)instruction);
        case IrInstSrcIdCallExtra:
            return ir_analyze_instruction_call_extra(ira, (IrInstSrcCallExtra *)instruction);
        case IrInstSrcIdAsyncCallExtra:
            return ir_analyze_instruction_async_call_extra(ira, (IrInstSrcAsyncCallExtra *)instruction);
        case IrInstSrcIdBr:
            return ir_analyze_instruction_br(ira, (IrInstSrcBr *)instruction);
        case IrInstSrcIdCondBr:
            return ir_analyze_instruction_cond_br(ira, (IrInstSrcCondBr *)instruction);
        case IrInstSrcIdUnreachable:
            return ir_analyze_instruction_unreachable(ira, (IrInstSrcUnreachable *)instruction);
        case IrInstSrcIdPhi:
            return ir_analyze_instruction_phi(ira, (IrInstSrcPhi *)instruction);
        case IrInstSrcIdTypeOf:
            return ir_analyze_instruction_typeof(ira, (IrInstSrcTypeOf *)instruction);
        case IrInstSrcIdSetCold:
            return ir_analyze_instruction_set_cold(ira, (IrInstSrcSetCold *)instruction);
        case IrInstSrcIdSetRuntimeSafety:
            return ir_analyze_instruction_set_runtime_safety(ira, (IrInstSrcSetRuntimeSafety *)instruction);
        case IrInstSrcIdSetFloatMode:
            return ir_analyze_instruction_set_float_mode(ira, (IrInstSrcSetFloatMode *)instruction);
        case IrInstSrcIdAnyFrameType:
            return ir_analyze_instruction_any_frame_type(ira, (IrInstSrcAnyFrameType *)instruction);
        case IrInstSrcIdSliceType:
            return ir_analyze_instruction_slice_type(ira, (IrInstSrcSliceType *)instruction);
        case IrInstSrcIdAsm:
            return ir_analyze_instruction_asm(ira, (IrInstSrcAsm *)instruction);
        case IrInstSrcIdArrayType:
            return ir_analyze_instruction_array_type(ira, (IrInstSrcArrayType *)instruction);
        case IrInstSrcIdSizeOf:
            return ir_analyze_instruction_size_of(ira, (IrInstSrcSizeOf *)instruction);
        case IrInstSrcIdTestNonNull:
            return ir_analyze_instruction_test_non_null(ira, (IrInstSrcTestNonNull *)instruction);
        case IrInstSrcIdOptionalUnwrapPtr:
            return ir_analyze_instruction_optional_unwrap_ptr(ira, (IrInstSrcOptionalUnwrapPtr *)instruction);
        case IrInstSrcIdClz:
            return ir_analyze_instruction_clz(ira, (IrInstSrcClz *)instruction);
        case IrInstSrcIdCtz:
            return ir_analyze_instruction_ctz(ira, (IrInstSrcCtz *)instruction);
        case IrInstSrcIdPopCount:
            return ir_analyze_instruction_pop_count(ira, (IrInstSrcPopCount *)instruction);
        case IrInstSrcIdBswap:
            return ir_analyze_instruction_bswap(ira, (IrInstSrcBswap *)instruction);
        case IrInstSrcIdBitReverse:
            return ir_analyze_instruction_bit_reverse(ira, (IrInstSrcBitReverse *)instruction);
        case IrInstSrcIdSwitchBr:
            return ir_analyze_instruction_switch_br(ira, (IrInstSrcSwitchBr *)instruction);
        case IrInstSrcIdSwitchTarget:
            return ir_analyze_instruction_switch_target(ira, (IrInstSrcSwitchTarget *)instruction);
        case IrInstSrcIdSwitchVar:
            return ir_analyze_instruction_switch_var(ira, (IrInstSrcSwitchVar *)instruction);
        case IrInstSrcIdSwitchElseVar:
            return ir_analyze_instruction_switch_else_var(ira, (IrInstSrcSwitchElseVar *)instruction);
        case IrInstSrcIdImport:
            return ir_analyze_instruction_import(ira, (IrInstSrcImport *)instruction);
        case IrInstSrcIdRef:
            return ir_analyze_instruction_ref(ira, (IrInstSrcRef *)instruction);
        case IrInstSrcIdContainerInitList:
            return ir_analyze_instruction_container_init_list(ira, (IrInstSrcContainerInitList *)instruction);
        case IrInstSrcIdContainerInitFields:
            return ir_analyze_instruction_container_init_fields(ira, (IrInstSrcContainerInitFields *)instruction);
        case IrInstSrcIdCompileErr:
            return ir_analyze_instruction_compile_err(ira, (IrInstSrcCompileErr *)instruction);
        case IrInstSrcIdCompileLog:
            return ir_analyze_instruction_compile_log(ira, (IrInstSrcCompileLog *)instruction);
        case IrInstSrcIdErrName:
            return ir_analyze_instruction_err_name(ira, (IrInstSrcErrName *)instruction);
        case IrInstSrcIdTypeName:
            return ir_analyze_instruction_type_name(ira, (IrInstSrcTypeName *)instruction);
        case IrInstSrcIdCImport:
            return ir_analyze_instruction_c_import(ira, (IrInstSrcCImport *)instruction);
        case IrInstSrcIdCInclude:
            return ir_analyze_instruction_c_include(ira, (IrInstSrcCInclude *)instruction);
        case IrInstSrcIdCDefine:
            return ir_analyze_instruction_c_define(ira, (IrInstSrcCDefine *)instruction);
        case IrInstSrcIdCUndef:
            return ir_analyze_instruction_c_undef(ira, (IrInstSrcCUndef *)instruction);
        case IrInstSrcIdEmbedFile:
            return ir_analyze_instruction_embed_file(ira, (IrInstSrcEmbedFile *)instruction);
        case IrInstSrcIdCmpxchg:
            return ir_analyze_instruction_cmpxchg(ira, (IrInstSrcCmpxchg *)instruction);
        case IrInstSrcIdFence:
            return ir_analyze_instruction_fence(ira, (IrInstSrcFence *)instruction);
        case IrInstSrcIdReduce:
            return ir_analyze_instruction_reduce(ira, (IrInstSrcReduce *)instruction);
        case IrInstSrcIdTruncate:
            return ir_analyze_instruction_truncate(ira, (IrInstSrcTruncate *)instruction);
        case IrInstSrcIdIntCast:
            return ir_analyze_instruction_int_cast(ira, (IrInstSrcIntCast *)instruction);
        case IrInstSrcIdFloatCast:
            return ir_analyze_instruction_float_cast(ira, (IrInstSrcFloatCast *)instruction);
        case IrInstSrcIdErrSetCast:
            return ir_analyze_instruction_err_set_cast(ira, (IrInstSrcErrSetCast *)instruction);
        case IrInstSrcIdIntToFloat:
            return ir_analyze_instruction_int_to_float(ira, (IrInstSrcIntToFloat *)instruction);
        case IrInstSrcIdFloatToInt:
            return ir_analyze_instruction_float_to_int(ira, (IrInstSrcFloatToInt *)instruction);
        case IrInstSrcIdBoolToInt:
            return ir_analyze_instruction_bool_to_int(ira, (IrInstSrcBoolToInt *)instruction);
        case IrInstSrcIdVectorType:
            return ir_analyze_instruction_vector_type(ira, (IrInstSrcVectorType *)instruction);
        case IrInstSrcIdShuffleVector:
            return ir_analyze_instruction_shuffle_vector(ira, (IrInstSrcShuffleVector *)instruction);
        case IrInstSrcIdSplat:
            return ir_analyze_instruction_splat(ira, (IrInstSrcSplat *)instruction);
        case IrInstSrcIdBoolNot:
            return ir_analyze_instruction_bool_not(ira, (IrInstSrcBoolNot *)instruction);
        case IrInstSrcIdMemset:
            return ir_analyze_instruction_memset(ira, (IrInstSrcMemset *)instruction);
        case IrInstSrcIdMemcpy:
            return ir_analyze_instruction_memcpy(ira, (IrInstSrcMemcpy *)instruction);
        case IrInstSrcIdSlice:
            return ir_analyze_instruction_slice(ira, (IrInstSrcSlice *)instruction);
        case IrInstSrcIdBreakpoint:
            return ir_analyze_instruction_breakpoint(ira, (IrInstSrcBreakpoint *)instruction);
        case IrInstSrcIdReturnAddress:
            return ir_analyze_instruction_return_address(ira, (IrInstSrcReturnAddress *)instruction);
        case IrInstSrcIdFrameAddress:
            return ir_analyze_instruction_frame_address(ira, (IrInstSrcFrameAddress *)instruction);
        case IrInstSrcIdFrameHandle:
            return ir_analyze_instruction_frame_handle(ira, (IrInstSrcFrameHandle *)instruction);
        case IrInstSrcIdFrameType:
            return ir_analyze_instruction_frame_type(ira, (IrInstSrcFrameType *)instruction);
        case IrInstSrcIdFrameSize:
            return ir_analyze_instruction_frame_size(ira, (IrInstSrcFrameSize *)instruction);
        case IrInstSrcIdAlignOf:
            return ir_analyze_instruction_align_of(ira, (IrInstSrcAlignOf *)instruction);
        case IrInstSrcIdOverflowOp:
            return ir_analyze_instruction_overflow_op(ira, (IrInstSrcOverflowOp *)instruction);
        case IrInstSrcIdTestErr:
            return ir_analyze_instruction_test_err(ira, (IrInstSrcTestErr *)instruction);
        case IrInstSrcIdUnwrapErrCode:
            return ir_analyze_instruction_unwrap_err_code(ira, (IrInstSrcUnwrapErrCode *)instruction);
        case IrInstSrcIdUnwrapErrPayload:
            return ir_analyze_instruction_unwrap_err_payload(ira, (IrInstSrcUnwrapErrPayload *)instruction);
        case IrInstSrcIdFnProto:
            return ir_analyze_instruction_fn_proto(ira, (IrInstSrcFnProto *)instruction);
        case IrInstSrcIdTestComptime:
            return ir_analyze_instruction_test_comptime(ira, (IrInstSrcTestComptime *)instruction);
        case IrInstSrcIdCheckSwitchProngs:
            return ir_analyze_instruction_check_switch_prongs(ira, (IrInstSrcCheckSwitchProngs *)instruction);
        case IrInstSrcIdCheckStatementIsVoid:
            return ir_analyze_instruction_check_statement_is_void(ira, (IrInstSrcCheckStatementIsVoid *)instruction);
        case IrInstSrcIdDeclRef:
            return ir_analyze_instruction_decl_ref(ira, (IrInstSrcDeclRef *)instruction);
        case IrInstSrcIdPanic:
            return ir_analyze_instruction_panic(ira, (IrInstSrcPanic *)instruction);
        case IrInstSrcIdPtrCast:
            return ir_analyze_instruction_ptr_cast(ira, (IrInstSrcPtrCast *)instruction);
        case IrInstSrcIdIntToPtr:
            return ir_analyze_instruction_int_to_ptr(ira, (IrInstSrcIntToPtr *)instruction);
        case IrInstSrcIdPtrToInt:
            return ir_analyze_instruction_ptr_to_int(ira, (IrInstSrcPtrToInt *)instruction);
        case IrInstSrcIdTagName:
            return ir_analyze_instruction_enum_tag_name(ira, (IrInstSrcTagName *)instruction);
        case IrInstSrcIdFieldParentPtr:
            return ir_analyze_instruction_field_parent_ptr(ira, (IrInstSrcFieldParentPtr *)instruction);
        case IrInstSrcIdByteOffsetOf:
            return ir_analyze_instruction_byte_offset_of(ira, (IrInstSrcByteOffsetOf *)instruction);
        case IrInstSrcIdBitOffsetOf:
            return ir_analyze_instruction_bit_offset_of(ira, (IrInstSrcBitOffsetOf *)instruction);
        case IrInstSrcIdTypeInfo:
            return ir_analyze_instruction_type_info(ira, (IrInstSrcTypeInfo *) instruction);
        case IrInstSrcIdType:
            return ir_analyze_instruction_type(ira, (IrInstSrcType *)instruction);
        case IrInstSrcIdHasField:
            return ir_analyze_instruction_has_field(ira, (IrInstSrcHasField *) instruction);
        case IrInstSrcIdSetEvalBranchQuota:
            return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstSrcSetEvalBranchQuota *)instruction);
        case IrInstSrcIdPtrType:
            return ir_analyze_instruction_ptr_type(ira, (IrInstSrcPtrType *)instruction);
        case IrInstSrcIdAlignCast:
            return ir_analyze_instruction_align_cast(ira, (IrInstSrcAlignCast *)instruction);
        case IrInstSrcIdImplicitCast:
            return ir_analyze_instruction_implicit_cast(ira, (IrInstSrcImplicitCast *)instruction);
        case IrInstSrcIdResolveResult:
            return ir_analyze_instruction_resolve_result(ira, (IrInstSrcResolveResult *)instruction);
        case IrInstSrcIdResetResult:
            return ir_analyze_instruction_reset_result(ira, (IrInstSrcResetResult *)instruction);
        case IrInstSrcIdSetAlignStack:
            return ir_analyze_instruction_set_align_stack(ira, (IrInstSrcSetAlignStack *)instruction);
        case IrInstSrcIdArgType:
            return ir_analyze_instruction_arg_type(ira, (IrInstSrcArgType *)instruction);
        case IrInstSrcIdTagType:
            return ir_analyze_instruction_tag_type(ira, (IrInstSrcTagType *)instruction);
        case IrInstSrcIdExport:
            return ir_analyze_instruction_export(ira, (IrInstSrcExport *)instruction);
        case IrInstSrcIdExtern:
            return ir_analyze_instruction_extern(ira, (IrInstSrcExtern *)instruction);
        case IrInstSrcIdErrorReturnTrace:
            return ir_analyze_instruction_error_return_trace(ira, (IrInstSrcErrorReturnTrace *)instruction);
        case IrInstSrcIdErrorUnion:
            return ir_analyze_instruction_error_union(ira, (IrInstSrcErrorUnion *)instruction);
        case IrInstSrcIdAtomicRmw:
            return ir_analyze_instruction_atomic_rmw(ira, (IrInstSrcAtomicRmw *)instruction);
        case IrInstSrcIdAtomicLoad:
            return ir_analyze_instruction_atomic_load(ira, (IrInstSrcAtomicLoad *)instruction);
        case IrInstSrcIdAtomicStore:
            return ir_analyze_instruction_atomic_store(ira, (IrInstSrcAtomicStore *)instruction);
        case IrInstSrcIdSaveErrRetAddr:
            return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstSrcSaveErrRetAddr *)instruction);
        case IrInstSrcIdAddImplicitReturnType:
            return ir_analyze_instruction_add_implicit_return_type(ira, (IrInstSrcAddImplicitReturnType *)instruction);
        case IrInstSrcIdFloatOp:
            return ir_analyze_instruction_float_op(ira, (IrInstSrcFloatOp *)instruction);
        case IrInstSrcIdMulAdd:
            return ir_analyze_instruction_mul_add(ira, (IrInstSrcMulAdd *)instruction);
        case IrInstSrcIdIntToErr:
            return ir_analyze_instruction_int_to_err(ira, (IrInstSrcIntToErr *)instruction);
        case IrInstSrcIdErrToInt:
            return ir_analyze_instruction_err_to_int(ira, (IrInstSrcErrToInt *)instruction);
        case IrInstSrcIdIntToEnum:
            return ir_analyze_instruction_int_to_enum(ira, (IrInstSrcIntToEnum *)instruction);
        case IrInstSrcIdEnumToInt:
            return ir_analyze_instruction_enum_to_int(ira, (IrInstSrcEnumToInt *)instruction);
        case IrInstSrcIdCheckRuntimeScope:
            return ir_analyze_instruction_check_runtime_scope(ira, (IrInstSrcCheckRuntimeScope *)instruction);
        case IrInstSrcIdHasDecl:
            return ir_analyze_instruction_has_decl(ira, (IrInstSrcHasDecl *)instruction);
        case IrInstSrcIdUndeclaredIdent:
            return ir_analyze_instruction_undeclared_ident(ira, (IrInstSrcUndeclaredIdent *)instruction);
        case IrInstSrcIdAlloca:
            // Allocas have no direct analysis result here; NOTE(review):
            // presumably analyzed on demand via result locations — confirm.
            return nullptr;
        case IrInstSrcIdEndExpr:
            return ir_analyze_instruction_end_expr(ira, (IrInstSrcEndExpr *)instruction);
        case IrInstSrcIdBitCast:
            return ir_analyze_instruction_bit_cast_src(ira, (IrInstSrcBitCast *)instruction);
        case IrInstSrcIdUnionInitNamedField:
            return ir_analyze_instruction_union_init_named_field(ira, (IrInstSrcUnionInitNamedField *)instruction);
        case IrInstSrcIdSuspendBegin:
            return ir_analyze_instruction_suspend_begin(ira, (IrInstSrcSuspendBegin *)instruction);
        case IrInstSrcIdSuspendFinish:
            return ir_analyze_instruction_suspend_finish(ira, (IrInstSrcSuspendFinish *)instruction);
        case IrInstSrcIdResume:
            return ir_analyze_instruction_resume(ira, (IrInstSrcResume *)instruction);
        case IrInstSrcIdAwait:
            return ir_analyze_instruction_await(ira, (IrInstSrcAwait *)instruction);
        case IrInstSrcIdSpillBegin:
            return ir_analyze_instruction_spill_begin(ira, (IrInstSrcSpillBegin *)instruction);
        case IrInstSrcIdSpillEnd:
            return ir_analyze_instruction_spill_end(ira, (IrInstSrcSpillEnd *)instruction);
        case IrInstSrcIdWasmMemorySize:
            return ir_analyze_instruction_wasm_memory_size(ira, (IrInstSrcWasmMemorySize *)instruction);
        case IrInstSrcIdWasmMemoryGrow:
            return ir_analyze_instruction_wasm_memory_grow(ira, (IrInstSrcWasmMemoryGrow *)instruction);
        case IrInstSrcIdSrc:
            return ir_analyze_instruction_src(ira, (IrInstSrcSrc *)instruction);
    }
    zig_unreachable();
}
|
|
|
|
// This function attempts to evaluate IR code while doing type checking and other analysis.
|
|
// It emits to a new IrExecutableGen which is partially evaluated IR code.
|
|
ZigType *ir_analyze(CodeGen *codegen, IrExecutableSrc *old_exec, IrExecutableGen *new_exec,
        ZigType *expected_type, AstNode *expected_type_source_node, ZigValue *result_ptr)
{
    assert(old_exec->first_err_trace_msg == nullptr);
    assert(expected_type == nullptr || !type_is_invalid(expected_type));

    // NOTE(review): fields not assigned below (e.g. instruction_index) are
    // relied upon to start at zero -- assumes heap::c_allocator.create
    // zero-initializes; confirm against the allocator's contract.
    IrAnalyze *ira = heap::c_allocator.create<IrAnalyze>();
    ira->ref_count = 1;
    old_exec->analysis = ira;
    ira->codegen = codegen;

    ira->explicit_return_type = expected_type;
    ira->explicit_return_type_source_node = expected_type_source_node;

    ira->old_irb.codegen = codegen;
    ira->old_irb.exec = old_exec;

    ira->new_irb.codegen = codegen;
    ira->new_irb.exec = new_exec;

    IrBasicBlockSrc *old_entry_bb = ira->old_irb.exec->basic_block_list.at(0);
    IrBasicBlockGen *new_entry_bb = ir_get_new_bb(ira, old_entry_bb, nullptr);
    ira->new_irb.current_basic_block = new_entry_bb;
    ira->old_bb_index = 0;

    ir_start_bb(ira, old_entry_bb, nullptr);

    // Result location: either a caller-supplied pointer value (wrapped in a
    // const instruction) or a freshly built return-pointer instruction.
    if (result_ptr != nullptr) {
        assert(result_ptr->type->id == ZigTypeIdPointer);
        IrInstGenConst *const_inst = ir_create_inst_noval<IrInstGenConst>(
                &ira->new_irb, new_exec->begin_scope, new_exec->source_node);
        const_inst->base.value = result_ptr;
        ira->return_ptr = &const_inst->base;
    } else {
        assert(new_exec->begin_scope != nullptr);
        assert(new_exec->source_node != nullptr);
        ira->return_ptr = ir_build_return_ptr(ira, new_exec->begin_scope, new_exec->source_node,
                get_pointer_to_type(codegen, expected_type, false));
    }

    // Walk the Pass 1 instructions; block advancement is driven by the
    // instruction handlers (branch analysis calls back into ir_start_bb).
    while (ira->old_bb_index < ira->old_irb.exec->basic_block_list.length) {
        IrInstSrc *old_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index);

        // Dead, side-effect-free instructions are skipped entirely.
        if (old_instruction->base.ref_count == 0 && !ir_inst_src_has_side_effects(old_instruction)) {
            ira->instruction_index += 1;
            continue;
        }

        if (ira->codegen->verbose_ir) {
            fprintf(stderr, "~ ");
            old_instruction->src();
            fprintf(stderr, "~ ");
            ir_print_inst_src(codegen, stderr, old_instruction, 0);
            // Debugger support: break on a requested debug id or a
            // registered file:line IR breakpoint.
            bool want_break = false;
            if (ira->break_debug_id == old_instruction->base.debug_id) {
                want_break = true;
            } else if (old_instruction->base.source_node != nullptr) {
                for (size_t i = 0; i < dbg_ir_breakpoints_count; i += 1) {
                    if (dbg_ir_breakpoints_buf[i].line == old_instruction->base.source_node->line + 1 &&
                        buf_ends_with_str(old_instruction->base.source_node->owner->data.structure.root_struct->path,
                                dbg_ir_breakpoints_buf[i].src_file))
                    {
                        want_break = true;
                    }
                }
            }
            if (want_break) BREAKPOINT;
        }
        IrInstGen *new_instruction = ir_analyze_instruction_base(ira, old_instruction);
        if (new_instruction != nullptr) {
            // Fix: the condition was duplicated ("x || x"); a single check
            // expresses the intended invariant that analysis produced a type.
            ir_assert(new_instruction->value->type != nullptr, &old_instruction->base);
            old_instruction->child = new_instruction;

            if (type_is_invalid(new_instruction->value->type)) {
                if (ira->codegen->verbose_ir) {
                    fprintf(stderr, "-> (invalid)");
                }

                // Propagate the first error trace message in whichever
                // direction it is missing.
                if (new_exec->first_err_trace_msg != nullptr) {
                    ira->codegen->trace_err = new_exec->first_err_trace_msg;
                } else {
                    new_exec->first_err_trace_msg = ira->codegen->trace_err;
                }
                if (new_exec->first_err_trace_msg != nullptr &&
                    !old_instruction->base.source_node->already_traced_this_node)
                {
                    old_instruction->base.source_node->already_traced_this_node = true;
                    new_exec->first_err_trace_msg = add_error_note(ira->codegen, new_exec->first_err_trace_msg,
                            old_instruction->base.source_node, buf_create_from_str("referenced here"));
                }
                return ira->codegen->builtin_types.entry_invalid;
            } else if (ira->codegen->verbose_ir) {
                fprintf(stderr, "-> ");
                if (new_instruction->value->type->id == ZigTypeIdUnreachable) {
                    fprintf(stderr, "(noreturn)\n");
                } else {
                    ir_print_inst_gen(codegen, stderr, new_instruction, 0);
                }
            }

            // unreachable instructions do their own control flow.
            if (new_instruction->value->type->id == ZigTypeIdUnreachable)
                continue;
        } else {
            if (ira->codegen->verbose_ir) {
                // Fix: debug output was missing the closing parenthesis.
                fprintf(stderr, "-> (null)");
            }
        }

        ira->instruction_index += 1;
    }

    // Decide the executable's result type: invalid on error, unreachable if
    // no implicit returns were collected, else the peer type of all returns.
    ZigType *res_type;
    if (new_exec->first_err_trace_msg != nullptr) {
        codegen->trace_err = new_exec->first_err_trace_msg;
        if (codegen->trace_err != nullptr && new_exec->source_node != nullptr &&
            !new_exec->source_node->already_traced_this_node)
        {
            new_exec->source_node->already_traced_this_node = true;
            codegen->trace_err = add_error_note(codegen, codegen->trace_err,
                    new_exec->source_node, buf_create_from_str("referenced here"));
        }
        res_type = ira->codegen->builtin_types.entry_invalid;
    } else if (ira->src_implicit_return_type_list.length == 0) {
        res_type = codegen->builtin_types.entry_unreachable;
    } else {
        res_type = ir_resolve_peer_types(ira, expected_type_source_node, expected_type,
                ira->src_implicit_return_type_list.items, ira->src_implicit_return_type_list.length);
    }

    // It is now safe to free Pass 1 IR instructions.
    ira_deref(ira);

    return res_type;
}
|
|
|
|
// Reports whether a Pass 2 (Gen) IR instruction has observable side effects.
// Instructions that return false here and have a ref_count of 0 may be
// dropped by the analyzer, so anything that writes memory, transfers
// control, or interacts with the outside world must return true.
bool ir_inst_gen_has_side_effects(IrInstGen *instruction) {
    switch (instruction->id) {
        case IrInstGenIdInvalid:
            zig_unreachable();
        // Control flow, stores, calls, traps, atomics, and async operations
        // always count as side effects.
        case IrInstGenIdBr:
        case IrInstGenIdCondBr:
        case IrInstGenIdSwitchBr:
        case IrInstGenIdDeclVar:
        case IrInstGenIdStorePtr:
        case IrInstGenIdVectorStoreElem:
        case IrInstGenIdCall:
        case IrInstGenIdReturn:
        case IrInstGenIdUnreachable:
        case IrInstGenIdFence:
        case IrInstGenIdMemset:
        case IrInstGenIdMemcpy:
        case IrInstGenIdBreakpoint:
        case IrInstGenIdOverflowOp: // TODO when we support multiple returns this can be side effect free
        case IrInstGenIdPanic:
        case IrInstGenIdSaveErrRetAddr:
        case IrInstGenIdAtomicRmw:
        case IrInstGenIdAtomicStore:
        case IrInstGenIdCmpxchg:
        case IrInstGenIdAssertZero:
        case IrInstGenIdAssertNonNull:
        case IrInstGenIdPtrOfArrayToSlice:
        case IrInstGenIdSlice:
        case IrInstGenIdOptionalWrap:
        case IrInstGenIdVectorToArray:
        case IrInstGenIdSuspendBegin:
        case IrInstGenIdSuspendFinish:
        case IrInstGenIdResume:
        case IrInstGenIdAwait:
        case IrInstGenIdSpillBegin:
        case IrInstGenIdWasmMemoryGrow:
        case IrInstGenIdExtern:
            return true;

        // Pure computations, casts, and pointer/type manipulations: safe to
        // elide when the result is unused.
        case IrInstGenIdPhi:
        case IrInstGenIdBinOp:
        case IrInstGenIdConst:
        case IrInstGenIdCast:
        case IrInstGenIdElemPtr:
        case IrInstGenIdVarPtr:
        case IrInstGenIdReturnPtr:
        case IrInstGenIdStructFieldPtr:
        case IrInstGenIdTestNonNull:
        case IrInstGenIdClz:
        case IrInstGenIdCtz:
        case IrInstGenIdPopCount:
        case IrInstGenIdBswap:
        case IrInstGenIdBitReverse:
        case IrInstGenIdUnionTag:
        case IrInstGenIdTruncate:
        case IrInstGenIdShuffleVector:
        case IrInstGenIdSplat:
        case IrInstGenIdBoolNot:
        case IrInstGenIdReturnAddress:
        case IrInstGenIdFrameAddress:
        case IrInstGenIdFrameHandle:
        case IrInstGenIdFrameSize:
        case IrInstGenIdTestErr:
        case IrInstGenIdPtrCast:
        case IrInstGenIdBitCast:
        case IrInstGenIdWidenOrShorten:
        case IrInstGenIdPtrToInt:
        case IrInstGenIdIntToPtr:
        case IrInstGenIdIntToEnum:
        case IrInstGenIdIntToErr:
        case IrInstGenIdErrToInt:
        case IrInstGenIdErrName:
        case IrInstGenIdTagName:
        case IrInstGenIdFieldParentPtr:
        case IrInstGenIdAlignCast:
        case IrInstGenIdErrorReturnTrace:
        case IrInstGenIdFloatOp:
        case IrInstGenIdMulAdd:
        case IrInstGenIdAtomicLoad:
        case IrInstGenIdArrayToVector:
        case IrInstGenIdAlloca:
        case IrInstGenIdSpillEnd:
        case IrInstGenIdVectorExtractElem:
        case IrInstGenIdBinaryNot:
        case IrInstGenIdNegation:
        case IrInstGenIdWasmMemorySize:
        case IrInstGenIdReduce:
            return false;

        // Inline assembly declares its own side-effect status (volatile asm).
        case IrInstGenIdAsm:
            {
                IrInstGenAsm *asm_instruction = (IrInstGenAsm *)instruction;
                return asm_instruction->has_side_effects;
            }
        // Unwrapping an error-union payload is a side effect when it emits a
        // runtime safety check or initializes a result location.
        case IrInstGenIdUnwrapErrPayload:
            {
                IrInstGenUnwrapErrPayload *unwrap_err_payload_instruction =
                    (IrInstGenUnwrapErrPayload *)instruction;
                return unwrap_err_payload_instruction->safety_check_on ||
                    unwrap_err_payload_instruction->initializing;
            }
        // The remaining instructions are side effects only when they write
        // (initialize a payload or a result location) rather than merely read.
        case IrInstGenIdUnwrapErrCode:
            return reinterpret_cast<IrInstGenUnwrapErrCode *>(instruction)->initializing;
        case IrInstGenIdUnionFieldPtr:
            return reinterpret_cast<IrInstGenUnionFieldPtr *>(instruction)->initializing;
        case IrInstGenIdOptionalUnwrapPtr:
            return reinterpret_cast<IrInstGenOptionalUnwrapPtr *>(instruction)->initializing;
        case IrInstGenIdErrWrapPayload:
            return reinterpret_cast<IrInstGenErrWrapPayload *>(instruction)->result_loc != nullptr;
        case IrInstGenIdErrWrapCode:
            return reinterpret_cast<IrInstGenErrWrapCode *>(instruction)->result_loc != nullptr;
        case IrInstGenIdLoadPtr:
            return reinterpret_cast<IrInstGenLoadPtr *>(instruction)->result_loc != nullptr;
        case IrInstGenIdRef:
            return reinterpret_cast<IrInstGenRef *>(instruction)->result_loc != nullptr;
    }
    zig_unreachable();
}
|
|
|
|
// Reports whether a Pass 1 (Src) IR instruction has observable side effects.
// Mirrors ir_inst_gen_has_side_effects: instructions returning false here
// may be skipped during analysis when their result is unreferenced.
bool ir_inst_src_has_side_effects(IrInstSrc *instruction) {
    switch (instruction->id) {
        case IrInstSrcIdInvalid:
            zig_unreachable();
        // Control flow, stores, calls, compile-time directives (@setCold,
        // @compileError, @cImport, @export, ...), atomics, result-location
        // bookkeeping, and async operations always count as side effects.
        case IrInstSrcIdBr:
        case IrInstSrcIdCondBr:
        case IrInstSrcIdSwitchBr:
        case IrInstSrcIdDeclVar:
        case IrInstSrcIdStorePtr:
        case IrInstSrcIdCallExtra:
        case IrInstSrcIdAsyncCallExtra:
        case IrInstSrcIdCall:
        case IrInstSrcIdCallArgs:
        case IrInstSrcIdReturn:
        case IrInstSrcIdUnreachable:
        case IrInstSrcIdSetCold:
        case IrInstSrcIdSetRuntimeSafety:
        case IrInstSrcIdSetFloatMode:
        case IrInstSrcIdImport:
        case IrInstSrcIdCompileErr:
        case IrInstSrcIdCompileLog:
        case IrInstSrcIdCImport:
        case IrInstSrcIdCInclude:
        case IrInstSrcIdCDefine:
        case IrInstSrcIdCUndef:
        case IrInstSrcIdFence:
        case IrInstSrcIdMemset:
        case IrInstSrcIdMemcpy:
        case IrInstSrcIdBreakpoint:
        case IrInstSrcIdOverflowOp: // TODO when we support multiple returns this can be side effect free
        case IrInstSrcIdCheckSwitchProngs:
        case IrInstSrcIdCheckStatementIsVoid:
        case IrInstSrcIdCheckRuntimeScope:
        case IrInstSrcIdPanic:
        case IrInstSrcIdSetEvalBranchQuota:
        case IrInstSrcIdPtrType:
        case IrInstSrcIdSetAlignStack:
        case IrInstSrcIdExport:
        case IrInstSrcIdExtern:
        case IrInstSrcIdSaveErrRetAddr:
        case IrInstSrcIdAddImplicitReturnType:
        case IrInstSrcIdAtomicRmw:
        case IrInstSrcIdAtomicStore:
        case IrInstSrcIdCmpxchg:
        case IrInstSrcIdUndeclaredIdent:
        case IrInstSrcIdEndExpr:
        case IrInstSrcIdResetResult:
        case IrInstSrcIdSuspendBegin:
        case IrInstSrcIdSuspendFinish:
        case IrInstSrcIdResume:
        case IrInstSrcIdAwait:
        case IrInstSrcIdSpillBegin:
        case IrInstSrcIdWasmMemoryGrow:
            return true;

        // Pure computations, type expressions, casts, and reflection:
        // safe to elide when the result is unused.
        case IrInstSrcIdPhi:
        case IrInstSrcIdUnOp:
        case IrInstSrcIdBinOp:
        case IrInstSrcIdMergeErrSets:
        case IrInstSrcIdLoadPtr:
        case IrInstSrcIdConst:
        case IrInstSrcIdContainerInitList:
        case IrInstSrcIdContainerInitFields:
        case IrInstSrcIdUnionInitNamedField:
        case IrInstSrcIdFieldPtr:
        case IrInstSrcIdElemPtr:
        case IrInstSrcIdVarPtr:
        case IrInstSrcIdTypeOf:
        case IrInstSrcIdArrayType:
        case IrInstSrcIdSliceType:
        case IrInstSrcIdAnyFrameType:
        case IrInstSrcIdSizeOf:
        case IrInstSrcIdTestNonNull:
        case IrInstSrcIdOptionalUnwrapPtr:
        case IrInstSrcIdClz:
        case IrInstSrcIdCtz:
        case IrInstSrcIdPopCount:
        case IrInstSrcIdBswap:
        case IrInstSrcIdBitReverse:
        case IrInstSrcIdSwitchVar:
        case IrInstSrcIdSwitchElseVar:
        case IrInstSrcIdSwitchTarget:
        case IrInstSrcIdRef:
        case IrInstSrcIdEmbedFile:
        case IrInstSrcIdTruncate:
        case IrInstSrcIdVectorType:
        case IrInstSrcIdShuffleVector:
        case IrInstSrcIdSplat:
        case IrInstSrcIdBoolNot:
        case IrInstSrcIdSlice:
        case IrInstSrcIdAlignOf:
        case IrInstSrcIdReturnAddress:
        case IrInstSrcIdFrameAddress:
        case IrInstSrcIdFrameHandle:
        case IrInstSrcIdFrameType:
        case IrInstSrcIdFrameSize:
        case IrInstSrcIdTestErr:
        case IrInstSrcIdFnProto:
        case IrInstSrcIdTestComptime:
        case IrInstSrcIdPtrCast:
        case IrInstSrcIdBitCast:
        case IrInstSrcIdPtrToInt:
        case IrInstSrcIdIntToPtr:
        case IrInstSrcIdIntToEnum:
        case IrInstSrcIdIntToErr:
        case IrInstSrcIdErrToInt:
        case IrInstSrcIdDeclRef:
        case IrInstSrcIdErrName:
        case IrInstSrcIdTypeName:
        case IrInstSrcIdTagName:
        case IrInstSrcIdFieldParentPtr:
        case IrInstSrcIdByteOffsetOf:
        case IrInstSrcIdBitOffsetOf:
        case IrInstSrcIdTypeInfo:
        case IrInstSrcIdType:
        case IrInstSrcIdHasField:
        case IrInstSrcIdAlignCast:
        case IrInstSrcIdImplicitCast:
        case IrInstSrcIdResolveResult:
        case IrInstSrcIdArgType:
        case IrInstSrcIdTagType:
        case IrInstSrcIdErrorReturnTrace:
        case IrInstSrcIdErrorUnion:
        case IrInstSrcIdFloatOp:
        case IrInstSrcIdMulAdd:
        case IrInstSrcIdAtomicLoad:
        case IrInstSrcIdIntCast:
        case IrInstSrcIdFloatCast:
        case IrInstSrcIdErrSetCast:
        case IrInstSrcIdIntToFloat:
        case IrInstSrcIdFloatToInt:
        case IrInstSrcIdBoolToInt:
        case IrInstSrcIdEnumToInt:
        case IrInstSrcIdHasDecl:
        case IrInstSrcIdAlloca:
        case IrInstSrcIdSpillEnd:
        case IrInstSrcIdWasmMemorySize:
        case IrInstSrcIdSrc:
        case IrInstSrcIdReduce:
            return false;

        // Inline assembly declares its own side-effect status (volatile asm).
        case IrInstSrcIdAsm:
            {
                IrInstSrcAsm *asm_instruction = (IrInstSrcAsm *)instruction;
                return asm_instruction->has_side_effects;
            }

        // Side effect when a safety check is emitted or a result location
        // is initialized.
        case IrInstSrcIdUnwrapErrPayload:
            {
                IrInstSrcUnwrapErrPayload *unwrap_err_payload_instruction =
                    (IrInstSrcUnwrapErrPayload *)instruction;
                return unwrap_err_payload_instruction->safety_check_on ||
                    unwrap_err_payload_instruction->initializing;
            }
        case IrInstSrcIdUnwrapErrCode:
            return reinterpret_cast<IrInstSrcUnwrapErrCode *>(instruction)->initializing;
    }
    zig_unreachable();
}
|
|
|
|
// Resolves a lazily-built function type (from a fn proto) into a concrete
// ZigType. Returns nullptr on failure, after adding an error to `ira`.
// May return early with a *generic* fn type as soon as a parameter is found
// whose type is elided or must be comptime-known.
static ZigType *ir_resolve_lazy_fn_type(IrAnalyze *ira, AstNode *source_node, LazyValueFnType *lazy_fn_type) {
    Error err;
    AstNode *proto_node = lazy_fn_type->proto_node;

    FnTypeId fn_type_id = {0};
    init_fn_type_id(&fn_type_id, proto_node, lazy_fn_type->cc, proto_node->data.fn_proto.params.length);

    // Note: the loop body mutates fn_type_id.next_param_index in the
    // ReqCompTimeYes branch before returning; order matters here.
    for (; fn_type_id.next_param_index < fn_type_id.param_count; fn_type_id.next_param_index += 1) {
        AstNode *param_node = proto_node->data.fn_proto.params.at(fn_type_id.next_param_index);
        assert(param_node->type == NodeTypeParamDecl);

        bool param_is_var_args = param_node->data.param_decl.is_var_args;
        if (param_is_var_args) {
            if (fn_type_id.cc == CallingConventionC) {
                // C varargs: the trailing "..." is not a real parameter;
                // truncate the declared parameter count here.
                fn_type_id.param_count = fn_type_id.next_param_index;
                break;
            } else {
                ir_add_error_node(ira, param_node,
                    buf_sprintf("var args only allowed in functions with C calling convention"));
                return nullptr;
            }
        }
        FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index];
        param_info->is_noalias = param_node->data.param_decl.is_noalias;

        if (lazy_fn_type->param_types[fn_type_id.next_param_index] == nullptr) {
            // Elided parameter type (`anytype`): the whole fn type is generic.
            param_info->type = nullptr;
            return get_generic_fn_type(ira->codegen, &fn_type_id);
        } else {
            IrInstGen *param_type_inst = lazy_fn_type->param_types[fn_type_id.next_param_index];
            ZigType *param_type = ir_resolve_type(ira, param_type_inst);
            if (type_is_invalid(param_type))
                return nullptr;

            // Reject types that can never be passed as parameters.
            if(!is_valid_param_type(param_type)){
                if(param_type->id == ZigTypeIdOpaque){
                    ir_add_error(ira, &param_type_inst->base,
                        buf_sprintf("parameter of opaque type '%s' not allowed", buf_ptr(&param_type->name)));
                } else {
                    ir_add_error(ira, &param_type_inst->base,
                        buf_sprintf("parameter of type '%s' not allowed", buf_ptr(&param_type->name)));
                }

                return nullptr;
            }

            switch (type_requires_comptime(ira->codegen, param_type)) {
                case ReqCompTimeYes:
                    // A comptime-only parameter forces a generic fn type;
                    // include this parameter before returning.
                    if (!calling_convention_allows_zig_types(fn_type_id.cc)) {
                        ir_add_error(ira, &param_type_inst->base,
                            buf_sprintf("parameter of type '%s' not allowed in function with calling convention '%s'",
                                buf_ptr(&param_type->name), calling_convention_name(fn_type_id.cc)));
                        return nullptr;
                    }
                    param_info->type = param_type;
                    fn_type_id.next_param_index += 1;
                    return get_generic_fn_type(ira->codegen, &fn_type_id);
                case ReqCompTimeInvalid:
                    return nullptr;
                case ReqCompTimeNo:
                    break;
            }
            // Non-Zig calling conventions cannot pass zero-bit parameters.
            if (!calling_convention_allows_zig_types(fn_type_id.cc)) {
                bool has_bits;
                if ((err = type_has_bits2(ira->codegen, param_type, &has_bits)))
                    return nullptr;
                if (!has_bits) {
                    ir_add_error(ira, &param_type_inst->base,
                        buf_sprintf("parameter of type '%s' has 0 bits; not allowed in function with calling convention '%s'",
                            buf_ptr(&param_type->name), calling_convention_name(fn_type_id.cc)));
                    return nullptr;
                }
            }
            param_info->type = param_type;
        }
    }

    // Optional explicit fn alignment.
    if (lazy_fn_type->align_inst != nullptr) {
        if (!ir_resolve_align(ira, lazy_fn_type->align_inst, nullptr, &fn_type_id.alignment))
            return nullptr;
    }

    fn_type_id.return_type = ir_resolve_type(ira, lazy_fn_type->return_type);
    if (type_is_invalid(fn_type_id.return_type))
        return nullptr;
    if (fn_type_id.return_type->id == ZigTypeIdOpaque) {
        ir_add_error(ira, &lazy_fn_type->return_type->base, buf_create_from_str("return type cannot be opaque"));
        return nullptr;
    }

    return get_fn_type(ira->codegen, &fn_type_id);
}
|
|
|
|
// Forces a lazy value (@alignOf/@sizeOf, slice/pointer/array/optional/fn/
// error-union type constructors, @typeInfo decls) into a concrete static
// value in place: on success val->special becomes ConstValSpecialStatic and
// the payload is filled in. Errors are reported through the IrAnalyze stored
// inside the lazy value. The lazy payload is never freed here because other
// ZigValues may alias it.
static Error ir_resolve_lazy_raw(AstNode *source_node, ZigValue *val) {
    Error err;
    // Already-resolved values need no work.
    if (val->special != ConstValSpecialLazy)
        return ErrorNone;
    switch (val->data.x_lazy->id) {
        case LazyValueIdInvalid:
            zig_unreachable();
        case LazyValueIdTypeInfoDecls: {
            LazyValueTypeInfoDecls *type_info_decls = reinterpret_cast<LazyValueTypeInfoDecls *>(val->data.x_lazy);
            IrAnalyze *ira = type_info_decls->ira;

            if ((err = ir_make_type_info_decls(ira, type_info_decls->source_instr, val, type_info_decls->decls_scope, true)))
            {
                return err;
            }; // NOTE(review): stray trailing semicolon, harmless.

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdAlignOf: {
            LazyValueAlignOf *lazy_align_of = reinterpret_cast<LazyValueAlignOf *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_align_of->ira;

            // If the operand type is already known, reject categories that
            // have no alignment before asking for the ABI alignment.
            if (lazy_align_of->target_type->value->special == ConstValSpecialStatic) {
                switch (lazy_align_of->target_type->value->data.x_type->id) {
                    case ZigTypeIdInvalid:
                        zig_unreachable();
                    case ZigTypeIdMetaType:
                    case ZigTypeIdUnreachable:
                    case ZigTypeIdComptimeFloat:
                    case ZigTypeIdComptimeInt:
                    case ZigTypeIdEnumLiteral:
                    case ZigTypeIdUndefined:
                    case ZigTypeIdNull:
                    case ZigTypeIdBoundFn:
                    case ZigTypeIdVoid:
                    case ZigTypeIdOpaque:
                        ir_add_error(ira, &lazy_align_of->target_type->base,
                            buf_sprintf("no align available for type '%s'",
                                buf_ptr(&lazy_align_of->target_type->value->data.x_type->name)));
                        return ErrorSemanticAnalyzeFail;
                    case ZigTypeIdBool:
                    case ZigTypeIdInt:
                    case ZigTypeIdFloat:
                    case ZigTypeIdPointer:
                    case ZigTypeIdArray:
                    case ZigTypeIdStruct:
                    case ZigTypeIdOptional:
                    case ZigTypeIdErrorUnion:
                    case ZigTypeIdErrorSet:
                    case ZigTypeIdEnum:
                    case ZigTypeIdUnion:
                    case ZigTypeIdFn:
                    case ZigTypeIdVector:
                    case ZigTypeIdFnFrame:
                    case ZigTypeIdAnyFrame:
                        break;
                }
            }

            uint32_t align_in_bytes;
            if ((err = type_val_resolve_abi_align(ira->codegen, source_node,
                            lazy_align_of->target_type->value, &align_in_bytes)))
            {
                return err;
            }

            // @alignOf result is an integer value.
            val->special = ConstValSpecialStatic;
            assert(val->type->id == ZigTypeIdComptimeInt || val->type->id == ZigTypeIdInt);
            bigint_init_unsigned(&val->data.x_bigint, align_in_bytes);

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdSizeOf: {
            LazyValueSizeOf *lazy_size_of = reinterpret_cast<LazyValueSizeOf *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_size_of->ira;

            // Reject type categories that have no size.
            if (lazy_size_of->target_type->value->special == ConstValSpecialStatic) {
                switch (lazy_size_of->target_type->value->data.x_type->id) {
                    case ZigTypeIdInvalid: // handled above
                        zig_unreachable();
                    case ZigTypeIdUnreachable:
                    case ZigTypeIdUndefined:
                    case ZigTypeIdNull:
                    case ZigTypeIdBoundFn:
                    case ZigTypeIdOpaque:
                        ir_add_error(ira, &lazy_size_of->target_type->base,
                            buf_sprintf("no size available for type '%s'",
                                buf_ptr(&lazy_size_of->target_type->value->data.x_type->name)));
                        return ErrorSemanticAnalyzeFail;
                    case ZigTypeIdMetaType:
                    case ZigTypeIdEnumLiteral:
                    case ZigTypeIdComptimeFloat:
                    case ZigTypeIdComptimeInt:
                    case ZigTypeIdVoid:
                    case ZigTypeIdBool:
                    case ZigTypeIdInt:
                    case ZigTypeIdFloat:
                    case ZigTypeIdPointer:
                    case ZigTypeIdArray:
                    case ZigTypeIdStruct:
                    case ZigTypeIdOptional:
                    case ZigTypeIdErrorUnion:
                    case ZigTypeIdErrorSet:
                    case ZigTypeIdEnum:
                    case ZigTypeIdUnion:
                    case ZigTypeIdFn:
                    case ZigTypeIdVector:
                    case ZigTypeIdFnFrame:
                    case ZigTypeIdAnyFrame:
                        break;
                }
            }

            size_t abi_size;
            size_t size_in_bits;
            if ((err = type_val_resolve_abi_size(ira->codegen, source_node, lazy_size_of->target_type->value,
                            &abi_size, &size_in_bits)))
            {
                return err;
            }

            val->special = ConstValSpecialStatic;
            assert(val->type->id == ZigTypeIdComptimeInt || val->type->id == ZigTypeIdInt);
            // bit_size distinguishes @bitSizeOf from @sizeOf.
            if (lazy_size_of->bit_size)
                bigint_init_unsigned(&val->data.x_bigint, size_in_bits);
            else
                bigint_init_unsigned(&val->data.x_bigint, abi_size);

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdSliceType: {
            LazyValueSliceType *lazy_slice_type = reinterpret_cast<LazyValueSliceType *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_slice_type->ira;

            ZigType *elem_type = ir_resolve_type(ira, lazy_slice_type->elem_type);
            if (type_is_invalid(elem_type))
                return ErrorSemanticAnalyzeFail;

            // Optional sentinel is cast to the element type and must be a
            // comptime-known, defined value.
            ZigValue *sentinel_val;
            if (lazy_slice_type->sentinel != nullptr) {
                if (type_is_invalid(lazy_slice_type->sentinel->value->type))
                    return ErrorSemanticAnalyzeFail;
                IrInstGen *sentinel = ir_implicit_cast(ira, lazy_slice_type->sentinel, elem_type);
                if (type_is_invalid(sentinel->value->type))
                    return ErrorSemanticAnalyzeFail;
                sentinel_val = ir_resolve_const(ira, sentinel, UndefBad);
                if (sentinel_val == nullptr)
                    return ErrorSemanticAnalyzeFail;
            } else {
                sentinel_val = nullptr;
            }

            uint32_t align_bytes = 0;
            if (lazy_slice_type->align_inst != nullptr) {
                if (!ir_resolve_align(ira, lazy_slice_type->align_inst, elem_type, &align_bytes))
                    return ErrorSemanticAnalyzeFail;
            }

            // Reject element type categories that cannot be sliced.
            switch (elem_type->id) {
                case ZigTypeIdInvalid: // handled above
                    zig_unreachable();
                case ZigTypeIdUnreachable:
                case ZigTypeIdUndefined:
                case ZigTypeIdNull:
                case ZigTypeIdOpaque:
                    ir_add_error(ira, &lazy_slice_type->elem_type->base,
                        buf_sprintf("slice of type '%s' not allowed", buf_ptr(&elem_type->name)));
                    return ErrorSemanticAnalyzeFail;
                case ZigTypeIdMetaType:
                case ZigTypeIdVoid:
                case ZigTypeIdBool:
                case ZigTypeIdInt:
                case ZigTypeIdFloat:
                case ZigTypeIdPointer:
                case ZigTypeIdArray:
                case ZigTypeIdStruct:
                case ZigTypeIdComptimeFloat:
                case ZigTypeIdComptimeInt:
                case ZigTypeIdEnumLiteral:
                case ZigTypeIdOptional:
                case ZigTypeIdErrorUnion:
                case ZigTypeIdErrorSet:
                case ZigTypeIdEnum:
                case ZigTypeIdUnion:
                case ZigTypeIdFn:
                case ZigTypeIdBoundFn:
                case ZigTypeIdVector:
                case ZigTypeIdFnFrame:
                case ZigTypeIdAnyFrame:
                    break;
            }

            // Only force alignment resolution when an explicit alignment was
            // requested; otherwise zero-bits knowledge suffices.
            ResolveStatus needed_status = (align_bytes == 0) ?
                ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown;
            if ((err = type_resolve(ira->codegen, elem_type, needed_status)))
                return err;
            ZigType *slice_ptr_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
                    lazy_slice_type->is_const, lazy_slice_type->is_volatile,
                    PtrLenUnknown,
                    align_bytes,
                    0, 0, lazy_slice_type->is_allowzero,
                    VECTOR_INDEX_NONE, nullptr, sentinel_val);
            val->special = ConstValSpecialStatic;
            assert(val->type->id == ZigTypeIdMetaType);
            val->data.x_type = get_slice_type(ira->codegen, slice_ptr_type);

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdPtrType: {
            LazyValuePtrType *lazy_ptr_type = reinterpret_cast<LazyValuePtrType *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_ptr_type->ira;

            ZigType *elem_type = ir_resolve_type(ira, lazy_ptr_type->elem_type);
            if (type_is_invalid(elem_type))
                return ErrorSemanticAnalyzeFail;

            // Optional sentinel, same handling as for slice types.
            ZigValue *sentinel_val;
            if (lazy_ptr_type->sentinel != nullptr) {
                if (type_is_invalid(lazy_ptr_type->sentinel->value->type))
                    return ErrorSemanticAnalyzeFail;
                IrInstGen *sentinel = ir_implicit_cast(ira, lazy_ptr_type->sentinel, elem_type);
                if (type_is_invalid(sentinel->value->type))
                    return ErrorSemanticAnalyzeFail;
                sentinel_val = ir_resolve_const(ira, sentinel, UndefBad);
                if (sentinel_val == nullptr)
                    return ErrorSemanticAnalyzeFail;
            } else {
                sentinel_val = nullptr;
            }

            uint32_t align_bytes = 0;
            if (lazy_ptr_type->align_inst != nullptr) {
                if (!ir_resolve_align(ira, lazy_ptr_type->align_inst, elem_type, &align_bytes))
                    return ErrorSemanticAnalyzeFail;
            }

            // Validate element type against the pointer kind; C pointers
            // (PtrLenC) carry extra restrictions.
            if (elem_type->id == ZigTypeIdUnreachable) {
                ir_add_error(ira, &lazy_ptr_type->elem_type->base,
                    buf_create_from_str("pointer to noreturn not allowed"));
                return ErrorSemanticAnalyzeFail;
            } else if (elem_type->id == ZigTypeIdOpaque && lazy_ptr_type->ptr_len == PtrLenUnknown) {
                ir_add_error(ira, &lazy_ptr_type->elem_type->base,
                    buf_create_from_str("unknown-length pointer to opaque"));
                return ErrorSemanticAnalyzeFail;
            } else if (lazy_ptr_type->ptr_len == PtrLenC) {
                bool ok_type;
                if ((err = type_allowed_in_extern(ira->codegen, elem_type, ExternPositionOther, &ok_type)))
                    return err;
                if (!ok_type) {
                    ir_add_error(ira, &lazy_ptr_type->elem_type->base,
                        buf_sprintf("C pointers cannot point to non-C-ABI-compatible type '%s'",
                            buf_ptr(&elem_type->name)));
                    return ErrorSemanticAnalyzeFail;
                } else if (elem_type->id == ZigTypeIdOpaque) {
                    ir_add_error(ira, &lazy_ptr_type->elem_type->base,
                        buf_sprintf("C pointers cannot point to opaque types"));
                    return ErrorSemanticAnalyzeFail;
                } else if (lazy_ptr_type->is_allowzero) {
                    ir_add_error(ira, &lazy_ptr_type->elem_type->base,
                        buf_sprintf("C pointers always allow address zero"));
                    return ErrorSemanticAnalyzeFail;
                }
            }

            // An explicit alignment on a zero-bit element type collapses to
            // the default (0) alignment.
            if (align_bytes != 0) {
                if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusAlignmentKnown)))
                    return err;
                if (!type_has_bits(ira->codegen, elem_type))
                    align_bytes = 0;
            }
            bool allow_zero = lazy_ptr_type->is_allowzero || lazy_ptr_type->ptr_len == PtrLenC;
            assert(val->type->id == ZigTypeIdMetaType);
            val->data.x_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
                    lazy_ptr_type->is_const, lazy_ptr_type->is_volatile, lazy_ptr_type->ptr_len, align_bytes,
                    lazy_ptr_type->bit_offset_in_host, lazy_ptr_type->host_int_bytes,
                    allow_zero, VECTOR_INDEX_NONE, nullptr, sentinel_val);
            val->special = ConstValSpecialStatic;

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdArrayType: {
            LazyValueArrayType *lazy_array_type = reinterpret_cast<LazyValueArrayType *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_array_type->ira;

            ZigType *elem_type = ir_resolve_type(ira, lazy_array_type->elem_type);
            if (type_is_invalid(elem_type))
                return ErrorSemanticAnalyzeFail;

            // Reject element type categories that cannot form arrays.
            switch (elem_type->id) {
                case ZigTypeIdInvalid: // handled above
                    zig_unreachable();
                case ZigTypeIdUnreachable:
                case ZigTypeIdUndefined:
                case ZigTypeIdNull:
                case ZigTypeIdOpaque:
                    ir_add_error(ira, &lazy_array_type->elem_type->base,
                        buf_sprintf("array of type '%s' not allowed",
                            buf_ptr(&elem_type->name)));
                    return ErrorSemanticAnalyzeFail;
                case ZigTypeIdMetaType:
                case ZigTypeIdVoid:
                case ZigTypeIdBool:
                case ZigTypeIdInt:
                case ZigTypeIdFloat:
                case ZigTypeIdPointer:
                case ZigTypeIdArray:
                case ZigTypeIdStruct:
                case ZigTypeIdComptimeFloat:
                case ZigTypeIdComptimeInt:
                case ZigTypeIdEnumLiteral:
                case ZigTypeIdOptional:
                case ZigTypeIdErrorUnion:
                case ZigTypeIdErrorSet:
                case ZigTypeIdEnum:
                case ZigTypeIdUnion:
                case ZigTypeIdFn:
                case ZigTypeIdBoundFn:
                case ZigTypeIdVector:
                case ZigTypeIdFnFrame:
                case ZigTypeIdAnyFrame:
                    break;
            }

            // Avoid resolving the type if the total length is zero.
            // Matches the logic in get_array_type and in the lazy alignment
            // resolution routine.
            if (lazy_array_type->length + (lazy_array_type->sentinel != nullptr) != 0) {
                if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusSizeKnown)))
                    return err;
            }

            ZigValue *sentinel_val = nullptr;
            if (lazy_array_type->sentinel != nullptr) {
                if (type_is_invalid(lazy_array_type->sentinel->value->type))
                    return ErrorSemanticAnalyzeFail;
                IrInstGen *sentinel = ir_implicit_cast(ira, lazy_array_type->sentinel, elem_type);
                if (type_is_invalid(sentinel->value->type))
                    return ErrorSemanticAnalyzeFail;
                sentinel_val = ir_resolve_const(ira, sentinel, UndefBad);
                if (sentinel_val == nullptr)
                    return ErrorSemanticAnalyzeFail;
            }

            assert(val->type->id == ZigTypeIdMetaType);
            val->data.x_type = get_array_type(ira->codegen, elem_type, lazy_array_type->length, sentinel_val);
            val->special = ConstValSpecialStatic;

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdOptType: {
            LazyValueOptType *lazy_opt_type = reinterpret_cast<LazyValueOptType *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_opt_type->ira;

            ZigType *payload_type = ir_resolve_type(ira, lazy_opt_type->payload_type);
            if (type_is_invalid(payload_type))
                return ErrorSemanticAnalyzeFail;

            if (payload_type->id == ZigTypeIdOpaque || payload_type->id == ZigTypeIdUnreachable) {
                ir_add_error(ira, &lazy_opt_type->payload_type->base,
                    buf_sprintf("type '%s' cannot be optional", buf_ptr(&payload_type->name)));
                return ErrorSemanticAnalyzeFail;
            }

            if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown)))
                return err;

            assert(val->type->id == ZigTypeIdMetaType);
            val->data.x_type = get_optional_type(ira->codegen, payload_type);
            val->special = ConstValSpecialStatic;

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdFnType: {
            LazyValueFnType *lazy_fn_type = reinterpret_cast<LazyValueFnType *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_fn_type->ira;
            // Delegates parameter/return resolution to the helper above.
            ZigType *fn_type = ir_resolve_lazy_fn_type(ira, source_node, lazy_fn_type);
            if (fn_type == nullptr)
                return ErrorSemanticAnalyzeFail;
            val->special = ConstValSpecialStatic;
            assert(val->type->id == ZigTypeIdMetaType);
            val->data.x_type = fn_type;

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
        case LazyValueIdErrUnionType: {
            LazyValueErrUnionType *lazy_err_union_type =
                reinterpret_cast<LazyValueErrUnionType *>(val->data.x_lazy);
            IrAnalyze *ira = lazy_err_union_type->ira;

            ZigType *err_set_type = ir_resolve_type(ira, lazy_err_union_type->err_set_type);
            if (type_is_invalid(err_set_type))
                return ErrorSemanticAnalyzeFail;

            ZigType *payload_type = ir_resolve_type(ira, lazy_err_union_type->payload_type);
            if (type_is_invalid(payload_type))
                return ErrorSemanticAnalyzeFail;

            if (err_set_type->id != ZigTypeIdErrorSet) {
                ir_add_error(ira, &lazy_err_union_type->err_set_type->base,
                    buf_sprintf("expected error set type, found type '%s'",
                        buf_ptr(&err_set_type->name)));
                return ErrorSemanticAnalyzeFail;
            }

            // NOTE(review): unlike the other cases this discards the precise
            // error from type_resolve and returns ErrorSemanticAnalyzeFail;
            // looks intentional but worth confirming for error-trace quality.
            if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown)))
                return ErrorSemanticAnalyzeFail;

            assert(val->type->id == ZigTypeIdMetaType);
            val->data.x_type = get_error_union_type(ira->codegen, err_set_type, payload_type);
            val->special = ConstValSpecialStatic;

            // We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
            return ErrorNone;
        }
    }
    zig_unreachable();
}
|
|
|
|
// Resolve a lazy value into a concrete one. On failure, attach a
// "referenced here" note to the in-flight error trace, at most once per
// AST node (guarded by already_traced_this_node) so repeated resolution
// attempts do not spam the trace.
Error ir_resolve_lazy(CodeGen *codegen, AstNode *source_node, ZigValue *val) {
    Error err = ir_resolve_lazy_raw(source_node, val);
    if (err != ErrorNone) {
        bool should_trace = codegen->trace_err != nullptr &&
            source_node != nullptr &&
            !source_node->already_traced_this_node;
        if (should_trace) {
            source_node->already_traced_this_node = true;
            codegen->trace_err = add_error_note(codegen, codegen->trace_err,
                    source_node, buf_create_from_str("referenced here"));
        }
        return err;
    }
    // The raw resolution can succeed while still producing an invalid type.
    if (type_is_invalid(val->type))
        return ErrorSemanticAnalyzeFail;
    return ErrorNone;
}
|
|
|
|
void IrInst::src() {
|
|
IrInst *inst = this;
|
|
if (inst->source_node != nullptr) {
|
|
inst->source_node->src();
|
|
} else {
|
|
fprintf(stderr, "(null source node)\n");
|
|
}
|
|
}
|
|
|
|
void IrInst::dump() {
|
|
this->src();
|
|
fprintf(stderr, "IrInst(#%" PRIu32 ")\n", this->debug_id);
|
|
}
|
|
|
|
void IrInstSrc::src() {
|
|
this->base.src();
|
|
}
|
|
|
|
void IrInstGen::src() {
|
|
this->base.src();
|
|
}
|
|
|
|
void IrInstSrc::dump() {
|
|
IrInstSrc *inst = this;
|
|
inst->src();
|
|
if (inst->base.scope == nullptr) {
|
|
fprintf(stderr, "(null scope)\n");
|
|
} else {
|
|
ir_print_inst_src(inst->base.scope->codegen, stderr, inst, 0);
|
|
fprintf(stderr, "-> ");
|
|
ir_print_inst_gen(inst->base.scope->codegen, stderr, inst->child, 0);
|
|
}
|
|
}
|
|
// Debugger aid: print this analyzed instruction to stderr.
void IrInstGen::dump() {
    this->src();
    auto *scope = this->base.scope;
    if (scope == nullptr) {
        fprintf(stderr, "(null scope)\n");
        return;
    }
    ir_print_inst_gen(scope->codegen, stderr, this, 0);
}
|
|
|
|
void IrAnalyze::dump() {
|
|
ir_print_gen(this->codegen, stderr, this->new_irb.exec, 0);
|
|
if (this->new_irb.current_basic_block != nullptr) {
|
|
fprintf(stderr, "Current basic block:\n");
|
|
ir_print_basic_block_gen(this->codegen, stderr, this->new_irb.current_basic_block, 1);
|
|
}
|
|
}
|
|
|
|
// Register an IR breakpoint (matched by source file and line) for debugging
// the compiler itself.
//
// src_file: file name to match (pointer is stored, not copied — caller must
//           keep it alive; typically a string literal).
// line:     1-based line number to break on.
void dbg_ir_break(const char *src_file, uint32_t line) {
    // The breakpoint table is a fixed-size global array; assert instead of
    // silently writing past the end when too many breakpoints are added.
    assert(dbg_ir_breakpoints_count <
            sizeof(dbg_ir_breakpoints_buf) / sizeof(dbg_ir_breakpoints_buf[0]));
    dbg_ir_breakpoints_buf[dbg_ir_breakpoints_count] = {src_file, line};
    dbg_ir_breakpoints_count += 1;
}
|
|
// Remove all registered IR debug breakpoints. Only the count is reset; the
// stale buffer entries are simply treated as dead.
void dbg_ir_clear(void) {
    dbg_ir_breakpoints_count = 0;
}
|