*WIP* error sets - correctly resolve inferred error sets
parent 39d5f44863
commit b8f59e14cd

TODO
@@ -13,3 +13,15 @@ then you can return void, or any error, and the error set is inferred.
// TODO this is an explicit cast and should actually coerce the type
error set casting
test err should be comptime if error set has 0 members
comptime calling fn with inferred error set should give empty error set but still you can use try
comptime err to int of empty err set and of size 1 err set
comptime test for err
undefined in infer error
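The notes above revolve around merging error sets with || and around error sets inferred from a function body. As a rough, era-appropriate Zig sketch of the behavior this branch is building toward (every name below is invented for illustration and is not part of the commit):

const assert = @import("std").debug.assert;

// Two explicit error sets; || merges them into a third. This is the
// operator the commit threads through the tokenizer (TokenIdBarBar),
// parser (BinOpTypeMergeErrorSets) and IR (IrBinOpMergeErrorSets).
const ReadError = error {EndOfStream};
const ParseError = error {Invalid};
const LoadError = ReadError || ParseError;

// An explicit error set in a return type, as std/os now uses
// (e.g. PosixWriteError!void further down in this diff).
fn mayFail(fail: bool) LoadError!void {
    if (fail) return LoadError.Invalid;
}

// A bare !u8 asks the compiler to infer the error set from the body;
// resolving that set lazily, when a caller actually needs it, is what
// resolve_inferred_error_set() below implements.
fn firstByte(s: []const u8) !u8 {
    if (s.len == 0) return error.Empty;
    return s[0];
}

test "merge and inferred error sets" {
    mayFail(false) catch unreachable;
    const b = firstByte("a") catch 0;
    assert(b == 'a');
}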
@@ -5682,7 +5682,7 @@ MultiplyExpression = CurlySuffixExpression MultiplyOperator MultiplyExpression |
CurlySuffixExpression = TypeExpr option(ContainerInitExpression)
MultiplyOperator = "*" | "/" | "%" | "**" | "*%"
MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%"
PrefixOpExpression = PrefixOp PrefixOpExpression | SuffixOpExpression

@@ -510,8 +510,7 @@ enum BinOpType {
BinOpTypeAssignBitAnd,
BinOpTypeAssignBitXor,
BinOpTypeAssignBitOr,
BinOpTypeAssignBoolAnd,
BinOpTypeAssignBoolOr,
BinOpTypeAssignMergeErrorSets,
BinOpTypeBoolOr,
BinOpTypeBoolAnd,
BinOpTypeCmpEq,

@@ -537,6 +536,7 @@ enum BinOpType {
BinOpTypeArrayCat,
BinOpTypeArrayMult,
BinOpTypeErrorUnion,
BinOpTypeMergeErrorSets,
};
struct AstNodeBinOpExpr {

@@ -2054,6 +2054,7 @@ enum IrBinOp {
IrBinOpRemMod,
IrBinOpArrayCat,
IrBinOpArrayMult,
IrBinOpMergeErrorSets,
};
struct IrInstructionBinOp {
@@ -530,7 +530,6 @@ TypeTableEntry *get_error_union_type(CodeGen *g, TypeTableEntry *err_set_type, T
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdErrorUnion);
entry->is_copyable = true;
assert(payload_type->type_ref);
assert(payload_type->di_type);
ensure_complete_type(g, payload_type);

@@ -541,9 +540,16 @@ TypeTableEntry *get_error_union_type(CodeGen *g, TypeTableEntry *err_set_type, T
entry->data.error_union.payload_type = payload_type;
if (!type_has_bits(payload_type)) {
entry->type_ref = err_set_type->type_ref;
entry->di_type = err_set_type->di_type;
if (type_has_bits(err_set_type)) {
entry->type_ref = err_set_type->type_ref;
entry->di_type = err_set_type->di_type;
} else {
entry->zero_bits = true;
entry->di_type = g->builtin_types.entry_void->di_type;
}
} else if (!type_has_bits(err_set_type)) {
entry->type_ref = payload_type->type_ref;
entry->di_type = payload_type->di_type;
} else {
LLVMTypeRef elem_types[] = {
err_set_type->type_ref,

@@ -3841,6 +3847,27 @@ void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, Vari
}
}
static bool analyze_resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node) {
FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn;
if (infer_fn != nullptr) {
if (infer_fn->anal_state == FnAnalStateInvalid) {
return false;
} else if (infer_fn->anal_state == FnAnalStateReady) {
analyze_fn_body(g, infer_fn);
if (err_set_type->data.error_set.infer_fn != nullptr) {
assert(g->errors.length != 0);
return false;
}
} else {
add_node_error(g, source_node,
buf_sprintf("cannot resolve inferred error set '%s': function '%s' not fully analyzed yet",
buf_ptr(&err_set_type->name), buf_ptr(&err_set_type->data.error_set.infer_fn->symbol_name)));
return false;
}
}
return true;
}
void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_type_node) {
TypeTableEntry *fn_type = fn_table_entry->type_entry;
assert(!fn_type->data.fn.is_generic);

@@ -3871,6 +3898,13 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ
return;
}
if (inferred_err_set_type->data.error_set.infer_fn != nullptr) {
if (!analyze_resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
fn_table_entry->anal_state = FnAnalStateInvalid;
return;
}
}
return_err_set_type->data.error_set.infer_fn = nullptr;
return_err_set_type->data.error_set.err_count = inferred_err_set_type->data.error_set.err_count;
return_err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(inferred_err_set_type->data.error_set.err_count);
@@ -49,12 +49,12 @@ static const char *bin_op_str(BinOpType bin_op) {
case BinOpTypeAssignBitAnd: return "&=";
case BinOpTypeAssignBitXor: return "^=";
case BinOpTypeAssignBitOr: return "|=";
case BinOpTypeAssignBoolAnd: return "&&=";
case BinOpTypeAssignBoolOr: return "||=";
case BinOpTypeAssignMergeErrorSets: return "||=";
case BinOpTypeUnwrapMaybe: return "??";
case BinOpTypeArrayCat: return "++";
case BinOpTypeArrayMult: return "**";
case BinOpTypeErrorUnion: return "!";
case BinOpTypeMergeErrorSets: return "||";
}
zig_unreachable();
}
@@ -1799,6 +1799,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
case IrBinOpArrayCat:
case IrBinOpArrayMult:
case IrBinOpRemUnspecified:
case IrBinOpMergeErrorSets:
zig_unreachable();
case IrBinOpBoolOr:
return LLVMBuildOr(g->builder, op1_value, op2_value, "");

@@ -2188,6 +2189,9 @@ static LLVMValueRef ir_render_err_to_int(CodeGen *g, IrExecutable *executable, I
return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
g->err_tag_type, wanted_type, target_val);
} else if (actual_type->id == TypeTableEntryIdErrorUnion) {
// this should have been a compile time constant
assert(type_has_bits(actual_type->data.error_union.err_set_type));
if (!type_has_bits(actual_type->data.error_union.payload_type)) {
return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
g->err_tag_type, wanted_type, target_val);

@@ -3428,6 +3432,10 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu
LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->value);
LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type);
if (!type_has_bits(err_union_type->data.error_union.err_set_type)) {
return err_union_handle;
}
if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on && g->errors_by_index.length > 1) {
LLVMValueRef err_val;
if (type_has_bits(payload_type)) {

@@ -3490,9 +3498,11 @@ static LLVMValueRef ir_render_err_wrap_code(CodeGen *g, IrExecutable *executable
assert(wanted_type->id == TypeTableEntryIdErrorUnion);
TypeTableEntry *payload_type = wanted_type->data.error_union.payload_type;
TypeTableEntry *err_set_type = wanted_type->data.error_union.err_set_type;
LLVMValueRef err_val = ir_llvm_value(g, instruction->value);
if (!type_has_bits(payload_type))
if (!type_has_bits(payload_type) || !type_has_bits(err_set_type))
return err_val;
assert(instruction->tmp_ptr);

@@ -3509,6 +3519,11 @@ static LLVMValueRef ir_render_err_wrap_payload(CodeGen *g, IrExecutable *executa
assert(wanted_type->id == TypeTableEntryIdErrorUnion);
TypeTableEntry *payload_type = wanted_type->data.error_union.payload_type;
TypeTableEntry *err_set_type = wanted_type->data.error_union.err_set_type;
if (!type_has_bits(err_set_type)) {
return ir_llvm_value(g, instruction->value);
}
LLVMValueRef ok_err_val = LLVMConstNull(g->err_tag_type->type_ref);

@@ -4328,9 +4343,14 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val) {
case TypeTableEntryIdErrorUnion:
{
TypeTableEntry *payload_type = type_entry->data.error_union.payload_type;
TypeTableEntry *err_set_type = type_entry->data.error_union.err_set_type;
if (!type_has_bits(payload_type)) {
assert(type_has_bits(err_set_type));
uint64_t value = const_val->data.x_err_union.err ? const_val->data.x_err_union.err->value : 0;
return LLVMConstInt(g->err_tag_type->type_ref, value, false);
} else if (!type_has_bits(err_set_type)) {
assert(type_has_bits(payload_type));
return gen_const_val(g, const_val->data.x_err_union.payload);
} else {
LLVMValueRef err_tag_value;
LLVMValueRef err_payload_value;
src/ir.cpp
@@ -2869,10 +2869,8 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_gen_assign_op(irb, scope, node, IrBinOpBinXor);
case BinOpTypeAssignBitOr:
return ir_gen_assign_op(irb, scope, node, IrBinOpBinOr);
case BinOpTypeAssignBoolAnd:
return ir_gen_assign_op(irb, scope, node, IrBinOpBoolAnd);
case BinOpTypeAssignBoolOr:
return ir_gen_assign_op(irb, scope, node, IrBinOpBoolOr);
case BinOpTypeAssignMergeErrorSets:
return ir_gen_assign_op(irb, scope, node, IrBinOpMergeErrorSets);
case BinOpTypeBoolOr:
return ir_gen_bool_or(irb, scope, node);
case BinOpTypeBoolAnd:

@@ -2919,6 +2917,8 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayCat);
case BinOpTypeArrayMult:
return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult);
case BinOpTypeMergeErrorSets:
return ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets);
case BinOpTypeUnwrapMaybe:
return ir_gen_maybe_ok_or(irb, scope, node);
case BinOpTypeErrorUnion:

@@ -5420,6 +5420,7 @@ static TypeTableEntry *get_error_set_union(CodeGen *g, ErrorTableEntry **errors,
}
}
assert(index == count);
assert(count != 0);
buf_appendf(&err_set_type->name, "}");

@@ -5453,21 +5454,21 @@ static IrInstruction *ir_gen_err_set_decl(IrBuilder *irb, Scope *parent_scope, A
uint32_t err_count = node->data.err_set_decl.decls.length;
if (err_count == 0) {
add_node_error(irb->codegen, node, buf_sprintf("empty error set"));
return irb->codegen->invalid_instruction;
}
Buf *type_name = get_anon_type_name(irb->codegen, irb->exec, "error set", node);
TypeTableEntry *err_set_type = new_type_table_entry(TypeTableEntryIdErrorSet);
buf_init_from_buf(&err_set_type->name, type_name);
err_set_type->is_copyable = true;
err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref;
err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type;
err_set_type->data.error_set.err_count = err_count;
err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count);
irb->codegen->error_di_types.append(&err_set_type->di_type);
if (err_count == 0) {
err_set_type->zero_bits = true;
err_set_type->di_type = irb->codegen->builtin_types.entry_void->di_type;
} else {
err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref;
err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type;
irb->codegen->error_di_types.append(&err_set_type->di_type);
err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count);
}
for (uint32_t i = 0; i < err_count; i += 1) {
AstNode *symbol_node = node->data.err_set_decl.decls.at(i);
@@ -6657,6 +6658,27 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
return ImplicitCastMatchResultNo;
}
static bool resolve_inferred_error_set(IrAnalyze *ira, TypeTableEntry *err_set_type, AstNode *source_node) {
FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn;
if (infer_fn != nullptr) {
if (infer_fn->anal_state == FnAnalStateInvalid) {
return false;
} else if (infer_fn->anal_state == FnAnalStateReady) {
analyze_fn_body(ira->codegen, infer_fn);
if (err_set_type->data.error_set.infer_fn != nullptr) {
assert(ira->codegen->errors.length != 0);
return false;
}
} else {
ir_add_error_node(ira, source_node,
buf_sprintf("cannot resolve inferred error set '%s': function '%s' not fully analyzed yet",
buf_ptr(&err_set_type->name), buf_ptr(&err_set_type->data.error_set.infer_fn->symbol_name)));
return false;
}
}
return true;
}
static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, IrInstruction **instructions, size_t instruction_count) {
assert(instruction_count >= 1);
IrInstruction *prev_inst = instructions[0];

@@ -6670,6 +6692,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
} else if (prev_inst->value.type->id == TypeTableEntryIdErrorSet) {
err_set_type = prev_inst->value.type;
errors = allocate<ErrorTableEntry *>(ira->codegen->errors_by_index.length);
if (!resolve_inferred_error_set(ira, err_set_type, prev_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
errors[error_entry->value] = error_entry;

@@ -6717,6 +6742,10 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
prev_inst = cur_inst;
continue;
}
if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
// if err_set_type is a superset of cur_type, keep err_set_type.
// if cur_type is a superset of err_set_type, switch err_set_type to cur_type
bool prev_is_superset = true;

@@ -6778,6 +6807,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
errors[error_entry->value] = nullptr;
}
if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
for (uint32_t i = 0; i < cur_err_set_type->data.error_set.err_count; i += 1) {
ErrorTableEntry *error_entry = cur_err_set_type->data.error_set.errors[i];
errors[error_entry->value] = error_entry;

@@ -6820,6 +6852,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (err_set_type == ira->codegen->builtin_types.entry_global_error_set) {
continue;
}
if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (err_set_type == nullptr) {
err_set_type = cur_type;
errors = allocate<ErrorTableEntry *>(ira->codegen->errors_by_index.length);
@@ -7543,6 +7578,8 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou
assert(contained_set->id == TypeTableEntryIdErrorSet);
assert(container_set->id == TypeTableEntryIdErrorSet);
zig_panic("TODO explicit error set cast");
if (container_set->data.error_set.infer_fn == nullptr &&
container_set != ira->codegen->builtin_types.entry_global_error_set)
{

@@ -8058,6 +8095,34 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc
return result;
}
TypeTableEntry *err_set_type;
if (err_type->id == TypeTableEntryIdErrorUnion) {
err_set_type = err_type->data.error_union.err_set_type;
} else if (err_type->id == TypeTableEntryIdErrorSet) {
err_set_type = err_type;
} else {
zig_unreachable();
}
if (err_set_type != ira->codegen->builtin_types.entry_global_error_set) {
if (!resolve_inferred_error_set(ira, err_set_type, source_instr->source_node)) {
return ira->codegen->invalid_instruction;
}
if (err_set_type->data.error_set.err_count == 0) {
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
source_instr->source_node, wanted_type);
result->value.type = wanted_type;
bigint_init_unsigned(&result->value.data.x_bigint, 0);
return result;
} else if (err_set_type->data.error_set.err_count == 1) {
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
source_instr->source_node, wanted_type);
result->value.type = wanted_type;
ErrorTableEntry *err = err_set_type->data.error_set.errors[0];
bigint_init_unsigned(&result->value.data.x_bigint, err->value);
return result;
}
}
BigInt bn;
bigint_init_unsigned(&bn, ira->codegen->errors_by_index.length);
if (!bigint_fits_in_bits(&bn, wanted_type->data.integral.bit_count, wanted_type->data.integral.is_signed)) {
@@ -9053,6 +9118,7 @@ static int ir_eval_math_op(TypeTableEntry *type_entry, ConstExprValue *op1_val,
case IrBinOpArrayCat:
case IrBinOpArrayMult:
case IrBinOpRemUnspecified:
case IrBinOpMergeErrorSets:
zig_unreachable();
case IrBinOpBinOr:
assert(is_int);

@@ -9625,6 +9691,45 @@ static TypeTableEntry *ir_analyze_array_mult(IrAnalyze *ira, IrInstructionBinOp
return get_array_type(ira->codegen, child_type, new_array_len);
}
static TypeTableEntry *ir_analyze_merge_error_sets(IrAnalyze *ira, IrInstructionBinOp *instruction) {
TypeTableEntry *op1_type = ir_resolve_type(ira, instruction->op1->other);
if (type_is_invalid(op1_type))
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *op2_type = ir_resolve_type(ira, instruction->op2->other);
if (type_is_invalid(op2_type))
return ira->codegen->builtin_types.entry_invalid;
if (op1_type == ira->codegen->builtin_types.entry_global_error_set ||
op2_type == ira->codegen->builtin_types.entry_global_error_set)
{
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->data.x_type = ira->codegen->builtin_types.entry_global_error_set;
return ira->codegen->builtin_types.entry_type;
}
if (!resolve_inferred_error_set(ira, op1_type, instruction->op1->other->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (!resolve_inferred_error_set(ira, op2_type, instruction->op2->other->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
ErrorTableEntry **errors = allocate<ErrorTableEntry *>(ira->codegen->errors_by_index.length);
for (uint32_t i = 0; i < op1_type->data.error_set.err_count; i += 1) {
ErrorTableEntry *error_entry = op1_type->data.error_set.errors[i];
errors[error_entry->value] = error_entry;
}
TypeTableEntry *result_type = get_error_set_union(ira->codegen, errors, op1_type, op2_type);
free(errors);
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->data.x_type = result_type;
return ira->codegen->builtin_types.entry_type;
}
static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
IrBinOp op_id = bin_op_instruction->op_id;
switch (op_id) {

@@ -9666,6 +9771,8 @@ static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructi
return ir_analyze_array_cat(ira, bin_op_instruction);
case IrBinOpArrayMult:
return ir_analyze_array_mult(ira, bin_op_instruction);
case IrBinOpMergeErrorSets:
return ir_analyze_merge_error_sets(ira, bin_op_instruction);
}
zig_unreachable();
}
@@ -11605,6 +11712,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
}
err_set_type = err_entry->set_with_only_this_in_it;
} else {
if (!resolve_inferred_error_set(ira, child_type, field_ptr_instruction->base.source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
ErrorTableEntry *err_entry = find_err_table_entry(child_type, field_name);
if (err_entry == nullptr) {
ir_add_error(ira, &field_ptr_instruction->base,

@@ -14623,6 +14733,19 @@ static TypeTableEntry *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruc
}
}
TypeTableEntry *err_set_type = type_entry->data.error_union.err_set_type;
if (!resolve_inferred_error_set(ira, err_set_type, instruction->base.source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (err_set_type != ira->codegen->builtin_types.entry_global_error_set &&
err_set_type->data.error_set.err_count == 0)
{
assert(err_set_type->data.error_set.infer_fn == nullptr);
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->data.x_bool = false;
return ira->codegen->builtin_types.entry_bool;
}
ir_build_test_err_from(&ira->new_irb, &instruction->base, value);
return ira->codegen->builtin_types.entry_bool;
} else if (type_entry->id == TypeTableEntryIdErrorSet) {

@@ -14861,22 +14984,8 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira
}
}
} else if (switch_type->id == TypeTableEntryIdErrorSet) {
FnTableEntry *infer_fn = switch_type->data.error_set.infer_fn;
if (infer_fn != nullptr) {
if (infer_fn->anal_state == FnAnalStateInvalid) {
return ira->codegen->builtin_types.entry_invalid;
} else if (infer_fn->anal_state == FnAnalStateReady) {
analyze_fn_body(ira->codegen, infer_fn);
if (switch_type->data.error_set.infer_fn != nullptr) {
assert(ira->codegen->errors.length != 0);
return ira->codegen->builtin_types.entry_invalid;
}
} else {
ir_add_error(ira, &instruction->base,
buf_sprintf("cannot switch on inferred error set '%s': function '%s' not fully analyzed yet",
buf_ptr(&switch_type->name), buf_ptr(&switch_type->data.error_set.infer_fn->symbol_name)));
return ira->codegen->builtin_types.entry_invalid;
}
if (!resolve_inferred_error_set(ira, switch_type, target_value->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
AstNode **field_prev_uses = allocate<AstNode *>(ira->codegen->errors_by_index.length);
@@ -130,6 +130,8 @@ static const char *ir_bin_op_id_str(IrBinOp op_id) {
return "++";
case IrBinOpArrayMult:
return "**";
case IrBinOpMergeErrorSets:
return "||";
}
zig_unreachable();
}
@@ -1088,12 +1088,13 @@ static BinOpType tok_to_mult_op(Token *token) {
case TokenIdSlash: return BinOpTypeDiv;
case TokenIdPercent: return BinOpTypeMod;
case TokenIdBang: return BinOpTypeErrorUnion;
case TokenIdBarBar: return BinOpTypeMergeErrorSets;
default: return BinOpTypeInvalid;
}
}
/*
MultiplyOperator = "!" | "*" | "/" | "%" | "**" | "*%"
MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%"
*/
static BinOpType ast_parse_mult_op(ParseContext *pc, size_t *token_index, bool mandatory) {
Token *token = &pc->tokens->at(*token_index);
@@ -195,7 +195,8 @@ enum TokenizeState {
TokenizeStateSawMinusPercent,
TokenizeStateSawAmpersand,
TokenizeStateSawCaret,
TokenizeStateSawPipe,
TokenizeStateSawBar,
TokenizeStateSawBarBar,
TokenizeStateLineComment,
TokenizeStateLineString,
TokenizeStateLineStringEnd,

@@ -594,7 +595,7 @@ void tokenize(Buf *buf, Tokenization *out) {
break;
case '|':
begin_token(&t, TokenIdBinOr);
t.state = TokenizeStateSawPipe;
t.state = TokenizeStateSawBar;
break;
case '=':
begin_token(&t, TokenIdEq);

@@ -888,20 +889,37 @@ void tokenize(Buf *buf, Tokenization *out) {
continue;
}
break;
case TokenizeStateSawPipe:
case TokenizeStateSawBar:
switch (c) {
case '=':
set_token_id(&t, t.cur_tok, TokenIdBitOrEq);
end_token(&t);
t.state = TokenizeStateStart;
break;
case '|':
set_token_id(&t, t.cur_tok, TokenIdBarBar);
t.state = TokenizeStateSawBarBar;
break;
default:
t.pos -= 1;
end_token(&t);
t.state = TokenizeStateStart;
continue;
}
break;
case TokenizeStateSawBarBar:
switch (c) {
case '=':
set_token_id(&t, t.cur_tok, TokenIdBarBarEq);
end_token(&t);
t.state = TokenizeStateStart;
break;
default:
t.pos -= 1;
end_token(&t);
t.state = TokenizeStateStart;
continue;
}
break;
case TokenizeStateSawSlash:
switch (c) {
case '/':

@@ -1428,7 +1446,7 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawDash:
case TokenizeStateSawAmpersand:
case TokenizeStateSawCaret:
case TokenizeStateSawPipe:
case TokenizeStateSawBar:
case TokenizeStateSawEq:
case TokenizeStateSawBang:
case TokenizeStateSawLessThan:

@@ -1443,6 +1461,7 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawMinusPercent:
case TokenizeStateLineString:
case TokenizeStateLineStringEnd:
case TokenizeStateSawBarBar:
end_token(&t);
break;
case TokenizeStateSawDotDot:

@@ -1475,6 +1494,7 @@ const char * token_name(TokenId id) {
case TokenIdArrow: return "->";
case TokenIdAtSign: return "@";
case TokenIdBang: return "!";
case TokenIdBarBar: return "||";
case TokenIdBinOr: return "|";
case TokenIdBinXor: return "^";
case TokenIdBitAndEq: return "&=";

@@ -1577,6 +1597,7 @@ const char * token_name(TokenId id) {
case TokenIdTimesEq: return "*=";
case TokenIdTimesPercent: return "*%";
case TokenIdTimesPercentEq: return "*%=";
case TokenIdBarBarEq: return "||=";
}
return "(invalid token)";
}

@@ -17,6 +17,8 @@ enum TokenId {
TokenIdArrow,
TokenIdAtSign,
TokenIdBang,
TokenIdBarBar,
TokenIdBarBarEq,
TokenIdBinOr,
TokenIdBinXor,
TokenIdBitAndEq,
@@ -210,6 +210,7 @@ fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: &io.OutStream, a
}
} else |err| switch (err) {
error.EndOfFile => {},
else => return err,
}
} else |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
@@ -102,12 +102,14 @@ pub const File = struct {
/// The OS-specific file descriptor or file handle.
handle: os.FileHandle,
const OpenError = os.WindowsOpenError || os.PosixOpenError;
/// `path` may need to be copied in memory to add a null terminating byte. In this case
/// a fixed size buffer of size std.os.max_noalloc_path_len is an attempted solution. If the fixed
/// size buffer is too small, and the provided allocator is null, error.NameTooLong is returned.
/// otherwise if the fixed size buffer is too small, allocator is used to obtain the needed memory.
/// Call close to clean up.
pub fn openRead(path: []const u8, allocator: ?&mem.Allocator) !File {
pub fn openRead(path: []const u8, allocator: ?&mem.Allocator) OpenError!File {
if (is_posix) {
const flags = system.O_LARGEFILE|system.O_RDONLY;
const fd = try os.posixOpen(path, flags, 0, allocator);

@@ -338,7 +340,9 @@ pub const File = struct {
}
}
fn write(self: &File, bytes: []const u8) !void {
const WriteError = os.WindowsWriteError || os.PosixWriteError;
fn write(self: &File, bytes: []const u8) WriteError!void {
if (is_posix) {
try os.posixWrite(self.handle, bytes);
} else if (is_windows) {
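A hypothetical usage sketch of the newly typed openRead (the helper name and path handling are invented, not part of this commit): passing null as the allocator means an over-long path is reported as error.NameTooLong per the doc comment above, and the caller's inferred !void error set picks up OpenError's members.

const std = @import("std");
const io = std.io;

// Illustration only; not part of the diff.
fn ensureReadable(path: []const u8) !void {
    var file = try io.File.openRead(path, null);
    file.close();
}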
std/mem.zig
@@ -5,12 +5,12 @@ const math = std.math;
const builtin = @import("builtin");
pub const Allocator = struct {
const Errors = error {OutOfMemory};
const Error = error {OutOfMemory};
/// Allocate byte_count bytes and return them in a slice, with the
/// slice's pointer aligned at least to alignment bytes.
/// The returned newly allocated memory is undefined.
allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Errors![]u8,
allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Error![]u8,
/// If `new_byte_count > old_mem.len`:
/// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.

@@ -21,7 +21,7 @@ pub const Allocator = struct {
/// * alignment <= alignment of old_mem.ptr
///
/// The returned newly allocated memory is undefined.
reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Errors![]u8,
reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
freeFn: fn (self: &Allocator, old_mem: []u8) void,

@@ -42,7 +42,7 @@ pub const Allocator = struct {
fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29,
n: usize) ![]align(alignment) T
{
const byte_count = try math.mul(usize, @sizeOf(T), n);
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
// This loop should get optimized out in ReleaseFast mode
for (byte_slice) |*byte| {

@@ -63,7 +63,7 @@ pub const Allocator = struct {
}
const old_byte_slice = ([]u8)(old_mem);
const byte_count = try math.mul(usize, @sizeOf(T), n);
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
// This loop should get optimized out in ReleaseFast mode
for (byte_slice[old_byte_slice.len..]) |*byte| {
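The change from try math.mul(...) to catch return Error.OutOfMemory above follows from the explicit Error![]u8 return types: math.mul can fail with an overflow error that is not a member of error {OutOfMemory}, so try can no longer forward it and it has to be translated. A minimal sketch of the same pattern (an invented helper, not the std code):

const std = @import("std");
const math = std.math;

const Error = error {OutOfMemory};

// An overflowing size computation is reported as OutOfMemory because
// Overflow is not in the declared error set.
fn totalByteCount(elem_size: usize, n: usize) Error!usize {
    const byte_count = math.mul(usize, elem_size, n) catch return Error.OutOfMemory;
    return byte_count;
}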
@@ -38,6 +38,9 @@ pub const windowsLoadDll = windows_util.windowsLoadDll;
pub const windowsUnloadDll = windows_util.windowsUnloadDll;
pub const createWindowsEnvBlock = windows_util.createWindowsEnvBlock;
pub const WindowsOpenError = windows_util.OpenError;
pub const WindowsWriteError = windows_util.WriteError;
pub const FileHandle = if (is_windows) windows.HANDLE else i32;
const debug = std.debug;

@@ -188,8 +191,21 @@ pub fn posixRead(fd: i32, buf: []u8) !void {
}
}
pub const PosixWriteError = error {
WouldBlock,
FileClosed,
DestinationAddressRequired,
DiskQuota,
FileTooBig,
InputOutput,
NoSpaceLeft,
AccessDenied,
BrokenPipe,
Unexpected,
};
/// Calls POSIX write, and keeps trying if it gets interrupted.
pub fn posixWrite(fd: i32, bytes: []const u8) !void {
pub fn posixWrite(fd: i32, bytes: []const u8) PosixWriteError!void {
while (true) {
const write_ret = posix.write(fd, bytes.ptr, bytes.len);
const write_err = posix.getErrno(write_ret);

@@ -197,15 +213,15 @@ pub fn posixWrite(fd: i32, bytes: []const u8) !void {
return switch (write_err) {
posix.EINTR => continue,
posix.EINVAL, posix.EFAULT => unreachable,
posix.EAGAIN => error.WouldBlock,
posix.EBADF => error.FileClosed,
posix.EDESTADDRREQ => error.DestinationAddressRequired,
posix.EDQUOT => error.DiskQuota,
posix.EFBIG => error.FileTooBig,
posix.EIO => error.InputOutput,
posix.ENOSPC => error.NoSpaceLeft,
posix.EPERM => error.AccessDenied,
posix.EPIPE => error.BrokenPipe,
posix.EAGAIN => PosixWriteError.WouldBlock,
posix.EBADF => PosixWriteError.FileClosed,
posix.EDESTADDRREQ => PosixWriteError.DestinationAddressRequired,
posix.EDQUOT => PosixWriteError.DiskQuota,
posix.EFBIG => PosixWriteError.FileTooBig,
posix.EIO => PosixWriteError.InputOutput,
posix.ENOSPC => PosixWriteError.NoSpaceLeft,
posix.EPERM => PosixWriteError.AccessDenied,
posix.EPIPE => PosixWriteError.BrokenPipe,
else => unexpectedErrorPosix(write_err),
};
}

@@ -213,13 +229,32 @@ pub fn posixWrite(fd: i32, bytes: []const u8) !void {
}
}
pub const PosixOpenError = error {
OutOfMemory,
AccessDenied,
FileTooBig,
IsDir,
SymLinkLoop,
ProcessFdQuotaExceeded,
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
PathNotFound,
SystemResources,
NoSpaceLeft,
NotDir,
AccessDenied,
PathAlreadyExists,
Unexpected,
};
/// ::file_path may need to be copied in memory to add a null terminating byte. In this case
/// a fixed size buffer of size ::max_noalloc_path_len is an attempted solution. If the fixed
/// size buffer is too small, and the provided allocator is null, ::error.NameTooLong is returned.
/// otherwise if the fixed size buffer is too small, allocator is used to obtain the needed memory.
/// Calls POSIX open, keeps trying if it gets interrupted, and translates
/// the return value into zig errors.
pub fn posixOpen(file_path: []const u8, flags: u32, perm: usize, allocator: ?&Allocator) !i32 {
pub fn posixOpen(file_path: []const u8, flags: u32, perm: usize, allocator: ?&Allocator) PosixOpenError!i32 {
var stack_buf: [max_noalloc_path_len]u8 = undefined;
var path0: []u8 = undefined;
var need_free = false;

@@ -247,20 +282,20 @@ pub fn posixOpen(file_path: []const u8, flags: u32, perm: usize, allocator: ?&Al
posix.EFAULT => unreachable,
posix.EINVAL => unreachable,
posix.EACCES => error.AccessDenied,
posix.EFBIG, posix.EOVERFLOW => error.FileTooBig,
posix.EISDIR => error.IsDir,
posix.ELOOP => error.SymLinkLoop,
posix.EMFILE => error.ProcessFdQuotaExceeded,
posix.ENAMETOOLONG => error.NameTooLong,
posix.ENFILE => error.SystemFdQuotaExceeded,
posix.ENODEV => error.NoDevice,
posix.ENOENT => error.PathNotFound,
posix.ENOMEM => error.SystemResources,
posix.ENOSPC => error.NoSpaceLeft,
posix.ENOTDIR => error.NotDir,
posix.EPERM => error.AccessDenied,
posix.EEXIST => error.PathAlreadyExists,
posix.EACCES => PosixOpenError.AccessDenied,
posix.EFBIG, posix.EOVERFLOW => PosixOpenError.FileTooBig,
posix.EISDIR => PosixOpenError.IsDir,
posix.ELOOP => PosixOpenError.SymLinkLoop,
posix.EMFILE => PosixOpenError.ProcessFdQuotaExceeded,
posix.ENAMETOOLONG => PosixOpenError.NameTooLong,
posix.ENFILE => PosixOpenError.SystemFdQuotaExceeded,
posix.ENODEV => PosixOpenError.NoDevice,
posix.ENOENT => PosixOpenError.PathNotFound,
posix.ENOMEM => PosixOpenError.SystemResources,
posix.ENOSPC => PosixOpenError.NoSpaceLeft,
posix.ENOTDIR => PosixOpenError.NotDir,
posix.EPERM => PosixOpenError.AccessDenied,
posix.EEXIST => PosixOpenError.PathAlreadyExists,
else => unexpectedErrorPosix(err),
};
}
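With posixWrite now declaring PosixWriteError!void instead of an inferred !void, callers can switch over a known, finite error set (compare the check_switch_prongs change in src/ir.cpp above). A hypothetical wrapper with an invented retry policy, for illustration only:

const std = @import("std");
const os = std.os;

// Retry once on WouldBlock, forward everything else unchanged.
fn writeOnce(fd: i32, bytes: []const u8) os.PosixWriteError!void {
    os.posixWrite(fd, bytes) catch |err| switch (err) {
        error.WouldBlock => return os.posixWrite(fd, bytes),
        else => return err,
    };
}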
@@ -26,16 +26,25 @@ pub fn windowsClose(handle: windows.HANDLE) void {
assert(windows.CloseHandle(handle) != 0);
}
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) !void {
pub const WriteError = error {
SystemResources,
OperationAborted,
SystemResources,
IoPending,
BrokenPipe,
Unexpected,
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
if (windows.WriteFile(handle, @ptrCast(&const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => error.SystemResources,
windows.ERROR.NOT_ENOUGH_MEMORY => error.SystemResources,
windows.ERROR.OPERATION_ABORTED => error.OperationAborted,
windows.ERROR.NOT_ENOUGH_QUOTA => error.SystemResources,
windows.ERROR.IO_PENDING => error.IoPending,
windows.ERROR.BROKEN_PIPE => error.BrokenPipe,
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
windows.ERROR.NOT_ENOUGH_MEMORY => WriteError.SystemResources,
windows.ERROR.OPERATION_ABORTED => WriteError.OperationAborted,
windows.ERROR.NOT_ENOUGH_QUOTA => WriteError.SystemResources,
windows.ERROR.IO_PENDING => WriteError.IoPending,
windows.ERROR.BROKEN_PIPE => WriteError.BrokenPipe,
else => os.unexpectedErrorWindows(err),
};
}

@@ -66,12 +75,22 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
mem.indexOf(u16, name_wide, []u16{'-','p','t','y'}) != null;
}
pub const OpenError = error {
SharingViolation,
PathAlreadyExists,
FileNotFound,
AccessDenied,
PipeBusy,
Unexpected,
};
/// `file_path` may need to be copied in memory to add a null terminating byte. In this case
/// a fixed size buffer of size ::max_noalloc_path_len is an attempted solution. If the fixed
/// size buffer is too small, and the provided allocator is null, ::error.NameTooLong is returned.
/// otherwise if the fixed size buffer is too small, allocator is used to obtain the needed memory.
pub fn windowsOpen(file_path: []const u8, desired_access: windows.DWORD, share_mode: windows.DWORD,
creation_disposition: windows.DWORD, flags_and_attrs: windows.DWORD, allocator: ?&mem.Allocator) %windows.HANDLE
creation_disposition: windows.DWORD, flags_and_attrs: windows.DWORD, allocator: ?&mem.Allocator)
OpenError!windows.HANDLE
{
var stack_buf: [os.max_noalloc_path_len]u8 = undefined;
var path0: []u8 = undefined;

@@ -95,11 +114,11 @@ pub fn windowsOpen(file_path: []const u8, desired_access: windows.DWORD, share_m
if (result == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.SHARING_VIOLATION => error.SharingViolation,
windows.ERROR.ALREADY_EXISTS, windows.ERROR.FILE_EXISTS => error.PathAlreadyExists,
windows.ERROR.FILE_NOT_FOUND => error.FileNotFound,
windows.ERROR.ACCESS_DENIED => error.AccessDenied,
windows.ERROR.PIPE_BUSY => error.PipeBusy,
windows.ERROR.SHARING_VIOLATION => OpenError.SharingViolation,
windows.ERROR.ALREADY_EXISTS, windows.ERROR.FILE_EXISTS => OpenError.PathAlreadyExists,
windows.ERROR.FILE_NOT_FOUND => OpenError.FileNotFound,
windows.ERROR.ACCESS_DENIED => OpenError.AccessDenied,
windows.ERROR.PIPE_BUSY => OpenError.PipeBusy,
else => os.unexpectedErrorWindows(err),
};
}