rename "debug safety" to "runtime safety"

closes #437
Andrew Kelley 2018-01-25 01:46:12 -05:00
parent b71a56c9df
commit e5bc5873d7
35 changed files with 221 additions and 243 deletions
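For readers skimming the diff: the user-facing change is not just the name. The old builtin took an explicit scope argument, while the new one takes a single bool and applies to the scope that contains the call. A minimal before/after sketch, using illustrative function names but the same call pattern updated throughout this diff:

// Before this commit: two arguments, explicit scope reference.
fn absBefore(x: i32) -> i32 {
    @setDebugSafety(this, false);
    return if (x < 0) -x else x;
}

// After this commit: one argument; the containing scope is implied.
fn absAfter(x: i32) -> i32 {
    @setRuntimeSafety(false);
    return if (x < 0) -x else x;
}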

View File

@ -116,7 +116,7 @@ pub fn build(b: &Builder) -> %void {
test_step.dependOn(tests.addBuildExampleTests(b, test_filter));
test_step.dependOn(tests.addCompileErrorTests(b, test_filter));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter));
test_step.dependOn(tests.addDebugSafetyTests(b, test_filter));
test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
test_step.dependOn(tests.addGenHTests(b, test_filter));
}

View File

@ -896,7 +896,7 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: &io
}
if (mem.indexOf(u8, result.stderr, error_match) == null) {
warn("{}\nExpected to find '{}' in stderr", result.stderr, error_match);
return parseError(tokenizer, code.source_token, "example did not have expected debug safety error message");
return parseError(tokenizer, code.source_token, "example did not have expected runtime safety error message");
}
const escaped_stderr = try escapeHtml(allocator, result.stderr);
const colored_stderr = try termColor(allocator, escaped_stderr);

View File

@ -3090,7 +3090,7 @@ fn doAThing(nullable_foo: ?&Foo) {
{#header_open|this#}
<p>TODO: example of this referring to Self struct</p>
<p>TODO: example of this referring to recursion function</p>
<p>TODO: example of this referring to basic block for @setDebugSafety</p>
<p>TODO: example of this referring to basic block for @setRuntimeSafety</p>
{#header_close#}
{#header_open|comptime#}
<p>
@ -4325,10 +4325,10 @@ test "call foo" {
Tells the optimizer that a function is rarely called.
</p>
{#header_close#}
{#header_open|@setDebugSafety#}
<pre><code class="zig">@setDebugSafety(scope, safety_on: bool)</code></pre>
{#header_open|@setRuntimeSafety#}
<pre><code class="zig">@setRuntimeSafety(safety_on: bool)</code></pre>
<p>
Sets whether debug safety checks are on for a given scope.
Sets whether runtime safety checks are on for the scope that contains the function call.
</p>
{#header_close#}
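As the updated wording above says, the builtin now acts on the scope containing the call rather than on a scope passed as an argument. A minimal sketch (the function name is illustrative, not taken from this commit):

fn addUnchecked(a: u32, b: u32) -> u32 {
    // Turns runtime safety checks (e.g. the integer overflow check) off
    // for this function's scope only; callers are unaffected.
    @setRuntimeSafety(false);
    return a + b;
}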
@ -4595,7 +4595,7 @@ pub fn build(b: &Builder) -> %void {
detected at compile-time, Zig emits an error. Most undefined behavior that
cannot be detected at compile-time can be detected at runtime. In these cases,
Zig has safety checks. Safety checks can be disabled on a per-block basis
with <code>@setDebugSafety</code>. The {#link|ReleaseFast#}
with <code>@setRuntimeSafety</code>. The {#link|ReleaseFast#}
build mode disables all safety checks in order to facilitate optimizations.
</p>
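The per-block usage described here matches the pattern in std/math/index.zig as changed by this commit: safety stays on for the function as a whole and is switched off only inside an inner block that has already been proven safe.

error Overflow;

pub fn absInt(x: var) -> %@typeOf(x) {
    if (x == @minValue(@typeOf(x)))
        return error.Overflow;
    {
        // Runtime safety is disabled only inside this block; the check
        // above already rejects the one value whose negation overflows.
        @setRuntimeSafety(false);
        return if (x < 0) -x else x;
    }
}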
<p>
@ -5765,7 +5765,7 @@ hljs.registerLanguage("zig", function(t) {
a = t.IR + "\\s*\\(",
c = {
keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong",
built_in: "breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setDebugSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchg fence divExact truncate",
built_in: "breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchg fence divExact truncate",
literal: "true false null undefined"
},
n = [e, t.CLCM, t.CBCM, s, r];

View File

@ -33,7 +33,7 @@ pub const Instruction = struct {
TypeOf,
ToPtrType,
PtrTypeChild,
SetDebugSafety,
SetRuntimeSafety,
SetFloatMode,
ArrayType,
SliceType,

View File

@ -1255,7 +1255,7 @@ enum BuiltinFnId {
BuiltinFnIdTruncate,
BuiltinFnIdIntType,
BuiltinFnIdSetCold,
BuiltinFnIdSetDebugSafety,
BuiltinFnIdSetRuntimeSafety,
BuiltinFnIdSetFloatMode,
BuiltinFnIdTypeName,
BuiltinFnIdCanImplicitCast,
@ -1836,7 +1836,7 @@ enum IrInstructionId {
IrInstructionIdToPtrType,
IrInstructionIdPtrTypeChild,
IrInstructionIdSetCold,
IrInstructionIdSetDebugSafety,
IrInstructionIdSetRuntimeSafety,
IrInstructionIdSetFloatMode,
IrInstructionIdArrayType,
IrInstructionIdSliceType,
@ -2214,11 +2214,10 @@ struct IrInstructionSetCold {
IrInstruction *is_cold;
};
struct IrInstructionSetDebugSafety {
struct IrInstructionSetRuntimeSafety {
IrInstruction base;
IrInstruction *scope_value;
IrInstruction *debug_safety_on;
IrInstruction *safety_on;
};
struct IrInstructionSetFloatMode {

View File

@ -806,7 +806,7 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) {
return true;
}
static bool ir_want_debug_safety(CodeGen *g, IrInstruction *instruction) {
static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
if (g->build_mode == BuildModeFastRelease)
return false;
@ -901,7 +901,7 @@ static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace
LLVMBuildUnreachable(g->builder);
}
static void gen_debug_safety_crash(CodeGen *g, PanicMsgId msg_id) {
static void gen_safety_crash(CodeGen *g, PanicMsgId msg_id) {
gen_panic(g, get_panic_msg_ptr_val(g, msg_id), nullptr);
}
@ -1140,7 +1140,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
return fn_val;
}
static void gen_debug_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val) {
static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val) {
LLVMValueRef safety_crash_err_fn = get_safety_crash_err_fn(g);
LLVMValueRef err_ret_trace_val = g->cur_err_ret_trace_val;
if (err_ret_trace_val == nullptr) {
@ -1179,7 +1179,7 @@ static void add_bounds_check(CodeGen *g, LLVMValueRef target_val,
LLVMBuildCondBr(g->builder, lower_ok_val, lower_ok_block, bounds_check_fail_block);
LLVMPositionBuilderAtEnd(g->builder, bounds_check_fail_block);
gen_debug_safety_crash(g, PanicMsgIdBoundsCheckFailure);
gen_safety_crash(g, PanicMsgIdBoundsCheckFailure);
if (upper_value) {
LLVMPositionBuilderAtEnd(g->builder, lower_ok_block);
@ -1190,7 +1190,7 @@ static void add_bounds_check(CodeGen *g, LLVMValueRef target_val,
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_debug_safety, TypeTableEntry *actual_type,
static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, TypeTableEntry *actual_type,
TypeTableEntry *wanted_type, LLVMValueRef expr_val)
{
assert(actual_type->id == wanted_type->id);
@ -1209,7 +1209,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_debug_safety, Typ
if (actual_bits >= wanted_bits && actual_type->id == TypeTableEntryIdInt &&
!wanted_type->data.integral.is_signed && actual_type->data.integral.is_signed &&
want_debug_safety)
want_runtime_safety)
{
LLVMValueRef zero = LLVMConstNull(actual_type->type_ref);
LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntSGE, expr_val, zero, "");
@ -1219,7 +1219,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_debug_safety, Typ
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdCastNegativeToUnsigned);
gen_safety_crash(g, PanicMsgIdCastNegativeToUnsigned);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -1243,7 +1243,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_debug_safety, Typ
return LLVMBuildFPTrunc(g->builder, expr_val, wanted_type->type_ref, "");
} else if (actual_type->id == TypeTableEntryIdInt) {
LLVMValueRef trunc_val = LLVMBuildTrunc(g->builder, expr_val, wanted_type->type_ref, "");
if (!want_debug_safety) {
if (!want_runtime_safety) {
return trunc_val;
}
LLVMValueRef orig_val;
@ -1258,7 +1258,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_debug_safety, Typ
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdCastTruncatedData);
gen_safety_crash(g, PanicMsgIdCastTruncatedData);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
return trunc_val;
@ -1286,7 +1286,7 @@ static LLVMValueRef gen_overflow_op(CodeGen *g, TypeTableEntry *type_entry, AddS
LLVMBuildCondBr(g->builder, overflow_bit, fail_block, ok_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdIntegerOverflow);
gen_safety_crash(g, PanicMsgIdIntegerOverflow);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
return result;
@ -1494,7 +1494,7 @@ static LLVMValueRef gen_overflow_shl_op(CodeGen *g, TypeTableEntry *type_entry,
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdShlOverflowedBits);
gen_safety_crash(g, PanicMsgIdShlOverflowedBits);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
return result;
@ -1519,7 +1519,7 @@ static LLVMValueRef gen_overflow_shr_op(CodeGen *g, TypeTableEntry *type_entry,
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdShrOverflowedBits);
gen_safety_crash(g, PanicMsgIdShrOverflowedBits);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
return result;
@ -1565,14 +1565,14 @@ static LLVMValueRef bigint_to_llvm_const(LLVMTypeRef type_ref, BigInt *bigint) {
}
}
static LLVMValueRef gen_div(CodeGen *g, bool want_debug_safety, bool want_fast_math,
static LLVMValueRef gen_div(CodeGen *g, bool want_runtime_safety, bool want_fast_math,
LLVMValueRef val1, LLVMValueRef val2,
TypeTableEntry *type_entry, DivKind div_kind)
{
ZigLLVMSetFastMath(g->builder, want_fast_math);
LLVMValueRef zero = LLVMConstNull(type_entry->type_ref);
if (want_debug_safety && (want_fast_math || type_entry->id != TypeTableEntryIdFloat)) {
if (want_runtime_safety && (want_fast_math || type_entry->id != TypeTableEntryIdFloat)) {
LLVMValueRef is_zero_bit;
if (type_entry->id == TypeTableEntryIdInt) {
is_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, val2, zero, "");
@ -1586,7 +1586,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_debug_safety, bool want_fast_m
LLVMBuildCondBr(g->builder, is_zero_bit, div_zero_fail_block, div_zero_ok_block);
LLVMPositionBuilderAtEnd(g->builder, div_zero_fail_block);
gen_debug_safety_crash(g, PanicMsgIdDivisionByZero);
gen_safety_crash(g, PanicMsgIdDivisionByZero);
LLVMPositionBuilderAtEnd(g->builder, div_zero_ok_block);
@ -1603,7 +1603,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_debug_safety, bool want_fast_m
LLVMBuildCondBr(g->builder, overflow_fail_bit, overflow_fail_block, overflow_ok_block);
LLVMPositionBuilderAtEnd(g->builder, overflow_fail_block);
gen_debug_safety_crash(g, PanicMsgIdIntegerOverflow);
gen_safety_crash(g, PanicMsgIdIntegerOverflow);
LLVMPositionBuilderAtEnd(g->builder, overflow_ok_block);
}
@ -1615,7 +1615,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_debug_safety, bool want_fast_m
case DivKindFloat:
return result;
case DivKindExact:
if (want_debug_safety) {
if (want_runtime_safety) {
LLVMValueRef floored = gen_floor(g, result, type_entry);
LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactOk");
LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactFail");
@ -1624,7 +1624,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_debug_safety, bool want_fast_m
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdExactDivisionRemainder);
gen_safety_crash(g, PanicMsgIdExactDivisionRemainder);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -1672,7 +1672,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_debug_safety, bool want_fast_m
return LLVMBuildUDiv(g->builder, val1, val2, "");
}
case DivKindExact:
if (want_debug_safety) {
if (want_runtime_safety) {
LLVMValueRef remainder_val;
if (type_entry->data.integral.is_signed) {
remainder_val = LLVMBuildSRem(g->builder, val1, val2, "");
@ -1686,7 +1686,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_debug_safety, bool want_fast_m
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdExactDivisionRemainder);
gen_safety_crash(g, PanicMsgIdExactDivisionRemainder);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -1724,14 +1724,14 @@ enum RemKind {
RemKindMod,
};
static LLVMValueRef gen_rem(CodeGen *g, bool want_debug_safety, bool want_fast_math,
static LLVMValueRef gen_rem(CodeGen *g, bool want_runtime_safety, bool want_fast_math,
LLVMValueRef val1, LLVMValueRef val2,
TypeTableEntry *type_entry, RemKind rem_kind)
{
ZigLLVMSetFastMath(g->builder, want_fast_math);
LLVMValueRef zero = LLVMConstNull(type_entry->type_ref);
if (want_debug_safety) {
if (want_runtime_safety) {
LLVMValueRef is_zero_bit;
if (type_entry->id == TypeTableEntryIdInt) {
LLVMIntPredicate pred = type_entry->data.integral.is_signed ? LLVMIntSLE : LLVMIntEQ;
@ -1746,7 +1746,7 @@ static LLVMValueRef gen_rem(CodeGen *g, bool want_debug_safety, bool want_fast_m
LLVMBuildCondBr(g->builder, is_zero_bit, rem_zero_fail_block, rem_zero_ok_block);
LLVMPositionBuilderAtEnd(g->builder, rem_zero_fail_block);
gen_debug_safety_crash(g, PanicMsgIdRemainderDivisionByZero);
gen_safety_crash(g, PanicMsgIdRemainderDivisionByZero);
LLVMPositionBuilderAtEnd(g->builder, rem_zero_ok_block);
}
@ -1792,8 +1792,8 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
op_id == IrBinOpBitShiftRightExact);
TypeTableEntry *type_entry = op1->value.type;
bool want_debug_safety = bin_op_instruction->safety_check_on &&
ir_want_debug_safety(g, &bin_op_instruction->base);
bool want_runtime_safety = bin_op_instruction->safety_check_on &&
ir_want_runtime_safety(g, &bin_op_instruction->base);
LLVMValueRef op1_value = ir_llvm_value(g, op1);
LLVMValueRef op2_value = ir_llvm_value(g, op2);
@ -1841,7 +1841,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
bool is_wrapping = (op_id == IrBinOpAddWrap);
if (is_wrapping) {
return LLVMBuildAdd(g->builder, op1_value, op2_value, "");
} else if (want_debug_safety) {
} else if (want_runtime_safety) {
return gen_overflow_op(g, type_entry, AddSubMulAdd, op1_value, op2_value);
} else if (type_entry->data.integral.is_signed) {
return LLVMBuildNSWAdd(g->builder, op1_value, op2_value, "");
@ -1866,7 +1866,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
bool is_sloppy = (op_id == IrBinOpBitShiftLeftLossy);
if (is_sloppy) {
return LLVMBuildShl(g->builder, op1_value, op2_casted, "");
} else if (want_debug_safety) {
} else if (want_runtime_safety) {
return gen_overflow_shl_op(g, type_entry, op1_value, op2_casted);
} else if (type_entry->data.integral.is_signed) {
return ZigLLVMBuildNSWShl(g->builder, op1_value, op2_casted, "");
@ -1887,7 +1887,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
} else {
return LLVMBuildLShr(g->builder, op1_value, op2_casted, "");
}
} else if (want_debug_safety) {
} else if (want_runtime_safety) {
return gen_overflow_shr_op(g, type_entry, op1_value, op2_casted);
} else if (type_entry->data.integral.is_signed) {
return ZigLLVMBuildAShrExact(g->builder, op1_value, op2_casted, "");
@ -1904,7 +1904,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
bool is_wrapping = (op_id == IrBinOpSubWrap);
if (is_wrapping) {
return LLVMBuildSub(g->builder, op1_value, op2_value, "");
} else if (want_debug_safety) {
} else if (want_runtime_safety) {
return gen_overflow_op(g, type_entry, AddSubMulSub, op1_value, op2_value);
} else if (type_entry->data.integral.is_signed) {
return LLVMBuildNSWSub(g->builder, op1_value, op2_value, "");
@ -1923,7 +1923,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
bool is_wrapping = (op_id == IrBinOpMultWrap);
if (is_wrapping) {
return LLVMBuildMul(g->builder, op1_value, op2_value, "");
} else if (want_debug_safety) {
} else if (want_runtime_safety) {
return gen_overflow_op(g, type_entry, AddSubMulMul, op1_value, op2_value);
} else if (type_entry->data.integral.is_signed) {
return LLVMBuildNSWMul(g->builder, op1_value, op2_value, "");
@ -1934,22 +1934,22 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
zig_unreachable();
}
case IrBinOpDivUnspecified:
return gen_div(g, want_debug_safety, ir_want_fast_math(g, &bin_op_instruction->base),
return gen_div(g, want_runtime_safety, ir_want_fast_math(g, &bin_op_instruction->base),
op1_value, op2_value, type_entry, DivKindFloat);
case IrBinOpDivExact:
return gen_div(g, want_debug_safety, ir_want_fast_math(g, &bin_op_instruction->base),
return gen_div(g, want_runtime_safety, ir_want_fast_math(g, &bin_op_instruction->base),
op1_value, op2_value, type_entry, DivKindExact);
case IrBinOpDivTrunc:
return gen_div(g, want_debug_safety, ir_want_fast_math(g, &bin_op_instruction->base),
return gen_div(g, want_runtime_safety, ir_want_fast_math(g, &bin_op_instruction->base),
op1_value, op2_value, type_entry, DivKindTrunc);
case IrBinOpDivFloor:
return gen_div(g, want_debug_safety, ir_want_fast_math(g, &bin_op_instruction->base),
return gen_div(g, want_runtime_safety, ir_want_fast_math(g, &bin_op_instruction->base),
op1_value, op2_value, type_entry, DivKindFloor);
case IrBinOpRemRem:
return gen_rem(g, want_debug_safety, ir_want_fast_math(g, &bin_op_instruction->base),
return gen_rem(g, want_runtime_safety, ir_want_fast_math(g, &bin_op_instruction->base),
op1_value, op2_value, type_entry, RemKindRem);
case IrBinOpRemMod:
return gen_rem(g, want_debug_safety, ir_want_fast_math(g, &bin_op_instruction->base),
return gen_rem(g, want_runtime_safety, ir_want_fast_math(g, &bin_op_instruction->base),
op1_value, op2_value, type_entry, RemKindMod);
}
zig_unreachable();
@ -2007,7 +2007,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
new_len = LLVMBuildMul(g->builder, src_len, src_size_val, "");
} else if (src_size == 1) {
LLVMValueRef dest_size_val = LLVMConstInt(g->builtin_types.entry_usize->type_ref, dest_size, false);
if (ir_want_debug_safety(g, &cast_instruction->base)) {
if (ir_want_runtime_safety(g, &cast_instruction->base)) {
LLVMValueRef remainder_val = LLVMBuildURem(g->builder, src_len, dest_size_val, "");
LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->type_ref);
LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, remainder_val, zero, "");
@ -2016,7 +2016,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdSliceWidenRemainder);
gen_safety_crash(g, PanicMsgIdSliceWidenRemainder);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -2111,7 +2111,7 @@ static LLVMValueRef ir_render_widen_or_shorten(CodeGen *g, IrExecutable *executa
int_type = actual_type;
}
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
return gen_widen_or_shorten(g, ir_want_debug_safety(g, &instruction->base), int_type,
return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base), int_type,
instruction->base.value.type, target_val);
}
@ -2133,7 +2133,7 @@ static LLVMValueRef ir_render_int_to_enum(CodeGen *g, IrExecutable *executable,
TypeTableEntry *tag_int_type = wanted_type->data.enumeration.tag_int_type;
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
return gen_widen_or_shorten(g, ir_want_debug_safety(g, &instruction->base),
return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
instruction->target->value.type, tag_int_type, target_val);
}
@ -2147,7 +2147,7 @@ static LLVMValueRef ir_render_int_to_err(CodeGen *g, IrExecutable *executable, I
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
if (ir_want_debug_safety(g, &instruction->base)) {
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef zero = LLVMConstNull(actual_type->type_ref);
LLVMValueRef neq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntNE, target_val, zero, "");
LLVMValueRef ok_bit;
@ -2171,7 +2171,7 @@ static LLVMValueRef ir_render_int_to_err(CodeGen *g, IrExecutable *executable, I
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdInvalidErrorCode);
gen_safety_crash(g, PanicMsgIdInvalidErrorCode);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -2188,11 +2188,11 @@ static LLVMValueRef ir_render_err_to_int(CodeGen *g, IrExecutable *executable, I
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
if (actual_type->id == TypeTableEntryIdPureError) {
return gen_widen_or_shorten(g, ir_want_debug_safety(g, &instruction->base),
return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
g->err_tag_type, wanted_type, target_val);
} else if (actual_type->id == TypeTableEntryIdErrorUnion) {
if (!type_has_bits(actual_type->data.error.child_type)) {
return gen_widen_or_shorten(g, ir_want_debug_safety(g, &instruction->base),
return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
g->err_tag_type, wanted_type, target_val);
} else {
zig_panic("TODO");
@ -2205,8 +2205,8 @@ static LLVMValueRef ir_render_err_to_int(CodeGen *g, IrExecutable *executable, I
static LLVMValueRef ir_render_unreachable(CodeGen *g, IrExecutable *executable,
IrInstructionUnreachable *unreachable_instruction)
{
if (ir_want_debug_safety(g, &unreachable_instruction->base)) {
gen_debug_safety_crash(g, PanicMsgIdUnreachable);
if (ir_want_runtime_safety(g, &unreachable_instruction->base)) {
gen_safety_crash(g, PanicMsgIdUnreachable);
} else {
LLVMBuildUnreachable(g->builder);
}
@ -2248,7 +2248,7 @@ static LLVMValueRef ir_render_un_op(CodeGen *g, IrExecutable *executable, IrInst
} else if (expr_type->id == TypeTableEntryIdInt) {
if (op_id == IrUnOpNegationWrap) {
return LLVMBuildNeg(g->builder, expr, "");
} else if (ir_want_debug_safety(g, &un_op_instruction->base)) {
} else if (ir_want_runtime_safety(g, &un_op_instruction->base)) {
LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(expr));
return gen_overflow_op(g, expr_type, AddSubMulSub, zero, expr);
} else if (expr_type->data.integral.is_signed) {
@ -2317,7 +2317,7 @@ static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable,
var->align_bytes, 0, 0);
gen_assign_raw(g, var->value_ref, var_ptr_type, ir_llvm_value(g, init_value));
} else {
bool want_safe = ir_want_debug_safety(g, &decl_var_instruction->base);
bool want_safe = ir_want_runtime_safety(g, &decl_var_instruction->base);
if (want_safe) {
TypeTableEntry *usize = g->builtin_types.entry_usize;
uint64_t size_bytes = LLVMStoreSizeOfType(g->target_data_ref, var->value->type->type_ref);
@ -2409,7 +2409,7 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
if (!type_has_bits(array_type))
return nullptr;
bool safety_check_on = ir_want_debug_safety(g, &instruction->base) && instruction->safety_check_on;
bool safety_check_on = ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on;
if (array_type->id == TypeTableEntryIdArray) {
if (safety_check_on) {
@ -2593,7 +2593,7 @@ static LLVMValueRef ir_render_union_field_ptr(CodeGen *g, IrExecutable *executab
return bitcasted_union_field_ptr;
}
if (ir_want_debug_safety(g, &instruction->base)) {
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef tag_field_ptr = LLVMBuildStructGEP(g->builder, union_ptr, union_type->data.unionation.gen_tag_index, "");
LLVMValueRef tag_value = gen_load_untyped(g, tag_field_ptr, 0, false, "");
@ -2606,7 +2606,7 @@ static LLVMValueRef ir_render_union_field_ptr(CodeGen *g, IrExecutable *executab
LLVMBuildCondBr(g->builder, ok_val, ok_block, bad_block);
LLVMPositionBuilderAtEnd(g->builder, bad_block);
gen_debug_safety_crash(g, PanicMsgIdBadUnionField);
gen_safety_crash(g, PanicMsgIdBadUnionField);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -2776,14 +2776,14 @@ static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable,
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
LLVMValueRef maybe_ptr = ir_llvm_value(g, instruction->value);
LLVMValueRef maybe_handle = get_handle_value(g, maybe_ptr, maybe_type, ptr_type);
if (ir_want_debug_safety(g, &instruction->base) && instruction->safety_check_on) {
if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on) {
LLVMValueRef non_null_bit = gen_non_null_bit(g, maybe_type, maybe_handle);
LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeOk");
LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeFail");
LLVMBuildCondBr(g->builder, non_null_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdUnwrapMaybeFail);
gen_safety_crash(g, PanicMsgIdUnwrapMaybeFail);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -2913,7 +2913,7 @@ static LLVMValueRef ir_render_err_name(CodeGen *g, IrExecutable *executable, IrI
}
LLVMValueRef err_val = ir_llvm_value(g, instruction->value);
if (ir_want_debug_safety(g, &instruction->base)) {
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(err_val));
LLVMValueRef end_val = LLVMConstInt(LLVMTypeOf(err_val), g->error_decls.length, false);
add_bounds_check(g, err_val, LLVMIntNE, zero, LLVMIntULT, end_val);
@ -2935,7 +2935,7 @@ static LLVMValueRef ir_render_enum_tag_name(CodeGen *g, IrExecutable *executable
TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target);
if (ir_want_debug_safety(g, &instruction->base)) {
if (ir_want_runtime_safety(g, &instruction->base)) {
size_t field_count = enum_type->data.enumeration.src_field_count;
// if the field_count can't fit in the bits of the enum_type, then it can't possibly
@ -2988,8 +2988,8 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
assert(target_val);
bool want_debug_safety = ir_want_debug_safety(g, &instruction->base);
if (!want_debug_safety) {
bool want_runtime_safety = ir_want_runtime_safety(g, &instruction->base);
if (!want_runtime_safety) {
return target_val;
}
@ -3038,7 +3038,7 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_debug_safety_crash(g, PanicMsgIdIncorrectAlignment);
gen_safety_crash(g, PanicMsgIdIncorrectAlignment);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
@ -3176,7 +3176,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
LLVMValueRef tmp_struct_ptr = instruction->tmp_ptr;
bool want_debug_safety = instruction->safety_check_on && ir_want_debug_safety(g, &instruction->base);
bool want_runtime_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base);
if (array_type->id == TypeTableEntryIdArray) {
LLVMValueRef start_val = ir_llvm_value(g, instruction->start);
@ -3187,7 +3187,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
end_val = LLVMConstInt(g->builtin_types.entry_usize->type_ref, array_type->data.array.len, false);
}
if (want_debug_safety) {
if (want_runtime_safety) {
add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val);
if (instruction->end) {
LLVMValueRef array_end = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
@ -3198,7 +3198,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
if (!type_has_bits(array_type)) {
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_len_index, "");
// TODO if debug safety is on, store 0xaaaaaaa in ptr field
// TODO if runtime safety is on, store 0xaaaaaaa in ptr field
LLVMValueRef len_value = LLVMBuildNSWSub(g->builder, end_val, start_val, "");
gen_store_untyped(g, len_value, len_field_ptr, 0, false);
return tmp_struct_ptr;
@ -3222,7 +3222,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
LLVMValueRef start_val = ir_llvm_value(g, instruction->start);
LLVMValueRef end_val = ir_llvm_value(g, instruction->end);
if (want_debug_safety) {
if (want_runtime_safety) {
add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val);
}
@ -3246,7 +3246,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
assert(len_index != SIZE_MAX);
LLVMValueRef prev_end = nullptr;
if (!instruction->end || want_debug_safety) {
if (!instruction->end || want_runtime_safety) {
LLVMValueRef src_len_ptr = LLVMBuildStructGEP(g->builder, array_ptr, (unsigned)len_index, "");
prev_end = gen_load_untyped(g, src_len_ptr, 0, false, "");
}
@ -3259,7 +3259,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
end_val = prev_end;
}
if (want_debug_safety) {
if (want_runtime_safety) {
assert(prev_end);
add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val);
if (instruction->end) {
@ -3432,7 +3432,7 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu
LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->value);
LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type);
if (ir_want_debug_safety(g, &instruction->base) && instruction->safety_check_on && g->error_decls.length > 1) {
if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on && g->error_decls.length > 1) {
LLVMValueRef err_val;
if (type_has_bits(child_type)) {
LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, "");
@ -3447,7 +3447,7 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu
LLVMBuildCondBr(g->builder, cond_val, ok_block, err_block);
LLVMPositionBuilderAtEnd(g->builder, err_block);
gen_debug_safety_crash_for_err(g, err_val);
gen_safety_crash_for_err(g, err_val);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@ -3660,7 +3660,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdPtrTypeChild:
case IrInstructionIdFieldPtr:
case IrInstructionIdSetCold:
case IrInstructionIdSetDebugSafety:
case IrInstructionIdSetRuntimeSafety:
case IrInstructionIdSetFloatMode:
case IrInstructionIdArrayType:
case IrInstructionIdSliceType:
@ -5238,7 +5238,7 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdCompileLog, "compileLog", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdIntType, "IntType", 2); // TODO rename to Int
create_builtin_fn(g, BuiltinFnIdSetCold, "setCold", 1);
create_builtin_fn(g, BuiltinFnIdSetDebugSafety, "setDebugSafety", 2);
create_builtin_fn(g, BuiltinFnIdSetRuntimeSafety, "setRuntimeSafety", 1);
create_builtin_fn(g, BuiltinFnIdSetFloatMode, "setFloatMode", 2);
create_builtin_fn(g, BuiltinFnIdPanic, "panic", 1);
create_builtin_fn(g, BuiltinFnIdPtrCast, "ptrCast", 2);

View File

@ -276,8 +276,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSetCold *) {
return IrInstructionIdSetCold;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionSetDebugSafety *) {
return IrInstructionIdSetDebugSafety;
static constexpr IrInstructionId ir_instruction_id(IrInstructionSetRuntimeSafety *) {
return IrInstructionIdSetRuntimeSafety;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionSetFloatMode *) {
@ -1275,15 +1275,13 @@ static IrInstruction *ir_build_set_cold(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
static IrInstruction *ir_build_set_debug_safety(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *scope_value, IrInstruction *debug_safety_on)
static IrInstruction *ir_build_set_runtime_safety(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *safety_on)
{
IrInstructionSetDebugSafety *instruction = ir_build_instruction<IrInstructionSetDebugSafety>(irb, scope, source_node);
instruction->scope_value = scope_value;
instruction->debug_safety_on = debug_safety_on;
IrInstructionSetRuntimeSafety *instruction = ir_build_instruction<IrInstructionSetRuntimeSafety>(irb, scope, source_node);
instruction->safety_on = safety_on;
ir_ref_instruction(scope_value, irb->current_basic_block);
ir_ref_instruction(debug_safety_on, irb->current_basic_block);
ir_ref_instruction(safety_on, irb->current_basic_block);
return &instruction->base;
}
@ -3087,19 +3085,14 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return ir_build_set_cold(irb, scope, node, arg0_value);
}
case BuiltinFnIdSetDebugSafety:
case BuiltinFnIdSetRuntimeSafety:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
return ir_build_set_debug_safety(irb, scope, node, arg0_value, arg1_value);
return ir_build_set_runtime_safety(irb, scope, node, arg0_value);
}
case BuiltinFnIdSetFloatMode:
{
@ -11607,72 +11600,60 @@ static TypeTableEntry *ir_analyze_instruction_set_cold(IrAnalyze *ira, IrInstruc
ir_build_const_from(ira, &instruction->base);
return ira->codegen->builtin_types.entry_void;
}
static TypeTableEntry *ir_analyze_instruction_set_debug_safety(IrAnalyze *ira,
IrInstructionSetDebugSafety *set_debug_safety_instruction)
static TypeTableEntry *ir_analyze_instruction_set_runtime_safety(IrAnalyze *ira,
IrInstructionSetRuntimeSafety *set_runtime_safety_instruction)
{
IrInstruction *target_instruction = set_debug_safety_instruction->scope_value->other;
TypeTableEntry *target_type = target_instruction->value.type;
if (type_is_invalid(target_type))
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *target_val = ir_resolve_const(ira, target_instruction, UndefBad);
if (!target_val)
return ira->codegen->builtin_types.entry_invalid;
if (ira->new_irb.exec->is_inline) {
// ignore setDebugSafety when running functions at compile time
ir_build_const_from(ira, &set_debug_safety_instruction->base);
// ignore setRuntimeSafety when running functions at compile time
ir_build_const_from(ira, &set_runtime_safety_instruction->base);
return ira->codegen->builtin_types.entry_void;
}
bool *safety_off_ptr;
AstNode **safety_set_node_ptr;
if (target_type->id == TypeTableEntryIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)target_val->data.x_block;
safety_off_ptr = &block_scope->safety_off;
safety_set_node_ptr = &block_scope->safety_set_node;
} else if (target_type->id == TypeTableEntryIdFn) {
FnTableEntry *target_fn = target_val->data.x_fn.fn_entry;
assert(target_fn->def_scope);
safety_off_ptr = &target_fn->def_scope->safety_off;
safety_set_node_ptr = &target_fn->def_scope->safety_set_node;
} else if (target_type->id == TypeTableEntryIdMetaType) {
ScopeDecls *decls_scope;
TypeTableEntry *type_arg = target_val->data.x_type;
if (type_arg->id == TypeTableEntryIdStruct) {
decls_scope = type_arg->data.structure.decls_scope;
} else if (type_arg->id == TypeTableEntryIdEnum) {
decls_scope = type_arg->data.enumeration.decls_scope;
} else if (type_arg->id == TypeTableEntryIdUnion) {
decls_scope = type_arg->data.unionation.decls_scope;
Scope *scope = set_runtime_safety_instruction->base.scope;
while (scope != nullptr) {
if (scope->id == ScopeIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)scope;
safety_off_ptr = &block_scope->safety_off;
safety_set_node_ptr = &block_scope->safety_set_node;
break;
} else if (scope->id == ScopeIdFnDef) {
ScopeFnDef *def_scope = (ScopeFnDef *)scope;
FnTableEntry *target_fn = def_scope->fn_entry;
assert(target_fn->def_scope != nullptr);
safety_off_ptr = &target_fn->def_scope->safety_off;
safety_set_node_ptr = &target_fn->def_scope->safety_set_node;
break;
} else if (scope->id == ScopeIdDecls) {
ScopeDecls *decls_scope = (ScopeDecls *)scope;
safety_off_ptr = &decls_scope->safety_off;
safety_set_node_ptr = &decls_scope->safety_set_node;
break;
} else {
ir_add_error_node(ira, target_instruction->source_node,
buf_sprintf("expected scope reference, found type '%s'", buf_ptr(&type_arg->name)));
return ira->codegen->builtin_types.entry_invalid;
scope = scope->parent;
continue;
}
safety_off_ptr = &decls_scope->safety_off;
safety_set_node_ptr = &decls_scope->safety_set_node;
} else {
ir_add_error_node(ira, target_instruction->source_node,
buf_sprintf("expected scope reference, found type '%s'", buf_ptr(&target_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
assert(scope != nullptr);
IrInstruction *debug_safety_on_value = set_debug_safety_instruction->debug_safety_on->other;
bool want_debug_safety;
if (!ir_resolve_bool(ira, debug_safety_on_value, &want_debug_safety))
IrInstruction *safety_on_value = set_runtime_safety_instruction->safety_on->other;
bool want_runtime_safety;
if (!ir_resolve_bool(ira, safety_on_value, &want_runtime_safety))
return ira->codegen->builtin_types.entry_invalid;
AstNode *source_node = set_debug_safety_instruction->base.source_node;
AstNode *source_node = set_runtime_safety_instruction->base.source_node;
if (*safety_set_node_ptr) {
ErrorMsg *msg = ir_add_error_node(ira, source_node,
buf_sprintf("debug safety set twice for same scope"));
buf_sprintf("runtime safety set twice for same scope"));
add_error_note(ira->codegen, msg, *safety_set_node_ptr, buf_sprintf("first set here"));
return ira->codegen->builtin_types.entry_invalid;
}
*safety_set_node_ptr = source_node;
*safety_off_ptr = !want_debug_safety;
*safety_off_ptr = !want_runtime_safety;
ir_build_const_from(ira, &set_debug_safety_instruction->base);
ir_build_const_from(ira, &set_runtime_safety_instruction->base);
return ira->codegen->builtin_types.entry_void;
}
@ -15293,8 +15274,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_ptr_type_child(ira, (IrInstructionPtrTypeChild *)instruction);
case IrInstructionIdSetCold:
return ir_analyze_instruction_set_cold(ira, (IrInstructionSetCold *)instruction);
case IrInstructionIdSetDebugSafety:
return ir_analyze_instruction_set_debug_safety(ira, (IrInstructionSetDebugSafety *)instruction);
case IrInstructionIdSetRuntimeSafety:
return ir_analyze_instruction_set_runtime_safety(ira, (IrInstructionSetRuntimeSafety *)instruction);
case IrInstructionIdSetFloatMode:
return ir_analyze_instruction_set_float_mode(ira, (IrInstructionSetFloatMode *)instruction);
case IrInstructionIdSliceType:
@ -15530,7 +15511,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdReturn:
case IrInstructionIdUnreachable:
case IrInstructionIdSetCold:
case IrInstructionIdSetDebugSafety:
case IrInstructionIdSetRuntimeSafety:
case IrInstructionIdSetFloatMode:
case IrInstructionIdImport:
case IrInstructionIdCompileErr:

View File

@ -374,11 +374,9 @@ static void ir_print_set_cold(IrPrint *irp, IrInstructionSetCold *instruction) {
fprintf(irp->f, ")");
}
static void ir_print_set_debug_safety(IrPrint *irp, IrInstructionSetDebugSafety *instruction) {
fprintf(irp->f, "@setDebugSafety(");
ir_print_other_instruction(irp, instruction->scope_value);
fprintf(irp->f, ", ");
ir_print_other_instruction(irp, instruction->debug_safety_on);
static void ir_print_set_runtime_safety(IrPrint *irp, IrInstructionSetRuntimeSafety *instruction) {
fprintf(irp->f, "@setRuntimeSafety(");
ir_print_other_instruction(irp, instruction->safety_on);
fprintf(irp->f, ")");
}
@ -1090,8 +1088,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdSetCold:
ir_print_set_cold(irp, (IrInstructionSetCold *)instruction);
break;
case IrInstructionIdSetDebugSafety:
ir_print_set_debug_safety(irp, (IrInstructionSetDebugSafety *)instruction);
case IrInstructionIdSetRuntimeSafety:
ir_print_set_runtime_safety(irp, (IrInstructionSetRuntimeSafety *)instruction);
break;
case IrInstructionIdSetFloatMode:
ir_print_set_float_mode(irp, (IrInstructionSetFloatMode *)instruction);

View File

@ -331,7 +331,7 @@ pub fn absInt(x: var) -> %@typeOf(x) {
if (x == @minValue(@typeOf(x)))
return error.Overflow;
{
@setDebugSafety(this, false);
@setRuntimeSafety(false);
return if (x < 0) -x else x;
}
}
@ -350,7 +350,7 @@ pub const absFloat = @import("fabs.zig").fabs;
error DivisionByZero;
error Overflow;
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) -> %T {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
if (denominator == 0)
return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1)
@ -375,7 +375,7 @@ fn testDivTrunc() {
error DivisionByZero;
error Overflow;
pub fn divFloor(comptime T: type, numerator: T, denominator: T) -> %T {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
if (denominator == 0)
return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1)
@ -401,7 +401,7 @@ error DivisionByZero;
error Overflow;
error UnexpectedRemainder;
pub fn divExact(comptime T: type, numerator: T, denominator: T) -> %T {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
if (denominator == 0)
return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1)
@ -431,7 +431,7 @@ fn testDivExact() {
error DivisionByZero;
error NegativeDenominator;
pub fn mod(comptime T: type, numerator: T, denominator: T) -> %T {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
if (denominator == 0)
return error.DivisionByZero;
if (denominator < 0)
@ -458,7 +458,7 @@ fn testMod() {
error DivisionByZero;
error NegativeDenominator;
pub fn rem(comptime T: type, numerator: T, denominator: T) -> %T {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
if (denominator == 0)
return error.DivisionByZero;
if (denominator < 0)

View File

@ -156,9 +156,9 @@ pub const FixedBufferAllocator = struct {
/// dest.len must be >= source.len.
pub fn copy(comptime T: type, dest: []T, source: []const T) {
// TODO instead of manually doing this check for the whole array
// and turning off debug safety, the compiler should detect loops like
// and turning off runtime safety, the compiler should detect loops like
// this and automatically omit safety checks for loops
@setDebugSafety(this, false);
@setRuntimeSafety(false);
assert(dest.len >= source.len);
for (source) |s, i| dest[i] = s;
}

View File

@ -3,7 +3,7 @@
const builtin = @import("builtin");
// Avoid dragging in the debug safety mechanisms into this .o file,
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noreturn {
if (builtin.is_test) {
@ -18,7 +18,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noret
// The semantics of memset is dictated by the corresponding
// LLVM intrinsics, not by the libc API.
export fn memset(dest: ?&u8, c: u8, n: usize) {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
var index: usize = 0;
while (index != n) : (index += 1)
@ -29,7 +29,7 @@ export fn memset(dest: ?&u8, c: u8, n: usize) {
// The semantics of memcpy is dictated by the corresponding
// LLVM intrinsics, not by the libc API.
export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
var index: usize = 0;
while (index != n) : (index += 1)
@ -58,7 +58,7 @@ export fn floor(x: f64) -> f64 { return math.floor(x); }
export fn ceil(x: f64) -> f64 { return math.ceil(x); }
fn generic_fmod(comptime T: type, x: T, y: T) -> T {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
const uint = @IntType(false, T.bit_count);
const log2uint = math.Log2Int(uint);

View File

@ -1,5 +1,5 @@
pub nakedcc fn _aulldiv() {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
asm volatile (
\\.intel_syntax noprefix
\\

View File

@ -1,5 +1,5 @@
pub nakedcc fn _aullrem() {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
asm volatile (
\\.intel_syntax noprefix
\\

View File

@ -22,7 +22,7 @@ const builtin = @import("builtin");
const is_test = builtin.is_test;
pub extern fn __letf2(a: f128, b: f128) -> c_int {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
const aInt = @bitCast(rep_t, a);
const bInt = @bitCast(rep_t, b);
@ -67,7 +67,7 @@ const GE_GREATER = c_int(1);
const GE_UNORDERED = c_int(-1); // Note: different from LE_UNORDERED
pub extern fn __getf2(a: f128, b: f128) -> c_int {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
const aInt = @bitCast(srep_t, a);
const bInt = @bitCast(srep_t, b);
@ -94,7 +94,7 @@ pub extern fn __getf2(a: f128, b: f128) -> c_int {
}
pub extern fn __unordtf2(a: f128, b: f128) -> c_int {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
const aAbs = @bitCast(rep_t, a) & absMask;
const bAbs = @bitCast(rep_t, b) & absMask;

View File

@ -2,7 +2,7 @@ const is_test = @import("builtin").is_test;
const Log2Int = @import("../../math/index.zig").Log2Int;
pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) -> fixuint_t {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
const rep_t = switch (fp_t) {
f32 => u32,
@ -48,12 +48,12 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) -> fixuin
if (exponent < significandBits) {
// TODO this is a workaround for the mysterious "integer cast truncated bits"
// happening on the next line
@setDebugSafety(this, false);
@setRuntimeSafety(false);
return fixuint_t(significand >> Log2Int(rep_t)(significandBits - exponent));
} else {
// TODO this is a workaround for the mysterious "integer cast truncated bits"
// happening on the next line
@setDebugSafety(this, false);
@setRuntimeSafety(false);
return fixuint_t(significand) << Log2Int(fixuint_t)(exponent - significandBits);
}
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunsdfdi(a: f64) -> u64 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f64, u64, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunsdfsi(a: f64) -> u32 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f64, u32, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunsdfti(a: f64) -> u128 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f64, u128, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunssfdi(a: f32) -> u64 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f32, u64, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunssfsi(a: f32) -> u32 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f32, u32, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunssfti(a: f32) -> u128 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f32, u128, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunstfdi(a: f128) -> u64 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f128, u64, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunstfsi(a: f128) -> u32 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f128, u32, a);
}

View File

@ -2,7 +2,7 @@ const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
pub extern fn __fixunstfti(a: f128) -> u128 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return fixuint(f128, u128, a);
}

View File

@ -72,7 +72,7 @@ const assert = @import("../../index.zig").debug.assert;
const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4;
// Avoid dragging in the debug safety mechanisms into this .o file,
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noreturn {
@setCold(true);
@ -84,12 +84,12 @@ pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noret
}
extern fn __udivdi3(a: u64, b: u64) -> u64 {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
return __udivmoddi4(a, b, null);
}
extern fn __umoddi3(a: u64, b: u64) -> u64 {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
var r: u64 = undefined;
_ = __udivmoddi4(a, b, &r);
@ -101,7 +101,7 @@ const AeabiUlDivModResult = extern struct {
rem: u64,
};
extern fn __aeabi_uldivmod(numerator: u64, denominator: u64) -> AeabiUlDivModResult {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
var result: AeabiUlDivModResult = undefined;
result.quot = __udivmoddi4(numerator, denominator, &result.rem);
return result;
@ -133,7 +133,7 @@ fn isArmArch() -> bool {
}
nakedcc fn __aeabi_uidivmod() {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
asm volatile (
\\ push { lr }
\\ sub sp, sp, #4
@ -150,7 +150,7 @@ nakedcc fn __aeabi_uidivmod() {
// This routine is windows specific
// http://msdn.microsoft.com/en-us/library/ms648426.aspx
nakedcc fn _chkstk() align(4) {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
asm volatile (
\\ push %%ecx
@ -174,7 +174,7 @@ nakedcc fn _chkstk() align(4) {
}
nakedcc fn __chkstk() align(4) {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
asm volatile (
\\ push %%rcx
@ -201,7 +201,7 @@ nakedcc fn __chkstk() align(4) {
// This routine is windows specific
// http://msdn.microsoft.com/en-us/library/ms648426.aspx
nakedcc fn __chkstk_ms() align(4) {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
asm volatile (
\\ push %%ecx
@ -225,7 +225,7 @@ nakedcc fn __chkstk_ms() align(4) {
}
nakedcc fn ___chkstk_ms() align(4) {
@setDebugSafety(this, false);
@setRuntimeSafety(false);
asm volatile (
\\ push %%rcx
@ -249,7 +249,7 @@ nakedcc fn ___chkstk_ms() align(4) {
}
extern fn __udivmodsi4(a: u32, b: u32, rem: &u32) -> u32 {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
const d = __udivsi3(a, b);
*rem = u32(i32(a) -% (i32(d) * i32(b)));
@ -258,7 +258,7 @@ extern fn __udivmodsi4(a: u32, b: u32, rem: &u32) -> u32 {
extern fn __udivsi3(n: u32, d: u32) -> u32 {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
const n_uword_bits: c_uint = u32.bit_count;
// special cases

View File

@ -5,7 +5,7 @@ const low = switch (builtin.endian) { builtin.Endian.Big => 1, builtin.Endian.Li
const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?&DoubleInt) -> DoubleInt {
@setDebugSafety(this, is_test);
@setRuntimeSafety(is_test);
const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);

View File

@ -2,7 +2,7 @@ const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?&u64) -> u64 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return udivmod(u64, a, b, maybe_rem);
}

View File

@ -2,7 +2,7 @@ const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?&u128) -> u128 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return udivmod(u128, a, b, maybe_rem);
}

View File

@ -2,6 +2,6 @@ const __udivmodti4 = @import("udivmodti4.zig").__udivmodti4;
const builtin = @import("builtin");
pub extern fn __udivti3(a: u128, b: u128) -> u128 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
return __udivmodti4(a, b, null);
}

View File

@ -2,7 +2,7 @@ const __udivmodti4 = @import("udivmodti4.zig").__udivmodti4;
const builtin = @import("builtin");
pub extern fn __umodti3(a: u128, b: u128) -> u128 {
@setDebugSafety(this, builtin.is_test);
@setRuntimeSafety(builtin.is_test);
var r: u128 = undefined;
_ = __udivmodti4(a, b, &r);
return r;

View File

@ -223,13 +223,13 @@ test "comptime iterate over fn ptr list" {
assert(performFn('w', 99) == 99);
}
test "eval @setDebugSafety at compile-time" {
const result = comptime fnWithSetDebugSafety();
test "eval @setRuntimeSafety at compile-time" {
const result = comptime fnWithSetRuntimeSafety();
assert(result == 1234);
}
fn fnWithSetDebugSafety() -> i32{
@setDebugSafety(this, true);
fn fnWithSetRuntimeSafety() -> i32{
@setRuntimeSafety(true);
return 1234;
}

View File

@ -17,7 +17,7 @@ test "slice child property" {
assert(@typeOf(slice).Child == i32);
}
test "debug safety lets us slice from len..len" {
test "runtime safety lets us slice from len..len" {
var an_array = []u8{1, 2, 3};
assert(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
}

View File

@ -1867,13 +1867,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) {
,
".tmp_source.zig:3:20: error: cast from 'u16' to 'u8' truncates bits");
cases.add("@setDebugSafety twice for same scope",
cases.add("@setRuntimeSafety twice for same scope",
\\export fn foo() {
\\ @setDebugSafety(this, false);
\\ @setDebugSafety(this, false);
\\ @setRuntimeSafety(false);
\\ @setRuntimeSafety(false);
\\}
,
".tmp_source.zig:3:5: error: debug safety set twice for same scope",
".tmp_source.zig:3:5: error: runtime safety set twice for same scope",
".tmp_source.zig:2:5: note: first set here");
cases.add("@setFloatMode twice for same scope",

View File

@ -1,7 +1,7 @@
const tests = @import("tests.zig");
pub fn addCases(cases: &tests.CompareOutputContext) {
cases.addDebugSafety("calling panic",
cases.addRuntimeSafety("calling panic",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -10,7 +10,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("out of bounds slice access",
cases.addRuntimeSafety("out of bounds slice access",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -24,7 +24,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\fn baz(a: i32) { }
);
cases.addDebugSafety("integer addition overflow",
cases.addRuntimeSafety("integer addition overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -38,7 +38,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("integer subtraction overflow",
cases.addRuntimeSafety("integer subtraction overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -52,7 +52,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("integer multiplication overflow",
cases.addRuntimeSafety("integer multiplication overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -66,7 +66,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("integer negation overflow",
cases.addRuntimeSafety("integer negation overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -80,7 +80,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("signed integer division overflow",
cases.addRuntimeSafety("signed integer division overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -94,7 +94,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("signed shift left overflow",
cases.addRuntimeSafety("signed shift left overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -108,7 +108,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("unsigned shift left overflow",
cases.addRuntimeSafety("unsigned shift left overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -122,7 +122,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("signed shift right overflow",
cases.addRuntimeSafety("signed shift right overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -136,7 +136,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("unsigned shift right overflow",
cases.addRuntimeSafety("unsigned shift right overflow",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -150,7 +150,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("integer division by zero",
cases.addRuntimeSafety("integer division by zero",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -163,7 +163,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("exact division failure",
cases.addRuntimeSafety("exact division failure",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -177,7 +177,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("cast []u8 to bigger slice of wrong size",
cases.addRuntimeSafety("cast []u8 to bigger slice of wrong size",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -191,7 +191,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("value does not fit in shortening cast",
cases.addRuntimeSafety("value does not fit in shortening cast",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -205,7 +205,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("signed integer not fitting in cast to unsigned integer",
cases.addRuntimeSafety("signed integer not fitting in cast to unsigned integer",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -219,7 +219,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("unwrap error",
cases.addRuntimeSafety("unwrap error",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ if (@import("std").mem.eql(u8, message, "attempt to unwrap error: Whatever")) {
\\ @import("std").os.exit(126); // good
@ -235,7 +235,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("cast integer to error and no code matches",
cases.addRuntimeSafety("cast integer to error and no code matches",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -247,7 +247,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("@alignCast misaligned",
cases.addRuntimeSafety("@alignCast misaligned",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}
@ -264,7 +264,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) {
\\}
);
cases.addDebugSafety("bad union field access",
cases.addRuntimeSafety("bad union field access",
\\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
\\ @import("std").os.exit(126);
\\}

View File
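Most of the case bodies above are cut off at hunk boundaries. The overall shape of a runtime-safety case, with the panic handler copied from the hunks and the main body invented purely for illustration, looks roughly like:

    cases.addRuntimeSafety("integer addition overflow (illustrative sketch)",
        \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn {
        \\    @import("std").os.exit(126);
        \\}
        \\pub fn main() -> %void {
        \\    var byte: u8 = 255;
        \\    byte += 1; // overflows u8: the safety check calls panic, which exits with 126
        \\    @import("std").os.exit(0); // reached only if the check did not fire
        \\}
    );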

@ -17,7 +17,7 @@ const compare_output = @import("compare_output.zig");
const build_examples = @import("build_examples.zig");
const compile_errors = @import("compile_errors.zig");
const assemble_and_link = @import("assemble_and_link.zig");
const debug_safety = @import("debug_safety.zig");
const runtime_safety = @import("runtime_safety.zig");
const translate_c = @import("translate_c.zig");
const gen_h = @import("gen_h.zig");
@ -64,16 +64,16 @@ pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) -> &bu
return cases.step;
}
pub fn addDebugSafetyTests(b: &build.Builder, test_filter: ?[]const u8) -> &build.Step {
pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: ?[]const u8) -> &build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
*cases = CompareOutputContext {
.b = b,
.step = b.step("test-debug-safety", "Run the debug safety tests"),
.step = b.step("test-runtime-safety", "Run the runtime safety tests"),
.test_index = 0,
.test_filter = test_filter,
};
debug_safety.addCases(cases);
runtime_safety.addCases(cases);
return cases.step;
}
@ -192,7 +192,7 @@ pub const CompareOutputContext = struct {
const Special = enum {
None,
Asm,
DebugSafety,
RuntimeSafety,
};
const TestCase = struct {
@ -314,7 +314,7 @@ pub const CompareOutputContext = struct {
}
};
const DebugSafetyRunStep = struct {
const RuntimeSafetyRunStep = struct {
step: build.Step,
context: &CompareOutputContext,
exe_path: []const u8,
@ -322,23 +322,23 @@ pub const CompareOutputContext = struct {
test_index: usize,
pub fn create(context: &CompareOutputContext, exe_path: []const u8,
name: []const u8) -> &DebugSafetyRunStep
name: []const u8) -> &RuntimeSafetyRunStep
{
const allocator = context.b.allocator;
const ptr = allocator.create(DebugSafetyRunStep) catch unreachable;
*ptr = DebugSafetyRunStep {
const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable;
*ptr = RuntimeSafetyRunStep {
.context = context,
.exe_path = exe_path,
.name = name,
.test_index = context.test_index,
.step = build.Step.init("DebugSafetyRun", allocator, make),
.step = build.Step.init("RuntimeSafetyRun", allocator, make),
};
context.test_index += 1;
return ptr;
}
fn make(step: &build.Step) -> %void {
const self = @fieldParentPtr(DebugSafetyRunStep, "step", step);
const self = @fieldParentPtr(RuntimeSafetyRunStep, "step", step);
const b = self.context.b;
const full_exe_path = b.pathFromRoot(self.exe_path);
@ -420,8 +420,8 @@ pub const CompareOutputContext = struct {
self.addCase(tc);
}
pub fn addDebugSafety(self: &CompareOutputContext, name: []const u8, source: []const u8) {
const tc = self.createExtra(name, source, undefined, Special.DebugSafety);
pub fn addRuntimeSafety(self: &CompareOutputContext, name: []const u8, source: []const u8) {
const tc = self.createExtra(name, source, undefined, Special.RuntimeSafety);
self.addCase(tc);
}
@ -481,7 +481,7 @@ pub const CompareOutputContext = struct {
self.step.dependOn(&run_and_cmp_output.step);
}
},
Special.DebugSafety => {
Special.RuntimeSafety => {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "safety {}", case.name) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null)
@ -499,7 +499,7 @@ pub const CompareOutputContext = struct {
exe.step.dependOn(&write_src.step);
}
const run_and_cmp_output = DebugSafetyRunStep.create(self, exe.getOutputPath(), annotated_case_name);
const run_and_cmp_output = RuntimeSafetyRunStep.create(self, exe.getOutputPath(), annotated_case_name);
run_and_cmp_output.step.dependOn(&exe.step);
self.step.dependOn(&run_and_cmp_output.step);