add atomicrmw builtin function
This commit is contained in:
parent
36eadb569a
commit
807a5e94e9
@ -3775,6 +3775,25 @@ pub fn main() void {
|
||||
{#header_open|@ArgType#}
|
||||
<p>TODO</p>
|
||||
{#header_close#}
|
||||
{#header_open|@atomicRmw#}
|
||||
<pre><code class="zig">@atomicRmw(comptime T: type, ptr: &T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) -> T</code></pre>
|
||||
<p>
|
||||
This builtin function atomically modifies memory and then returns the previous value.
|
||||
</p>
|
||||
<p>
|
||||
<code>T</code> must be a pointer type, a <code>bool</code>,
|
||||
or an integer whose bit count meets these requirements:
|
||||
</p>
|
||||
<ul>
|
||||
<li>At least 8</li>
|
||||
<li>At most the same as usize</li>
|
||||
<li>Power of 2</li>
|
||||
</ul>
|
||||
<p>
|
||||
      TODO: <code>bool</code> is not currently accepted. Non-power-of-2 bit counts could
      likely also be made to work, in which case that restriction could be removed.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@bitCast#}
|
||||
<pre><code class="zig">@bitCast(comptime DestType: type, value: var) -> DestType</code></pre>
|
||||
<p>
|
||||
@ -5859,7 +5878,7 @@ hljs.registerLanguage("zig", function(t) {
|
||||
a = t.IR + "\\s*\\(",
|
||||
c = {
|
||||
keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong",
|
||||
built_in: "breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchg fence divExact truncate",
|
||||
built_in: "breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchg fence divExact truncate atomicRmw",
|
||||
literal: "true false null undefined"
|
||||
},
|
||||
n = [e, t.CLCM, t.CBCM, s, r];
|
||||
|
@ -1338,6 +1338,7 @@ enum BuiltinFnId {
|
||||
BuiltinFnIdArgType,
|
||||
BuiltinFnIdExport,
|
||||
BuiltinFnIdErrorReturnTrace,
|
||||
BuiltinFnIdAtomicRmw,
|
||||
};
|
||||
|
||||
struct BuiltinFnEntry {
|
||||
@ -1857,6 +1858,19 @@ enum AtomicOrder {
|
||||
AtomicOrderSeqCst,
|
||||
};
|
||||
|
||||
// synchronized with the code in define_builtin_compile_vars
|
||||
enum AtomicRmwOp {
|
||||
AtomicRmwOp_xchg,
|
||||
AtomicRmwOp_add,
|
||||
AtomicRmwOp_sub,
|
||||
AtomicRmwOp_and,
|
||||
AtomicRmwOp_nand,
|
||||
AtomicRmwOp_or,
|
||||
AtomicRmwOp_xor,
|
||||
AtomicRmwOp_max,
|
||||
AtomicRmwOp_min,
|
||||
};
|
||||
|
||||
// A basic block contains no branching. Branches send control flow
|
||||
// to another basic block.
|
||||
// Phi instructions must be first in a basic block.
|
||||
@ -2006,6 +2020,7 @@ enum IrInstructionId {
|
||||
IrInstructionIdCoroResume,
|
||||
IrInstructionIdCoroSave,
|
||||
IrInstructionIdCoroAllocHelper,
|
||||
IrInstructionIdAtomicRmw,
|
||||
};
|
||||
|
||||
struct IrInstruction {
|
||||
@ -2929,6 +2944,18 @@ struct IrInstructionCoroAllocHelper {
|
||||
IrInstruction *coro_size;
|
||||
};
|
||||
|
||||
struct IrInstructionAtomicRmw {
|
||||
IrInstruction base;
|
||||
|
||||
IrInstruction *operand_type;
|
||||
IrInstruction *ptr;
|
||||
IrInstruction *op;
|
||||
AtomicRmwOp resolved_op;
|
||||
IrInstruction *operand;
|
||||
IrInstruction *ordering;
|
||||
AtomicOrder resolved_ordering;
|
||||
};
|
||||
|
||||
static const size_t slice_ptr_index = 0;
|
||||
static const size_t slice_len_index = 1;
|
||||
|
||||
|
@ -3311,6 +3311,23 @@ static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) {
|
||||
zig_unreachable();
|
||||
}
|
||||
|
||||
// Translates Zig's AtomicRmwOp to the corresponding LLVM atomicrmw binop.
// min/max are the only operations where signedness matters: LLVM provides
// separate signed (Max/Min) and unsigned (UMax/UMin) opcodes for them.
static LLVMAtomicRMWBinOp to_LLVMAtomicRMWBinOp(AtomicRmwOp op, bool is_signed) {
    switch (op) {
        case AtomicRmwOp_xchg:
            return LLVMAtomicRMWBinOpXchg;
        case AtomicRmwOp_add:
            return LLVMAtomicRMWBinOpAdd;
        case AtomicRmwOp_sub:
            return LLVMAtomicRMWBinOpSub;
        case AtomicRmwOp_and:
            return LLVMAtomicRMWBinOpAnd;
        case AtomicRmwOp_nand:
            return LLVMAtomicRMWBinOpNand;
        case AtomicRmwOp_or:
            return LLVMAtomicRMWBinOpOr;
        case AtomicRmwOp_xor:
            return LLVMAtomicRMWBinOpXor;
        case AtomicRmwOp_max:
            if (is_signed) return LLVMAtomicRMWBinOpMax;
            return LLVMAtomicRMWBinOpUMax;
        case AtomicRmwOp_min:
            if (is_signed) return LLVMAtomicRMWBinOpMin;
            return LLVMAtomicRMWBinOpUMin;
    }
    zig_unreachable();
}
|
||||
|
||||
static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrInstructionCmpxchg *instruction) {
|
||||
LLVMValueRef ptr_val = ir_llvm_value(g, instruction->ptr);
|
||||
LLVMValueRef cmp_val = ir_llvm_value(g, instruction->cmp_value);
|
||||
@ -4111,6 +4128,22 @@ static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *execut
|
||||
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
|
||||
}
|
||||
|
||||
// Lowers an IrInstructionAtomicRmw to an LLVM atomicrmw instruction.
static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
        IrInstructionAtomicRmw *instruction)
{
    // Signedness only influences the choice between signed/unsigned min and
    // max opcodes; non-integer operand types (pointers) take the unsigned path.
    TypeTableEntry *operand_type = instruction->operand->value.type;
    bool is_signed = (operand_type->id == TypeTableEntryIdInt) &&
        operand_type->data.integral.is_signed;

    LLVMAtomicRMWBinOp op = to_LLVMAtomicRMWBinOp(instruction->resolved_op, is_signed);
    LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering);
    LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
    LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
    // final argument: singleThread = false, i.e. synchronizes across threads
    return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false);
}
|
||||
|
||||
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
|
||||
AstNode *source_node = instruction->source_node;
|
||||
Scope *scope = instruction->scope;
|
||||
@ -4318,6 +4351,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
|
||||
return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction);
|
||||
case IrInstructionIdCoroAllocHelper:
|
||||
return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction);
|
||||
case IrInstructionIdAtomicRmw:
|
||||
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
|
||||
}
|
||||
zig_unreachable();
|
||||
}
|
||||
@ -5810,6 +5845,7 @@ static void define_builtin_fns(CodeGen *g) {
|
||||
create_builtin_fn(g, BuiltinFnIdArgType, "ArgType", 2);
|
||||
create_builtin_fn(g, BuiltinFnIdExport, "export", 3);
|
||||
create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
|
||||
create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
|
||||
}
|
||||
|
||||
static const char *bool_to_str(bool b) {
|
||||
@ -5939,6 +5975,20 @@ static void define_builtin_compile_vars(CodeGen *g) {
|
||||
" SeqCst,\n"
|
||||
"};\n\n");
|
||||
}
|
||||
{
|
||||
buf_appendf(contents,
|
||||
"pub const AtomicRmwOp = enum {\n"
|
||||
" Xchg,\n"
|
||||
" Add,\n"
|
||||
" Sub,\n"
|
||||
" And,\n"
|
||||
" Nand,\n"
|
||||
" Or,\n"
|
||||
" Xor,\n"
|
||||
" Max,\n"
|
||||
" Min,\n"
|
||||
"};\n\n");
|
||||
}
|
||||
{
|
||||
buf_appendf(contents,
|
||||
"pub const Mode = enum {\n"
|
||||
|
149
src/ir.cpp
149
src/ir.cpp
@ -701,6 +701,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper
|
||||
return IrInstructionIdCoroAllocHelper;
|
||||
}
|
||||
|
||||
// Compile-time overload mapping the IrInstructionAtomicRmw struct type to its
// IrInstructionId tag (one such overload exists per instruction struct).
static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
    return IrInstructionIdAtomicRmw;
}
|
||||
|
||||
template<typename T>
|
||||
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
|
||||
T *special_instruction = allocate<T>(1);
|
||||
@ -2614,6 +2618,28 @@ static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, A
|
||||
return &instruction->base;
|
||||
}
|
||||
|
||||
// Constructs an IrInstructionAtomicRmw.
// During ir-gen the operand_type/op/ordering operands are instructions and the
// resolved_* enum values are placeholders; after analysis the enums carry the
// resolved values and those instruction operands are passed as null — hence
// the null checks before taking references.
static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode *source_node,
        IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *op, IrInstruction *operand,
        IrInstruction *ordering, AtomicRmwOp resolved_op, AtomicOrder resolved_ordering)
{
    IrInstructionAtomicRmw *instruction = ir_build_instruction<IrInstructionAtomicRmw>(irb, scope, source_node);
    instruction->operand_type = operand_type;
    instruction->ptr = ptr;
    instruction->op = op;
    instruction->operand = operand;
    instruction->ordering = ordering;
    instruction->resolved_op = resolved_op;
    instruction->resolved_ordering = resolved_ordering;

    if (operand_type != nullptr)
        ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    if (op != nullptr)
        ir_ref_instruction(op, irb->current_basic_block);
    ir_ref_instruction(operand, irb->current_basic_block);
    if (ordering != nullptr)
        ir_ref_instruction(ordering, irb->current_basic_block);

    return &instruction->base;
}
|
||||
|
||||
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
|
||||
results[ReturnKindUnconditional] = 0;
|
||||
results[ReturnKindError] = 0;
|
||||
@ -4094,6 +4120,38 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
|
||||
{
|
||||
return ir_build_error_return_trace(irb, scope, node);
|
||||
}
|
||||
case BuiltinFnIdAtomicRmw:
    {
        // @atomicRmw(T, ptr, op, operand, ordering): ir-gen each of the five
        // arguments in order, bailing out on the first invalid one.
        AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
        IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
        if (arg0_value == irb->codegen->invalid_instruction)
            return arg0_value;

        AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
        IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
        if (arg1_value == irb->codegen->invalid_instruction)
            return arg1_value;

        AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
        IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope);
        if (arg2_value == irb->codegen->invalid_instruction)
            return arg2_value;

        AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
        IrInstruction *arg3_value = ir_gen_node(irb, arg3_node, scope);
        if (arg3_value == irb->codegen->invalid_instruction)
            return arg3_value;

        AstNode *arg4_node = node->data.fn_call_expr.params.at(4);
        IrInstruction *arg4_value = ir_gen_node(irb, arg4_node, scope);
        if (arg4_value == irb->codegen->invalid_instruction)
            return arg4_value;

        return ir_build_atomic_rmw(irb, scope, node, arg0_value, arg1_value, arg2_value, arg3_value,
                arg4_value,
                // these 2 placeholder enum values don't mean anything here: analysis
                // resolves the real op/ordering from the non-null instruction operands
                AtomicRmwOp_xchg, AtomicOrderMonotonic);
    }
|
||||
}
|
||||
zig_unreachable();
|
||||
}
|
||||
@ -9730,6 +9788,26 @@ static bool ir_resolve_atomic_order(IrAnalyze *ira, IrInstruction *value, Atomic
|
||||
return true;
|
||||
}
|
||||
|
||||
// Resolves a comptime-known value to an AtomicRmwOp enum tag.
// Returns false when the value is invalid, does not coerce to
// builtin.AtomicRmwOp, or is not comptime-known (errors are reported by the
// cast / const-resolution helpers).
static bool ir_resolve_atomic_rmw_op(IrAnalyze *ira, IrInstruction *value, AtomicRmwOp *out) {
    if (type_is_invalid(value->value.type))
        return false;

    // Fetch the AtomicRmwOp enum type emitted by define_builtin_compile_vars.
    ConstExprValue *type_val = get_builtin_value(ira->codegen, "AtomicRmwOp");
    assert(type_val->type->id == TypeTableEntryIdMetaType);
    TypeTableEntry *enum_type = type_val->data.x_type;

    IrInstruction *casted = ir_implicit_cast(ira, value, enum_type);
    if (type_is_invalid(casted->value.type))
        return false;

    ConstExprValue *const_val = ir_resolve_const(ira, casted, UndefBad);
    if (const_val == nullptr)
        return false;

    // The Zig-side enum tag order is kept in sync with the C++ AtomicRmwOp
    // enum, so the tag value converts directly.
    *out = (AtomicRmwOp)bigint_as_unsigned(&const_val->data.x_enum_tag);
    return true;
}
|
||||
|
||||
static bool ir_resolve_global_linkage(IrAnalyze *ira, IrInstruction *value, GlobalLinkageId *out) {
|
||||
if (type_is_invalid(value->value.type))
|
||||
return false;
|
||||
@ -17316,6 +17394,74 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira,
|
||||
return result->value.type;
|
||||
}
|
||||
|
||||
// Semantic analysis for @atomicRmw(T, ptr, op, operand, ordering).
// Validates the operand type, coerces ptr to &T and operand to T, resolves the
// comptime op and ordering arguments, and emits the analyzed instruction with
// the resolved enum values (the comptime instruction operands become null).
// Returns the result type (T), or the invalid type on error.
static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstructionAtomicRmw *instruction) {
    TypeTableEntry *operand_type = ir_resolve_type(ira, instruction->operand_type->other);
    if (type_is_invalid(operand_type)) {
        return ira->codegen->builtin_types.entry_invalid;
    }
    if (operand_type->id == TypeTableEntryIdInt) {
        // LLVM's atomicrmw needs an integer of at least 8 bits, at most
        // pointer width, with a power-of-2 bit count.
        if (operand_type->data.integral.bit_count < 8) {
            ir_add_error(ira, &instruction->base,
                buf_sprintf("expected integer type 8 bits or larger, found %" PRIu32 "-bit integer type",
                    operand_type->data.integral.bit_count));
            return ira->codegen->builtin_types.entry_invalid;
        }
        if (operand_type->data.integral.bit_count > ira->codegen->pointer_size_bytes * 8) {
            ir_add_error(ira, &instruction->base,
                buf_sprintf("expected integer type pointer size or smaller, found %" PRIu32 "-bit integer type",
                    operand_type->data.integral.bit_count));
            return ira->codegen->builtin_types.entry_invalid;
        }
        if (!is_power_of_2(operand_type->data.integral.bit_count)) {
            ir_add_error(ira, &instruction->base,
                buf_sprintf("%" PRIu32 "-bit integer type is not a power of 2", operand_type->data.integral.bit_count));
            return ira->codegen->builtin_types.entry_invalid;
        }
    } else if (get_codegen_ptr_type(operand_type) == nullptr) {
        ir_add_error(ira, &instruction->base,
            buf_sprintf("expected integer or pointer type, found '%s'", buf_ptr(&operand_type->name)));
        return ira->codegen->builtin_types.entry_invalid;
    }

    IrInstruction *ptr_inst = instruction->ptr->other;
    if (type_is_invalid(ptr_inst->value.type))
        return ira->codegen->builtin_types.entry_invalid;

    // The pointer argument must implicitly coerce to a mutable &T.
    TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
    IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
    if (type_is_invalid(casted_ptr->value.type))
        return ira->codegen->builtin_types.entry_invalid;

    AtomicRmwOp op;
    if (!ir_resolve_atomic_rmw_op(ira, instruction->op->other, &op)) {
        return ira->codegen->builtin_types.entry_invalid;
    }

    IrInstruction *operand = instruction->operand->other;
    if (type_is_invalid(operand->value.type))
        return ira->codegen->builtin_types.entry_invalid;

    IrInstruction *casted_operand = ir_implicit_cast(ira, operand, operand_type);
    // BUG FIX: previously re-checked casted_ptr here (copy-paste), so a failed
    // operand cast slipped through to codegen with an invalid instruction.
    if (type_is_invalid(casted_operand->value.type))
        return ira->codegen->builtin_types.entry_invalid;

    AtomicOrder ordering;
    if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering))
        return ira->codegen->builtin_types.entry_invalid;

    if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) &&
        casted_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar)
    {
        zig_panic("TODO compile-time execution of atomicRmw");
    }

    // Emit with resolved op/ordering; the comptime operand_type/op/ordering
    // instruction operands are no longer needed, hence nullptr.
    IrInstruction *result = ir_build_atomic_rmw(&ira->new_irb, instruction->base.scope,
        instruction->base.source_node, nullptr, casted_ptr, nullptr, casted_operand, nullptr,
        op, ordering);
    ir_link_new_instruction(result, &instruction->base);
    result->value.type = operand_type;
    return result->value.type;
}
|
||||
|
||||
|
||||
static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) {
|
||||
switch (instruction->id) {
|
||||
@ -17545,6 +17691,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
|
||||
return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction);
|
||||
case IrInstructionIdCoroAllocHelper:
|
||||
return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction);
|
||||
case IrInstructionIdAtomicRmw:
|
||||
return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
|
||||
}
|
||||
zig_unreachable();
|
||||
}
|
||||
@ -17748,6 +17896,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
|
||||
case IrInstructionIdCoroSize:
|
||||
case IrInstructionIdCoroSuspend:
|
||||
case IrInstructionIdCoroFree:
|
||||
case IrInstructionIdAtomicRmw:
|
||||
return false;
|
||||
|
||||
case IrInstructionIdAsm:
|
||||
|
@ -1113,6 +1113,32 @@ static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelpe
|
||||
fprintf(irp->f, ")");
|
||||
}
|
||||
|
||||
// Prints an IrInstructionAtomicRmw as "@atomicRmw(T,ptr,op,operand,ordering)"
// for IR dumps. The operand_type/op/ordering operands are null once analysis
// has resolved them into enum values, in which case a placeholder is printed.
static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
    fprintf(irp->f, "@atomicRmw(");
    if (instruction->operand_type == nullptr) {
        fprintf(irp->f, "[TODO print]");
    } else {
        ir_print_other_instruction(irp, instruction->operand_type);
    }
    fprintf(irp->f, ",");
    ir_print_other_instruction(irp, instruction->ptr);
    fprintf(irp->f, ",");
    if (instruction->op == nullptr) {
        fprintf(irp->f, "[TODO print]");
    } else {
        ir_print_other_instruction(irp, instruction->op);
    }
    fprintf(irp->f, ",");
    ir_print_other_instruction(irp, instruction->operand);
    fprintf(irp->f, ",");
    if (instruction->ordering == nullptr) {
        fprintf(irp->f, "[TODO print]");
    } else {
        ir_print_other_instruction(irp, instruction->ordering);
    }
    fprintf(irp->f, ")");
}
|
||||
|
||||
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
|
||||
ir_print_prefix(irp, instruction);
|
||||
switch (instruction->id) {
|
||||
@ -1472,6 +1498,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
|
||||
case IrInstructionIdCoroAllocHelper:
|
||||
ir_print_coro_alloc_helper(irp, (IrInstructionCoroAllocHelper *)instruction);
|
||||
break;
|
||||
case IrInstructionIdAtomicRmw:
|
||||
ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction);
|
||||
break;
|
||||
}
|
||||
fprintf(irp->f, "\n");
|
||||
}
|
||||
|
@ -97,21 +97,18 @@ pub fn assertOrPanic(ok: bool) void {
|
||||
}
|
||||
}
|
||||
|
||||
var panicking = false;
|
||||
var panicking: u8 = 0; // TODO make this a bool
|
||||
/// This is the default panic implementation.
|
||||
pub fn panic(comptime format: []const u8, args: ...) noreturn {
|
||||
// TODO an intrinsic that labels this as unlikely to be reached
|
||||
@setCold(true);
|
||||
|
||||
// TODO
|
||||
// if (@atomicRmw(AtomicOp.XChg, &panicking, true, AtomicOrder.SeqCst)) { }
|
||||
if (panicking) {
|
||||
if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) {
|
||||
// Panicked during a panic.
|
||||
|
||||
// TODO detect if a different thread caused the panic, because in that case
|
||||
// we would want to return here instead of calling abort, so that the thread
|
||||
// which first called panic can finish printing a stack trace.
|
||||
os.abort();
|
||||
} else {
|
||||
panicking = true;
|
||||
}
|
||||
|
||||
const stderr = getStderrStream() catch os.abort();
|
||||
@ -122,10 +119,11 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn {
|
||||
}
|
||||
|
||||
pub fn panicWithTrace(trace: &const builtin.StackTrace, comptime format: []const u8, args: ...) noreturn {
|
||||
if (panicking) {
|
||||
@setCold(true);
|
||||
|
||||
if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) {
|
||||
// See TODO in above function
|
||||
os.abort();
|
||||
} else {
|
||||
panicking = true;
|
||||
}
|
||||
const stderr = getStderrStream() catch os.abort();
|
||||
stderr.print(format ++ "\n", args) catch os.abort();
|
||||
|
@ -1,5 +1,7 @@
|
||||
const assert = @import("std").debug.assert;
|
||||
const AtomicOrder = @import("builtin").AtomicOrder;
|
||||
const builtin = @import("builtin");
|
||||
const AtomicRmwOp = builtin.AtomicRmwOp;
|
||||
const AtomicOrder = builtin.AtomicOrder;
|
||||
|
||||
test "cmpxchg" {
|
||||
var x: i32 = 1234;
|
||||
@ -12,3 +14,14 @@ test "fence" {
|
||||
@fence(AtomicOrder.SeqCst);
|
||||
x = 5678;
|
||||
}
|
||||
|
||||
test "atomicrmw" {
    // Exercise @atomicRmw through a runtime pointer and verify the new value
    // was stored.
    var value: u8 = 200;
    testAtomicRmw(&value);
    assert(value == 42);
}
|
||||
|
||||
fn testAtomicRmw(ptr: &u8) void {
    // Xchg atomically stores 42 and yields the previous contents of *ptr.
    const old_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst);
    assert(old_value == 200);
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user