add @atomicStore builtin
parent 956ba8b0e7
commit 110ef2e528
@@ -6612,14 +6612,14 @@ async fn func(y: *i32) void {
This builtin function atomically dereferences a pointer and returns the value.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
or an integer whose bit count meets these requirements:
{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}
an integer whose bit count meets these requirements:
</p>
<ul>
<li>At least 8</li>
<li>At most the same as usize</li>
<li>Power of 2</li>
</ul>
</ul> or an enum with a valid integer tag type.
<p>
TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
we can remove this restriction
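For context, a minimal usage sketch of @atomicLoad as described in the hunk above; the u32 operand type, the variable name, and the .SeqCst ordering are illustrative choices that mirror the behavior test added at the end of this commit:

    const std = @import("std");
    const expect = std.testing.expect;

    test "atomicLoad usage sketch" {
        var x: u32 = 42;
        // Atomically dereference the pointer and return the value it points to.
        const v = @atomicLoad(u32, &x, .SeqCst);
        expect(v == 42);
    }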
@@ -6660,6 +6660,25 @@ async fn func(y: *i32) void {
<li>{#syntax#}.Min{#endsyntax#} - stores the operand if it is smaller. Supports integers and floats.</li>
</ul>
{#header_close#}
{#header_open|@atomicStore#}
<pre>{#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}</pre>
<p>
This builtin function atomically stores a value.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}
an integer whose bit count meets these requirements:
</p>
<ul>
<li>At least 8</li>
<li>At most the same as usize</li>
<li>Power of 2</li>
</ul> or an enum with a valid integer tag type.
<p>
TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
we can remove this restriction
</p>
{#header_close#}
{#header_open|@bitCast#}
<pre>{#syntax#}@bitCast(comptime DestType: type, value: var) DestType{#endsyntax#}</pre>
<p>
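A minimal usage sketch of the @atomicStore signature documented above; the counter name and the stored value are illustrative, and the expected behavior matches the new "atomic store" test at the end of this commit:

    const std = @import("std");
    const expect = std.testing.expect;

    test "atomicStore usage sketch" {
        var counter: u32 = 0;
        // Atomically publish a new value; @atomicStore returns void.
        @atomicStore(u32, &counter, 7, .SeqCst);
        expect(@atomicLoad(u32, &counter, .SeqCst) == 7);
    }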
@@ -1700,6 +1700,7 @@ enum BuiltinFnId {
    BuiltinFnIdErrorReturnTrace,
    BuiltinFnIdAtomicRmw,
    BuiltinFnIdAtomicLoad,
    BuiltinFnIdAtomicStore,
    BuiltinFnIdHasDecl,
    BuiltinFnIdUnionInit,
    BuiltinFnIdFrameAddress,
@@ -2569,6 +2570,7 @@ enum IrInstructionId {
    IrInstructionIdErrorUnion,
    IrInstructionIdAtomicRmw,
    IrInstructionIdAtomicLoad,
    IrInstructionIdAtomicStore,
    IrInstructionIdSaveErrRetAddr,
    IrInstructionIdAddImplicitReturnType,
    IrInstructionIdErrSetCast,
@@ -3713,6 +3715,16 @@ struct IrInstructionAtomicLoad {
    AtomicOrder resolved_ordering;
};

struct IrInstructionAtomicStore {
    IrInstruction base;

    IrInstruction *operand_type;
    IrInstruction *ptr;
    IrInstruction *value;
    IrInstruction *ordering;
    AtomicOrder resolved_ordering;
};

struct IrInstructionSaveErrRetAddr {
    IrInstruction base;
};
@@ -5650,6 +5650,17 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
    return load_inst;
}

static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutable *executable,
        IrInstructionAtomicStore *instruction)
{
    LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering);
    LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
    LLVMValueRef value = ir_llvm_value(g, instruction->value);
    LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value.type);
    LLVMSetOrdering(store_inst, ordering);
    return nullptr;
}

static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) {
    LLVMValueRef op = ir_llvm_value(g, instruction->op1);
    assert(instruction->base.value.type->id == ZigTypeIdFloat);
@@ -6253,6 +6264,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
            return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
        case IrInstructionIdAtomicLoad:
            return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
        case IrInstructionIdAtomicStore:
            return ir_render_atomic_store(g, executable, (IrInstructionAtomicStore *)instruction);
        case IrInstructionIdSaveErrRetAddr:
            return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
        case IrInstructionIdFloatOp:
@@ -8064,6 +8077,7 @@ static void define_builtin_fns(CodeGen *g) {
    create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
    create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
    create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
    create_builtin_fn(g, BuiltinFnIdAtomicStore, "atomicStore", 4);
    create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
    create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
    create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
src/ir.cpp (+103)
@@ -1009,6 +1009,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
    return IrInstructionIdAtomicLoad;
}

static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicStore *) {
    return IrInstructionIdAtomicStore;
}

static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) {
    return IrInstructionIdSaveErrRetAddr;
}
@@ -3186,6 +3190,25 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode
    return &instruction->base;
}

static IrInstruction *ir_build_atomic_store(IrBuilder *irb, Scope *scope, AstNode *source_node,
        IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *value,
        IrInstruction *ordering, AtomicOrder resolved_ordering)
{
    IrInstructionAtomicStore *instruction = ir_build_instruction<IrInstructionAtomicStore>(irb, scope, source_node);
    instruction->operand_type = operand_type;
    instruction->ptr = ptr;
    instruction->value = value;
    instruction->ordering = ordering;
    instruction->resolved_ordering = resolved_ordering;

    if (operand_type != nullptr) ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);
    if (ordering != nullptr) ir_ref_instruction(ordering, irb->current_basic_block);

    return &instruction->base;
}

static IrInstruction *ir_build_save_err_ret_addr(IrBuilder *irb, Scope *scope, AstNode *source_node) {
    IrInstructionSaveErrRetAddr *instruction = ir_build_instruction<IrInstructionSaveErrRetAddr>(irb, scope, source_node);
    return &instruction->base;
@@ -5730,6 +5753,33 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
                    AtomicOrderMonotonic);
                return ir_lval_wrap(irb, scope, inst, lval, result_loc);
            }
        case BuiltinFnIdAtomicStore:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_instruction)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_instruction)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_instruction)
                    return arg2_value;

                AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
                IrInstruction *arg3_value = ir_gen_node(irb, arg3_node, scope);
                if (arg3_value == irb->codegen->invalid_instruction)
                    return arg3_value;

                IrInstruction *inst = ir_build_atomic_store(irb, scope, node, arg0_value, arg1_value, arg2_value, arg3_value,
                    // this value does not mean anything since we passed non-null values for other arg
                    AtomicOrderMonotonic);
                return ir_lval_wrap(irb, scope, inst, lval, result_loc);
            }
        case BuiltinFnIdIntToEnum:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -25748,6 +25798,56 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr
    return result;
}

static IrInstruction *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstructionAtomicStore *instruction) {
    ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
    if (type_is_invalid(operand_type))
        return ira->codegen->invalid_instruction;

    IrInstruction *ptr_inst = instruction->ptr->child;
    if (type_is_invalid(ptr_inst->value.type))
        return ira->codegen->invalid_instruction;

    ZigType *ptr_type = get_pointer_to_type(ira->codegen, operand_type, true);
    IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
    if (type_is_invalid(casted_ptr->value.type))
        return ira->codegen->invalid_instruction;

    IrInstruction *value = instruction->value->child;
    if (type_is_invalid(value->value.type))
        return ira->codegen->invalid_instruction;

    IrInstruction *casted_value = ir_implicit_cast(ira, value, operand_type);
    if (type_is_invalid(casted_value->value.type))
        return ira->codegen->invalid_instruction;

    AtomicOrder ordering;
    if (instruction->ordering == nullptr) {
        ordering = instruction->resolved_ordering;
    } else {
        if (!ir_resolve_atomic_order(ira, instruction->ordering->child, &ordering))
            return ira->codegen->invalid_instruction;
    }

    if (ordering == AtomicOrderAcquire || ordering == AtomicOrderAcqRel) {
        ir_assert(instruction->ordering != nullptr, &instruction->base);
        ir_add_error(ira, instruction->ordering,
            buf_sprintf("@atomicStore atomic ordering must not be Acquire or AcqRel"));
        return ira->codegen->invalid_instruction;
    }

    if (instr_is_comptime(casted_value) && instr_is_comptime(casted_ptr)) {
        IrInstruction *result = ir_get_deref(ira, &instruction->base, casted_ptr, nullptr);
        ir_assert(result->value.type != nullptr, &instruction->base);
        return result;
    }

    IrInstruction *result = ir_build_atomic_store(&ira->new_irb, instruction->base.scope,
        instruction->base.source_node, nullptr, casted_ptr, casted_value, nullptr, ordering);
    result->value.type = ira->codegen->builtin_types.entry_void;
    return result;
}

static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstructionSaveErrRetAddr *instruction) {
    IrInstruction *result = ir_build_save_err_ret_addr(&ira->new_irb, instruction->base.scope,
        instruction->base.source_node);
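The analysis above rejects Acquire and AcqRel orderings for stores, since acquire semantics only apply to loads (and read-modify-write operations). A hedged sketch of what that means at the call site; the flag variable and the .Release/.Acquire pairing are assumptions for illustration, not taken from the commit:

    const std = @import("std");
    const expect = std.testing.expect;

    var flag: u32 = 0;

    test "atomicStore ordering sketch" {
        // Orderings other than .Acquire and .AcqRel are accepted for stores,
        // e.g. .Monotonic, .Release and .SeqCst.
        @atomicStore(u32, &flag, 1, .Release);
        // @atomicStore(u32, &flag, 1, .Acquire); // compile error per the analysis above:
        // "@atomicStore atomic ordering must not be Acquire or AcqRel"
        expect(@atomicLoad(u32, &flag, .Acquire) == 1);
    }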
@@ -26782,6 +26882,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
            return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
        case IrInstructionIdAtomicLoad:
            return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction);
        case IrInstructionIdAtomicStore:
            return ir_analyze_instruction_atomic_store(ira, (IrInstructionAtomicStore *)instruction);
        case IrInstructionIdSaveErrRetAddr:
            return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction);
        case IrInstructionIdAddImplicitReturnType:
@@ -26962,6 +27064,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
        case IrInstructionIdSaveErrRetAddr:
        case IrInstructionIdAddImplicitReturnType:
        case IrInstructionIdAtomicRmw:
        case IrInstructionIdAtomicStore:
        case IrInstructionIdCmpxchgGen:
        case IrInstructionIdCmpxchgSrc:
        case IrInstructionIdAssertZero:
@@ -324,6 +324,8 @@ const char* ir_instruction_type_str(IrInstructionId id) {
            return "AtomicRmw";
        case IrInstructionIdAtomicLoad:
            return "AtomicLoad";
        case IrInstructionIdAtomicStore:
            return "AtomicStore";
        case IrInstructionIdSaveErrRetAddr:
            return "SaveErrRetAddr";
        case IrInstructionIdAddImplicitReturnType:
@@ -1871,6 +1873,27 @@ static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruct
    fprintf(irp->f, ")");
}

static void ir_print_atomic_store(IrPrint *irp, IrInstructionAtomicStore *instruction) {
    fprintf(irp->f, "@atomicStore(");
    if (instruction->operand_type != nullptr) {
        ir_print_other_instruction(irp, instruction->operand_type);
    } else {
        fprintf(irp->f, "[TODO print]");
    }
    fprintf(irp->f, ",");
    ir_print_other_instruction(irp, instruction->ptr);
    fprintf(irp->f, ",");
    ir_print_other_instruction(irp, instruction->value);
    fprintf(irp->f, ",");
    if (instruction->ordering != nullptr) {
        ir_print_other_instruction(irp, instruction->ordering);
    } else {
        fprintf(irp->f, "[TODO print]");
    }
    fprintf(irp->f, ")");
}

static void ir_print_save_err_ret_addr(IrPrint *irp, IrInstructionSaveErrRetAddr *instruction) {
    fprintf(irp->f, "@saveErrRetAddr()");
}
@@ -2431,6 +2454,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool
        case IrInstructionIdAtomicLoad:
            ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction);
            break;
        case IrInstructionIdAtomicStore:
            ir_print_atomic_store(irp, (IrInstructionAtomicStore *)instruction);
            break;
        case IrInstructionIdEnumToInt:
            ir_print_enum_to_int(irp, (IrInstructionEnumToInt *)instruction);
            break;
@@ -123,3 +123,11 @@ test "atomic load and rmw with enum" {
    expect(@atomicLoad(Value, &x, .SeqCst) != .a);
    expect(@atomicLoad(Value, &x, .SeqCst) != .b);
}

test "atomic store" {
    var x: u32 = 0;
    @atomicStore(u32, &x, 1, .SeqCst);
    expect(@atomicLoad(u32, &x, .SeqCst) == 1);
    @atomicStore(u32, &x, 12345678, .SeqCst);
    expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
}
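Since the documentation added earlier in this commit also allows enums with a valid integer tag type, here is a follow-on sketch; it is not part of the commit and assumes @atomicStore accepts enums the same way the existing @atomicLoad/@atomicRmw enum test above does:

    const std = @import("std");
    const expect = std.testing.expect;

    test "atomic store with enum (sketch)" {
        const Color = enum(u8) {
            red,
            green,
        };
        var c: Color = .red;
        // The enum's u8 tag type satisfies the documented bit-count requirements.
        @atomicStore(Color, &c, .green, .SeqCst);
        expect(@atomicLoad(Color, &c, .SeqCst) == .green);
    }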