Merge branch 'master' into self-hosted
commit 7c91a055c1
@@ -1270,6 +1270,7 @@ enum BuiltinFnId {
     BuiltinFnIdFieldParentPtr,
     BuiltinFnIdOffsetOf,
     BuiltinFnIdInlineCall,
+    BuiltinFnIdNoInlineCall,
     BuiltinFnIdTypeId,
     BuiltinFnIdShlExact,
     BuiltinFnIdShrExact,
@@ -1439,7 +1440,7 @@ struct CodeGen {

     struct {
         TypeTableEntry *entry_bool;
-        TypeTableEntry *entry_int[2][11]; // [signed,unsigned][2,3,4,5,6,7,8,16,32,64,128]
+        TypeTableEntry *entry_int[2][12]; // [signed,unsigned][2,3,4,5,6,7,8,16,29,32,64,128]
         TypeTableEntry *entry_c_int[CIntTypeCount];
         TypeTableEntry *entry_c_longdouble;
         TypeTableEntry *entry_c_void;
@@ -2102,7 +2103,7 @@ struct IrInstructionCall {
     IrInstruction **args;
     bool is_comptime;
     LLVMValueRef tmp_ptr;
-    bool is_inline;
+    FnInline fn_inline;
 };

 struct IrInstructionConst {
@@ -3818,12 +3818,14 @@ TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_b
         index = 6;
     } else if (size_in_bits == 16) {
         index = 7;
-    } else if (size_in_bits == 32) {
+    } else if (size_in_bits == 29) {
         index = 8;
-    } else if (size_in_bits == 64) {
+    } else if (size_in_bits == 32) {
         index = 9;
-    } else if (size_in_bits == 128) {
+    } else if (size_in_bits == 64) {
         index = 10;
+    } else if (size_in_bits == 128) {
+        index = 11;
     } else {
         return nullptr;
     }
@@ -839,7 +839,7 @@ static void gen_panic(CodeGen *g, LLVMValueRef msg_arg) {
     assert(g->panic_fn != nullptr);
     LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn);
     LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc);
-    ZigLLVMBuildCall(g->builder, fn_val, &msg_arg, 1, llvm_cc, false, "");
+    ZigLLVMBuildCall(g->builder, fn_val, &msg_arg, 1, llvm_cc, ZigLLVM_FnInlineAuto, "");
     LLVMBuildUnreachable(g->builder);
 }

@@ -988,7 +988,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
 static void gen_debug_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val) {
     LLVMValueRef safety_crash_err_fn = get_safety_crash_err_fn(g);
     ZigLLVMBuildCall(g->builder, safety_crash_err_fn, &err_val, 1, get_llvm_cc(g, CallingConventionUnspecified),
-            false, "");
+            ZigLLVM_FnInlineAuto, "");
     LLVMBuildUnreachable(g->builder);
 }

@@ -2316,12 +2316,22 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
         }
     }

-    bool want_always_inline = (instruction->fn_entry != nullptr &&
-        instruction->fn_entry->fn_inline == FnInlineAlways) || instruction->is_inline;
+    ZigLLVM_FnInline fn_inline;
+    switch (instruction->fn_inline) {
+        case FnInlineAuto:
+            fn_inline = ZigLLVM_FnInlineAuto;
+            break;
+        case FnInlineAlways:
+            fn_inline = (instruction->fn_entry == nullptr) ? ZigLLVM_FnInlineAuto : ZigLLVM_FnInlineAlways;
+            break;
+        case FnInlineNever:
+            fn_inline = ZigLLVM_FnInlineNever;
+            break;
+    }

     LLVMCallConv llvm_cc = get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc);
     LLVMValueRef result = ZigLLVMBuildCall(g->builder, fn_val,
-            gen_param_values, (unsigned)gen_param_index, llvm_cc, want_always_inline, "");
+            gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, "");

     for (size_t param_i = 0; param_i < fn_type_id->param_count; param_i += 1) {
         FnGenParamInfo *gen_info = &fn_type->data.fn.gen_param_info[param_i];
@@ -4634,6 +4644,7 @@ static const uint8_t int_sizes_in_bits[] = {
     7,
     8,
     16,
+    29,
     32,
     64,
     128,
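Side note (not part of the diff): the new 29-bit entry backs the u29 type used for allocator alignments later in this commit, presumably because LLVM caps alignments at 1 << 29 so 29 bits always suffice. A small sketch in the Zig syntax of this era, mirroring the arithmetic in globalAlloc below; alignForward is a hypothetical helper, not a std function:

    const assert = @import("std").debug.assert;

    // Round addr up to the next multiple of alignment, the same way
    // globalAlloc marches forward to satisfy an alignment request.
    fn alignForward(addr: usize, alignment: u29) -> usize {
        const rem = @rem(addr, alignment);
        return if (rem == 0) addr else addr + (alignment - rem);
    }

    test "u29 alignment values" {
        const a: u29 = 16;
        assert(alignForward(30, a) == 32);
        assert(alignForward(32, a) == 32);
    }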
@@ -4971,6 +4982,7 @@ static void define_builtin_fns(CodeGen *g) {
     create_builtin_fn(g, BuiltinFnIdRem, "rem", 2);
     create_builtin_fn(g, BuiltinFnIdMod, "mod", 2);
     create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
+    create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX);
    create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
    create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2);
    create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2);
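For orientation, a minimal sketch (not from this commit) of how the two builtins registered here are invoked from Zig, using the era's syntax; addOne is a hypothetical callee:

    const assert = @import("std").debug.assert;

    fn addOne(x: i32) -> i32 {
        return x + 1;
    }

    test "inlineCall and noInlineCall" {
        // The first argument is the function; the rest are forwarded to it as call arguments.
        const a = @inlineCall(addOne, 1);   // must be inlined at this call site
        const b = @noInlineCall(addOne, a); // must be emitted as a real call
        assert(b == 3);
    }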
src/ir.cpp (28 changes)
@@ -928,13 +928,13 @@ static IrInstruction *ir_build_union_field_ptr_from(IrBuilder *irb, IrInstructio

 static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node,
         FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
-        bool is_comptime, bool is_inline)
+        bool is_comptime, FnInline fn_inline)
 {
     IrInstructionCall *call_instruction = ir_build_instruction<IrInstructionCall>(irb, scope, source_node);
     call_instruction->fn_entry = fn_entry;
     call_instruction->fn_ref = fn_ref;
     call_instruction->is_comptime = is_comptime;
-    call_instruction->is_inline = is_inline;
+    call_instruction->fn_inline = fn_inline;
     call_instruction->args = args;
     call_instruction->arg_count = arg_count;

@@ -948,10 +948,10 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc

 static IrInstruction *ir_build_call_from(IrBuilder *irb, IrInstruction *old_instruction,
         FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
-        bool is_comptime, bool is_inline)
+        bool is_comptime, FnInline fn_inline)
 {
     IrInstruction *new_instruction = ir_build_call(irb, old_instruction->scope,
-            old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, is_inline);
+            old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline);
     ir_link_new_instruction(new_instruction, old_instruction);
     return new_instruction;
 }
@@ -4672,6 +4672,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
                 return ir_build_offset_of(irb, scope, node, arg0_value, arg1_value);
             }
         case BuiltinFnIdInlineCall:
+        case BuiltinFnIdNoInlineCall:
            {
                if (node->data.fn_call_expr.params.length == 0) {
                    add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0"));
@@ -4692,8 +4693,9 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
                    if (args[i] == irb->codegen->invalid_instruction)
                        return args[i];
                }
+                FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever;

-                return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, true);
+                return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline);
            }
        case BuiltinFnIdTypeId:
            {
@@ -4804,7 +4806,7 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
            return args[i];
    }

-    return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, false);
+    return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto);
 }

 static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -10617,7 +10619,7 @@ no_mem_slot:

 static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call_instruction,
        FnTableEntry *fn_entry, TypeTableEntry *fn_type, IrInstruction *fn_ref,
-        IrInstruction *first_arg_ptr, bool comptime_fn_call, bool inline_fn_call)
+        IrInstruction *first_arg_ptr, bool comptime_fn_call, FnInline fn_inline)
 {
     FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
     size_t first_arg_1_or_0 = first_arg_ptr ? 1 : 0;
@@ -10876,7 +10878,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal

        if (type_requires_comptime(return_type)) {
            // Throw out our work and call the function as if it were comptime.
-            return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, false);
+            return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, FnInlineAuto);
        }
    }

@@ -10900,7 +10902,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal

        size_t impl_param_count = impl_fn->type_entry->data.fn.fn_type_id.param_count;
        IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base,
-                impl_fn, nullptr, impl_param_count, casted_args, false, inline_fn_call);
+                impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline);

        TypeTableEntry *return_type = impl_fn->type_entry->data.fn.fn_type_id.return_type;
        ir_add_alloca(ira, new_call_instruction, return_type);
@@ -10959,7 +10961,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
        return ira->codegen->builtin_types.entry_invalid;

    IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base,
-            fn_entry, fn_ref, call_param_count, casted_args, false, inline_fn_call);
+            fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline);

    ir_add_alloca(ira, new_call_instruction, return_type);
    return ir_finish_anal(ira, return_type);
@@ -10998,13 +11000,13 @@ static TypeTableEntry *ir_analyze_instruction_call(IrAnalyze *ira, IrInstruction
    } else if (fn_ref->value.type->id == TypeTableEntryIdFn) {
        FnTableEntry *fn_table_entry = ir_resolve_fn(ira, fn_ref);
        return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
-            fn_ref, nullptr, is_comptime, call_instruction->is_inline);
+            fn_ref, nullptr, is_comptime, call_instruction->fn_inline);
    } else if (fn_ref->value.type->id == TypeTableEntryIdBoundFn) {
        assert(fn_ref->value.special == ConstValSpecialStatic);
        FnTableEntry *fn_table_entry = fn_ref->value.data.x_bound_fn.fn;
        IrInstruction *first_arg_ptr = fn_ref->value.data.x_bound_fn.first_arg;
        return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
-            nullptr, first_arg_ptr, is_comptime, call_instruction->is_inline);
+            nullptr, first_arg_ptr, is_comptime, call_instruction->fn_inline);
    } else {
        ir_add_error_node(ira, fn_ref->source_node,
            buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value.type->name)));
@@ -11014,7 +11016,7 @@ static TypeTableEntry *ir_analyze_instruction_call(IrAnalyze *ira, IrInstruction

    if (fn_ref->value.type->id == TypeTableEntryIdFn) {
        return ir_analyze_fn_call(ira, call_instruction, nullptr, fn_ref->value.type,
-            fn_ref, nullptr, false, false);
+            fn_ref, nullptr, false, FnInlineAuto);
    } else {
        ir_add_error_node(ira, fn_ref->source_node,
            buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value.type->name)));
@@ -175,12 +175,19 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM


 LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
-        unsigned NumArgs, unsigned CC, bool always_inline, const char *Name)
+        unsigned NumArgs, unsigned CC, ZigLLVM_FnInline fn_inline, const char *Name)
 {
     CallInst *call_inst = CallInst::Create(unwrap(Fn), makeArrayRef(unwrap(Args), NumArgs), Name);
     call_inst->setCallingConv(CC);
-    if (always_inline) {
+    switch (fn_inline) {
+        case ZigLLVM_FnInlineAuto:
+            break;
+        case ZigLLVM_FnInlineAlways:
            call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::AlwaysInline);
+            break;
+        case ZigLLVM_FnInlineNever:
+            call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
+            break;
    }
    return wrap(unwrap(B)->Insert(call_inst));
 }
@@ -45,8 +45,13 @@ enum ZigLLVM_EmitOutputType {
 bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref,
        const char *filename, ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug);

+enum ZigLLVM_FnInline {
+    ZigLLVM_FnInlineAuto,
+    ZigLLVM_FnInlineAlways,
+    ZigLLVM_FnInlineNever,
+};
 LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
-        unsigned NumArgs, unsigned CC, bool always_inline, const char *Name);
+        unsigned NumArgs, unsigned CC, ZigLLVM_FnInline fn_inline, const char *Name);

 LLVMValueRef ZigLLVMBuildCmpXchg(LLVMBuilderRef builder, LLVMValueRef ptr, LLVMValueRef cmp,
        LLVMValueRef new_val, LLVMAtomicOrdering success_ordering,
@@ -977,7 +977,7 @@ var some_mem_index: usize = 0;

 error OutOfMemory;

-fn globalAlloc(self: &mem.Allocator, n: usize, alignment: usize) -> %[]u8 {
+fn globalAlloc(self: &mem.Allocator, n: usize, alignment: u29) -> %[]u8 {
     const addr = @ptrToInt(&some_mem[some_mem_index]);
     const rem = @rem(addr, alignment);
     const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
@@ -991,7 +991,7 @@ fn globalAlloc(self: &mem.Allocator, n: usize, alignment: usize) -> %[]u8 {
    return result;
 }

-fn globalRealloc(self: &mem.Allocator, old_mem: []u8, new_size: usize, alignment: usize) -> %[]u8 {
+fn globalRealloc(self: &mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) -> %[]u8 {
    if (new_size <= old_mem.len) {
        return old_mem[0..new_size];
    } else {
@@ -16,7 +16,7 @@ pub var c_allocator = Allocator {
    .freeFn = cFree,
 };

-fn cAlloc(self: &Allocator, n: usize, alignment: usize) -> %[]u8 {
+fn cAlloc(self: &Allocator, n: usize, alignment: u29) -> %[]u8 {
    if (c.malloc(usize(n))) |buf| {
        @ptrCast(&u8, buf)[0..n]
    } else {
@@ -24,7 +24,7 @@ fn cAlloc(self: &Allocator, n: usize, alignment: usize) -> %[]u8 {
    }
 }

-fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: usize) -> %[]u8 {
+fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) -> %[]u8 {
    if (new_size <= old_mem.len) {
        old_mem[0..new_size]
    } else {
@@ -106,7 +106,7 @@ pub const IncrementingAllocator = struct {
        return self.bytes.len - self.end_index;
    }

-    fn alloc(allocator: &Allocator, n: usize, alignment: usize) -> %[]u8 {
+    fn alloc(allocator: &Allocator, n: usize, alignment: u29) -> %[]u8 {
        const self = @fieldParentPtr(IncrementingAllocator, "allocator", allocator);
        const addr = @ptrToInt(&self.bytes[self.end_index]);
        const rem = @rem(addr, alignment);
@@ -121,7 +121,7 @@ pub const IncrementingAllocator = struct {
        return result;
    }

-    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: usize) -> %[]u8 {
+    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) -> %[]u8 {
        if (new_size <= old_mem.len) {
            return old_mem[0..new_size];
        } else {
std/mem.zig (54 changes)
@@ -7,22 +7,24 @@ pub const Cmp = math.Cmp;

 pub const Allocator = struct {
     /// Allocate byte_count bytes and return them in a slice, with the
-    /// slicer's pointer aligned at least to alignment bytes.
-    allocFn: fn (self: &Allocator, byte_count: usize, alignment: usize) -> %[]u8,
+    /// slice's pointer aligned at least to alignment bytes.
+    allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) -> %[]u8,

-    /// Guaranteed: `old_mem.len` is the same as what was returned from allocFn or reallocFn.
-    /// Guaranteed: alignment >= alignment of old_mem.ptr
+    /// If `new_byte_count > old_mem.len`:
+    /// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
+    /// * alignment >= alignment of old_mem.ptr
+    ///
-    /// If `new_byte_count` is less than or equal to `old_mem.len` this function must
-    /// return successfully.
-    reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: usize) -> %[]u8,
+    /// If `new_byte_count <= old_mem.len`:
+    /// * this function must return successfully.
+    /// * alignment <= alignment of old_mem.ptr
+    reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) -> %[]u8,

     /// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
     freeFn: fn (self: &Allocator, old_mem: []u8),

     fn create(self: &Allocator, comptime T: type) -> %&T {
         const slice = %return self.alloc(T, 1);
-        &slice[0]
+        return &slice[0];
     }

     fn destroy(self: &Allocator, ptr: var) {
@@ -30,28 +32,43 @@ pub const Allocator = struct {
     }

     fn alloc(self: &Allocator, comptime T: type, n: usize) -> %[]T {
+        return self.alignedAlloc(T, @alignOf(T), n);
+    }
+
+    fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29,
+        n: usize) -> %[]align(alignment) T
+    {
         const byte_count = %return math.mul(usize, @sizeOf(T), n);
-        const byte_slice = %return self.allocFn(self, byte_count, @alignOf(T));
-        ([]T)(@alignCast(@alignOf(T), byte_slice))
+        const byte_slice = %return self.allocFn(self, byte_count, alignment);
+        return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
     }

     fn realloc(self: &Allocator, comptime T: type, old_mem: []T, n: usize) -> %[]T {
+        return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
+    }
+
+    fn alignedRealloc(self: &Allocator, comptime T: type, comptime alignment: u29,
+        old_mem: []align(alignment) T, n: usize) -> %[]align(alignment) T
+    {
         if (old_mem.len == 0) {
             return self.alloc(T, n);
         }

-        // Assert that old_mem.ptr is properly aligned.
-        const aligned_old_mem = @alignCast(@alignOf(T), old_mem);
-
         const byte_count = %return math.mul(usize, @sizeOf(T), n);
-        const byte_slice = %return self.reallocFn(self, ([]u8)(aligned_old_mem), byte_count, @alignOf(T));
-        return ([]T)(@alignCast(@alignOf(T), byte_slice));
+        const byte_slice = %return self.reallocFn(self, ([]u8)(old_mem), byte_count, alignment);
+        return ([]T)(@alignCast(alignment, byte_slice));
     }

     /// Reallocate, but `n` must be less than or equal to `old_mem.len`.
     /// Unlike `realloc`, this function cannot fail.
     /// Shrinking to 0 is the same as calling `free`.
     fn shrink(self: &Allocator, comptime T: type, old_mem: []T, n: usize) -> []T {
+        return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
+    }
+
+    fn alignedShrink(self: &Allocator, comptime T: type, comptime alignment: u29,
+        old_mem: []align(alignment) T, n: usize) -> []align(alignment) T
+    {
         if (n == 0) {
             self.free(old_mem);
             return old_mem[0..0];
@@ -59,15 +76,12 @@ pub const Allocator = struct {

         assert(n <= old_mem.len);

-        // Assert that old_mem.ptr is properly aligned.
-        const aligned_old_mem = @alignCast(@alignOf(T), old_mem);
-
         // Here we skip the overflow checking on the multiplication because
         // n <= old_mem.len and the multiplication didn't overflow for that operation.
         const byte_count = @sizeOf(T) * n;

-        const byte_slice = %%self.reallocFn(self, ([]u8)(aligned_old_mem), byte_count, @alignOf(T));
-        return ([]T)(@alignCast(@alignOf(T), byte_slice));
+        const byte_slice = %%self.reallocFn(self, ([]u8)(old_mem), byte_count, alignment);
+        return ([]T)(@alignCast(alignment, byte_slice));
     }

     fn free(self: &Allocator, memory: var) {
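A hedged usage sketch (not part of the commit) of the reworked Allocator API: alloc, realloc, and shrink now forward to the aligned* variants with @alignOf(T), and an explicit comptime u29 alignment can be requested instead. It assumes std.debug.global_allocator can be used the same way the compare-output test added later in this commit uses it:

    const std = @import("std");
    const debug = std.debug;
    const allocator = std.debug.global_allocator;

    test "alignedAlloc with an explicit u29 alignment" {
        // alloc(T, n) forwards to alignedAlloc(T, @alignOf(T), n).
        const bytes = %%allocator.alloc(u8, 16);
        defer allocator.free(bytes);

        // Request a stricter alignment; the parameter is a comptime u29.
        const aligned = %%allocator.alignedAlloc(u8, 16, 64);
        defer allocator.free(aligned);
        debug.assert(@rem(@ptrToInt(&aligned[0]), 16) == 0);
    }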
@@ -1422,6 +1422,54 @@ pub fn args() -> ArgIterator {
     return ArgIterator.init();
 }

+/// Caller must call freeArgs on result.
+pub fn argsAlloc(allocator: &mem.Allocator) -> %[]const []u8 {
+    // TODO refactor to only make 1 allocation.
+    var it = args();
+    var contents = %return Buffer.initSize(allocator, 0);
+    defer contents.deinit();
+
+    var slice_list = ArrayList(usize).init(allocator);
+    defer slice_list.deinit();
+
+    while (it.next(allocator)) |arg_or_err| {
+        const arg = %return arg_or_err;
+        defer allocator.free(arg);
+        %return contents.append(arg);
+        %return slice_list.append(arg.len);
+    }
+
+    const contents_slice = contents.toSliceConst();
+    const slice_sizes = slice_list.toSliceConst();
+    const slice_list_bytes = %return math.mul(usize, @sizeOf([]u8), slice_sizes.len);
+    const total_bytes = %return math.add(usize, slice_list_bytes, contents_slice.len);
+    const buf = %return allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes);
+    %defer allocator.free(buf);
+
+    const result_slice_list = ([][]u8)(buf[0..slice_list_bytes]);
+    const result_contents = buf[slice_list_bytes..];
+    mem.copy(u8, result_contents, contents_slice);
+
+    var contents_index: usize = 0;
+    for (slice_sizes) |len, i| {
+        const new_index = contents_index + len;
+        result_slice_list[i] = result_contents[contents_index..new_index];
+        contents_index = new_index;
+    }
+
+    return result_slice_list;
+}
+
+pub fn argsFree(allocator: &mem.Allocator, args_alloc: []const []u8) {
+    var total_bytes: usize = 0;
+    for (args_alloc) |arg| {
+        total_bytes += @sizeOf([]u8) + arg.len;
+    }
+    const unaligned_allocated_buf = @ptrCast(&u8, args_alloc.ptr)[0..total_bytes];
+    const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
+    return allocator.free(aligned_allocated_buf);
+}
+
 test "windows arg parsing" {
     testWindowsCmdLine(c"a b\tc d", [][]const u8{"a", "b", "c", "d"});
     testWindowsCmdLine(c"\"abc\" d e", [][]const u8{"abc", "d", "e"});
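A hedged sketch (not from the commit) of how the new argsAlloc/argsFree pair is meant to be used, reusing the stdout plumbing from the compare-output test added further down; the allocator choice is an assumption:

    const std = @import("std");
    const io = std.io;
    const os = std.os;
    const allocator = std.debug.global_allocator;

    pub fn main() -> %void {
        // argsAlloc copies the arguments into one allocator-owned buffer;
        // argsFree must be called with the same allocator to release it.
        const all_args = %return os.argsAlloc(allocator);
        defer os.argsFree(allocator, all_args);

        var stdout_file = %return io.getStdOut();
        var stdout_adapter = io.FileOutStream.init(&stdout_file);
        const stdout = &stdout_adapter.stream;
        for (all_args) |arg, index| {
            %return stdout.print("{}: {}\n", index, arg);
        }
    }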
@@ -28,7 +28,9 @@ export nakedcc fn _start() -> noreturn {
        },
        else => @compileError("unsupported arch"),
    }
-    posixCallMainAndExit()
+    // If LLVM inlines stack variables into _start, they will overwrite
+    // the command line argument data.
+    @noInlineCall(posixCallMainAndExit);
 }

 export fn WinMainCRTStartup() -> noreturn {
@@ -444,4 +444,86 @@ pub fn addCases(cases: &tests.CompareOutputContext) {

        tc
    });
+
+    cases.addCase({
+        var tc = cases.create("parsing args",
+            \\const std = @import("std");
+            \\const io = std.io;
+            \\const os = std.os;
+            \\const allocator = std.debug.global_allocator;
+            \\
+            \\pub fn main() -> %void {
+            \\    var args_it = os.args();
+            \\    var stdout_file = %return io.getStdOut();
+            \\    var stdout_adapter = io.FileOutStream.init(&stdout_file);
+            \\    const stdout = &stdout_adapter.stream;
+            \\    var index: usize = 0;
+            \\    _ = args_it.skip();
+            \\    while (args_it.next(allocator)) |arg_or_err| : (index += 1) {
+            \\        const arg = %return arg_or_err;
+            \\        %return stdout.print("{}: {}\n", index, arg);
+            \\    }
+            \\}
+        ,
+            \\0: first arg
+            \\1: 'a' 'b' \
+            \\2: bare
+            \\3: ba""re
+            \\4: "
+            \\5: last arg
+            \\
+        );
+
+        tc.setCommandLineArgs([][]const u8 {
+            "first arg",
+            "'a' 'b' \\",
+            "bare",
+            "ba\"\"re",
+            "\"",
+            "last arg",
+        });
+
+        tc
+    });
+
+    cases.addCase({
+        var tc = cases.create("parsing args new API",
+            \\const std = @import("std");
+            \\const io = std.io;
+            \\const os = std.os;
+            \\const allocator = std.debug.global_allocator;
+            \\
+            \\pub fn main() -> %void {
+            \\    var args_it = os.args();
+            \\    var stdout_file = %return io.getStdOut();
+            \\    var stdout_adapter = io.FileOutStream.init(&stdout_file);
+            \\    const stdout = &stdout_adapter.stream;
+            \\    var index: usize = 0;
+            \\    _ = args_it.skip();
+            \\    while (args_it.next(allocator)) |arg_or_err| : (index += 1) {
+            \\        const arg = %return arg_or_err;
+            \\        %return stdout.print("{}: {}\n", index, arg);
+            \\    }
+            \\}
+        ,
+            \\0: first arg
+            \\1: 'a' 'b' \
+            \\2: bare
+            \\3: ba""re
+            \\4: "
+            \\5: last arg
+            \\
+        );
+
+        tc.setCommandLineArgs([][]const u8 {
+            "first arg",
+            "'a' 'b' \\",
+            "bare",
+            "ba\"\"re",
+            "\"",
+            "last arg",
+        });
+
+        tc
+    });
 }
@@ -189,6 +189,7 @@ pub const CompareOutputContext = struct {
        expected_output: []const u8,
        link_libc: bool,
        special: Special,
+        cli_args: []const []const u8,

        const SourceFile = struct {
            filename: []const u8,
@@ -201,6 +202,10 @@ pub const CompareOutputContext = struct {
                .source = source,
            });
        }
+
+        pub fn setCommandLineArgs(self: &TestCase, args: []const []const u8) {
+            self.cli_args = args;
+        }
    };

    const RunCompareOutputStep = struct {
@@ -210,9 +215,11 @@ pub const CompareOutputContext = struct {
        name: []const u8,
        expected_output: []const u8,
        test_index: usize,
+        cli_args: []const []const u8,

        pub fn create(context: &CompareOutputContext, exe_path: []const u8,
-            name: []const u8, expected_output: []const u8) -> &RunCompareOutputStep
+            name: []const u8, expected_output: []const u8,
+            cli_args: []const []const u8) -> &RunCompareOutputStep
        {
            const allocator = context.b.allocator;
            const ptr = %%allocator.create(RunCompareOutputStep);
@@ -223,6 +230,7 @@ pub const CompareOutputContext = struct {
                .expected_output = expected_output,
                .test_index = context.test_index,
                .step = build.Step.init("RunCompareOutput", allocator, make),
+                .cli_args = cli_args,
            };
            context.test_index += 1;
            return ptr;
@@ -233,10 +241,17 @@ pub const CompareOutputContext = struct {
            const b = self.context.b;

            const full_exe_path = b.pathFromRoot(self.exe_path);
+            var args = ArrayList([]const u8).init(b.allocator);
+            defer args.deinit();
+
+            %%args.append(full_exe_path);
+            for (self.cli_args) |arg| {
+                %%args.append(arg);
+            }

            warn("Test {}/{} {}...", self.test_index+1, self.context.test_index, self.name);

-            const child = %%os.ChildProcess.init([][]u8{full_exe_path}, b.allocator);
+            const child = %%os.ChildProcess.init(args.toSliceConst(), b.allocator);
            defer child.deinit();

            child.stdin_behavior = StdIo.Ignore;
@@ -364,6 +379,7 @@ pub const CompareOutputContext = struct {
            .expected_output = expected_output,
            .link_libc = false,
            .special = special,
+            .cli_args = []const []const u8{},
        };
        const root_src_name = if (special == Special.Asm) "source.s" else "source.zig";
        tc.addSourceFile(root_src_name, source);
@@ -420,7 +436,7 @@ pub const CompareOutputContext = struct {
        }

        const run_and_cmp_output = RunCompareOutputStep.create(self, exe.getOutputPath(), annotated_case_name,
-            case.expected_output);
+            case.expected_output, case.cli_args);
        run_and_cmp_output.step.dependOn(&exe.step);

        self.step.dependOn(&run_and_cmp_output.step);
@@ -447,7 +463,7 @@ pub const CompareOutputContext = struct {
        }

        const run_and_cmp_output = RunCompareOutputStep.create(self, exe.getOutputPath(),
-            annotated_case_name, case.expected_output);
+            annotated_case_name, case.expected_output, case.cli_args);
        run_and_cmp_output.step.dependOn(&exe.step);

        self.step.dependOn(&run_and_cmp_output.step);