IR: add FnProto instruction

master
Andrew Kelley 2016-12-18 19:40:26 -05:00
parent f12fbce0f5
commit a71fbe49cb
12 changed files with 433 additions and 317 deletions
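This commit lowers function prototype expressions to a dedicated IR instruction. ir_gen_fn_proto emits one instruction per parameter type plus one for the return type and feeds them to the new FnProto instruction; analysis then resolves each operand with ir_resolve_type and folds the prototype into a constant type value via get_fn_type. Because the result is always a comptime-known type, codegen lists IrInstructionIdFnProto under zig_unreachable() and ir_has_side_effects reports false for it. A minimal sketch of the kind of construct this handles, in the Zig syntax of the time (the name Callback is illustrative):

    const Callback = fn(a: i32, b: i32) -> bool;

The std changes in the same commit migrate container declarations to const values and type-returning functions, syntax that leans on fn prototypes being ordinary type expressions.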

CMakeLists.txt

@@ -52,11 +52,11 @@ set(ZIG_SOURCES
"${CMAKE_SOURCE_DIR}/src/link.cpp"
"${CMAKE_SOURCE_DIR}/src/main.cpp"
"${CMAKE_SOURCE_DIR}/src/os.cpp"
"${CMAKE_SOURCE_DIR}/src/parseh.cpp"
"${CMAKE_SOURCE_DIR}/src/parser.cpp"
"${CMAKE_SOURCE_DIR}/src/target.cpp"
"${CMAKE_SOURCE_DIR}/src/tokenizer.cpp"
"${CMAKE_SOURCE_DIR}/src/util.cpp"
"${CMAKE_SOURCE_DIR}/src/parseh.cpp"
"${CMAKE_SOURCE_DIR}/src/zig_llvm.cpp"
)

src/all_types.hpp

@@ -1448,6 +1448,7 @@ enum IrInstructionId {
IrInstructionIdUnwrapErrPayload,
IrInstructionIdErrWrapCode,
IrInstructionIdErrWrapPayload,
IrInstructionIdFnProto,
};
struct IrInstruction {
@@ -2060,6 +2061,13 @@ struct IrInstructionErrWrapCode {
LLVMValueRef tmp_ptr;
};
struct IrInstructionFnProto {
IrInstruction base;
IrInstruction **param_types;
IrInstruction *return_type;
};
enum LValPurpose {
LValPurposeNone,
LValPurposeAssign,

src/codegen.cpp

@@ -2196,6 +2196,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdIntType:
case IrInstructionIdMemberCount:
case IrInstructionIdAlignOf:
case IrInstructionIdFnProto:
zig_unreachable();
case IrInstructionIdReturn:
return ir_render_return(g, executable, (IrInstructionReturn *)instruction);

src/ir.cpp

@@ -439,6 +439,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrWrapCode *) {
return IrInstructionIdErrWrapCode;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionFnProto *) {
return IrInstructionIdFnProto;
}
template<typename T>
static T *ir_create_instruction(IrExecutable *exec, Scope *scope, AstNode *source_node) {
T *special_instruction = allocate<T>(1);
@@ -1826,6 +1830,22 @@ static IrInstruction *ir_build_unwrap_err_payload_from(IrBuilder *irb, IrInstruc
return new_instruction;
}
static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction **param_types, IrInstruction *return_type)
{
IrInstructionFnProto *instruction = ir_build_instruction<IrInstructionFnProto>(irb, scope, source_node);
instruction->param_types = param_types;
instruction->return_type = return_type;
assert(source_node->type == NodeTypeFnProto);
for (size_t i = 0; i < source_node->data.fn_proto.params.length; i += 1) {
ir_ref_instruction(param_types[i]);
}
ir_ref_instruction(return_type);
return &instruction->base;
}
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
@@ -3894,6 +3914,28 @@ static IrInstruction *ir_gen_container_decl(IrBuilder *irb, Scope *parent_scope,
return ir_build_const_type(irb, parent_scope, node, container_type);
}
static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
assert(node->type == NodeTypeFnProto);
size_t param_count = node->data.fn_proto.params.length;
IrInstruction **param_types = allocate<IrInstruction*>(param_count);
for (size_t i = 0; i < param_count; i += 1) {
AstNode *param_node = node->data.fn_proto.params.at(i);
AstNode *type_node = param_node->data.param_decl.type;
IrInstruction *type_value = ir_gen_node(irb, type_node, parent_scope);
if (type_value == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
param_types[i] = type_value;
}
IrInstruction *return_type = ir_gen_node(irb, node->data.fn_proto.return_type, parent_scope);
if (return_type == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
return ir_build_fn_proto(irb, parent_scope, node, param_types, return_type);
}
static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope,
LValPurpose lval)
{
@@ -3978,11 +4020,15 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
case NodeTypeContainerDecl:
return ir_lval_wrap(irb, scope, ir_gen_container_decl(irb, scope, node), lval);
case NodeTypeFnProto:
return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval);
case NodeTypeFnDef:
zig_panic("TODO IR gen NodeTypeFnDef");
case NodeTypeFnDecl:
zig_panic("TODO IR gen NodeTypeFnDecl");
case NodeTypeErrorValueDecl:
zig_panic("TODO IR gen NodeTypeErrorValueDecl");
case NodeTypeTypeDecl:
zig_panic("TODO more IR gen for node types");
zig_panic("TODO IR gen NodeTypeTypeDecl");
case NodeTypeZeroesLiteral:
zig_panic("TODO zeroes is deprecated");
}
@@ -9377,6 +9423,41 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira,
}
static TypeTableEntry *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstructionFnProto *instruction) {
AstNode *proto_node = instruction->base.source_node;
assert(proto_node->type == NodeTypeFnProto);
FnTypeId fn_type_id = {0};
init_fn_type_id(&fn_type_id, proto_node);
bool depends_on_compile_var = false;
for (; fn_type_id.next_param_index < fn_type_id.param_count; fn_type_id.next_param_index += 1) {
AstNode *param_node = proto_node->data.fn_proto.params.at(fn_type_id.next_param_index);
assert(param_node->type == NodeTypeParamDecl);
IrInstruction *param_type_value = instruction->param_types[fn_type_id.next_param_index]->other;
FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index];
param_info->is_noalias = param_node->data.param_decl.is_noalias;
param_info->type = ir_resolve_type(ira, param_type_value);
if (param_info->type->id == TypeTableEntryIdInvalid)
return ira->codegen->builtin_types.entry_invalid;
depends_on_compile_var = depends_on_compile_var || param_type_value->static_value.depends_on_compile_var;
}
IrInstruction *return_type_value = instruction->return_type->other;
fn_type_id.return_type = ir_resolve_type(ira, return_type_value);
if (fn_type_id.return_type->id == TypeTableEntryIdInvalid)
return ira->codegen->builtin_types.entry_invalid;
depends_on_compile_var = depends_on_compile_var || return_type_value->static_value.depends_on_compile_var;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base, depends_on_compile_var);
out_val->data.x_type = get_fn_type(ira->codegen, &fn_type_id);
return ira->codegen->builtin_types.entry_type;
}
static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
@@ -9517,6 +9598,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_unwrap_err_code(ira, (IrInstructionUnwrapErrCode *)instruction);
case IrInstructionIdUnwrapErrPayload:
return ir_analyze_instruction_unwrap_err_payload(ira, (IrInstructionUnwrapErrPayload *)instruction);
case IrInstructionIdFnProto:
return ir_analyze_instruction_fn_proto(ira, (IrInstructionFnProto *)instruction);
case IrInstructionIdMaybeWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
@@ -9677,6 +9760,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdMaybeWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
case IrInstructionIdFnProto:
return false;
case IrInstructionIdAsm:
{
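Two details of the analysis above are easy to miss: each parameter's noalias flag is read straight from the AST parameter declaration while its type comes from the already-analyzed IR operand, and depends_on_compile_var is OR-ed across every operand so the resulting constant is tagged correctly. A hypothetical prototype exercising both paths, in the era's syntax:

    const Writer = fn(noalias dest: &u8, len: usize) -> %void;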

src/ir_print.cpp

@@ -882,6 +882,17 @@ static void ir_print_err_wrap_payload(IrPrint *irp, IrInstructionErrWrapPayload
fprintf(irp->f, ")");
}
static void ir_print_fn_proto(IrPrint *irp, IrInstructionFnProto *instruction) {
fprintf(irp->f, "fn(");
for (size_t i = 0; i < instruction->base.source_node->data.fn_proto.params.length; i += 1) {
if (i != 0)
fprintf(irp->f, ",");
ir_print_other_instruction(irp, instruction->param_types[i]);
}
fprintf(irp->f, ")->");
ir_print_other_instruction(irp, instruction->return_type);
}
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -1112,6 +1123,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdErrWrapPayload:
ir_print_err_wrap_payload(irp, (IrInstructionErrWrapPayload *)instruction);
break;
case IrInstructionIdFnProto:
ir_print_fn_proto(irp, (IrInstructionFnProto *)instruction);
break;
}
fprintf(irp->f, "\n");
}
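For reference, ir_print_fn_proto renders the operands by reference, so a two-parameter prototype prints roughly as follows (the #n instruction references are illustrative of the printer's operand syntax, not verbatim output):

    fn(#1,#2)->#3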

std/debug.zig

@@ -69,7 +69,7 @@ pub fn writeStackTrace(out_stream: &io.OutStream) -> %void {
}
}
struct ElfStackTrace {
const ElfStackTrace = struct {
self_exe_stream: io.InStream,
elf: elf.Elf,
debug_info: &elf.SectionHeader,
@@ -77,36 +77,36 @@ struct ElfStackTrace {
debug_str: &elf.SectionHeader,
abbrev_table_list: List(AbbrevTableHeader),
compile_unit_list: List(CompileUnit),
}
};
struct CompileUnit {
const CompileUnit = struct {
is_64: bool,
die: &Die,
pc_start: u64,
pc_end: u64,
}
};
const AbbrevTable = List(AbbrevTableEntry);
struct AbbrevTableHeader {
const AbbrevTableHeader = struct {
// offset from .debug_abbrev
offset: u64,
table: AbbrevTable,
}
};
struct AbbrevTableEntry {
const AbbrevTableEntry = struct {
has_children: bool,
abbrev_code: u64,
tag_id: u64,
attrs: List(AbbrevAttr),
}
};
struct AbbrevAttr {
const AbbrevAttr = struct {
attr_id: u64,
form_id: u64,
}
};
enum FormValue {
const FormValue = enum {
Address: u64,
Block: []u8,
Const: Constant,
@@ -118,9 +118,9 @@ enum FormValue {
RefSig8: u64,
String: []u8,
StrPtr: u64,
}
};
struct Constant {
const Constant = struct {
payload: []u8,
signed: bool,
@@ -131,17 +131,17 @@ struct Constant {
return error.InvalidDebugInfo;
return mem.sliceAsInt(self.payload, false, u64);
}
}
};
struct Die {
const Die = struct {
tag_id: u64,
has_children: bool,
attrs: List(Attr),
struct Attr {
const Attr = struct {
id: u64,
value: FormValue,
}
};
fn getAttr(self: &const Die, id: u64) -> ?&const FormValue {
for (self.attrs.toSlice()) |*attr| {
@@ -175,7 +175,7 @@ struct Die {
else => error.InvalidDebugInfo,
}
}
}
};
fn readString(in_stream: &io.InStream) -> %[]u8 {
var buf = List(u8).init(&global_allocator);
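The std changes in this and the following files follow one mechanical pattern, visible in ElfStackTrace and friends above: a container declared with the dedicated struct/enum statement syntax becomes an ordinary const declaration initialized with a struct/enum expression, so each closing brace gains a semicolon. A minimal sketch with a hypothetical Point:

    // old syntax (removed)
    struct Point {
        x: i32,
        y: i32,
    }

    // new syntax: the type is an ordinary const value
    const Point = struct {
        x: i32,
        y: i32,
    };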

std/elf.zig

@@ -30,14 +30,14 @@ pub const SHT_HIPROC = 0x7fffffff;
pub const SHT_LOUSER = 0x80000000;
pub const SHT_HIUSER = 0xffffffff;
pub enum FileType {
pub const FileType = enum {
Relocatable,
Executable,
Shared,
Core,
}
};
pub enum Arch {
pub const Arch = enum {
Sparc,
x86,
Mips,
@@ -47,9 +47,9 @@ pub enum Arch {
IA_64,
x86_64,
AArch64,
}
};
pub struct SectionHeader {
pub const SectionHeader = struct {
name: u32,
sh_type: u32,
flags: u64,
@@ -60,9 +60,9 @@ pub struct SectionHeader {
info: u32,
addr_align: u64,
ent_size: u64,
}
};
pub struct Elf {
pub const Elf = struct {
in_stream: &io.InStream,
auto_close_stream: bool,
is_64: bool,
@@ -258,4 +258,4 @@ pub struct Elf {
pub fn seekToSection(elf: &Elf, section: &SectionHeader) -> %void {
%return elf.in_stream.seekTo(section.offset);
}
}
};

std/hash_map.zig

@@ -13,220 +13,225 @@ pub fn HashMap(inline K: type, inline V: type, inline hash: fn(key: K)->u32,
SmallHashMap(K, V, hash, eql, @sizeOf(usize))
}
pub struct SmallHashMap(K: type, V: type, hash: fn(key: K)->u32, eql: fn(a: K, b: K)->bool, static_size: usize) {
pub fn SmallHashMap(inline K: type, inline V: type,
inline hash: fn(key: K)->u32, inline eql: fn(a: K, b: K)->bool,
inline static_size: usize) -> type
{
struct {
entries: []Entry,
size: usize,
max_distance_from_start_index: usize,
allocator: &Allocator,
// if the hash map is small enough, we use linear search through these
// entries instead of allocating memory
prealloc_entries: [static_size]Entry,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
const Self = this;
pub struct Entry {
pub const Entry = struct {
used: bool,
distance_from_start_index: usize,
key: K,
value: V,
}
};
pub struct Iterator {
pub const Iterator = struct {
hm: &Self,
// how many items have we returned
count: usize,
// iterator through the entry array
index: usize,
// used to detect concurrent modification
initial_modification_count: debug_u32,
pub fn next(it: &Iterator) -> ?&Entry {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
if (it.count >= it.hm.size) return null;
while (it.index < it.hm.entries.len; it.index += 1) {
const entry = &it.hm.entries[it.index];
if (entry.used) {
it.index += 1;
it.count += 1;
return entry;
}
}
@unreachable() // no next item
}
}
};
pub fn init(hm: &Self, allocator: &Allocator) {
hm.entries = hm.prealloc_entries[0...];
hm.allocator = allocator;
hm.size = 0;
hm.max_distance_from_start_index = 0;
hm.prealloc_entries = zeroes; // sets used to false for all entries
hm.modification_count = zeroes;
}
pub fn deinit(hm: &Self) {
if (hm.entries.ptr != &hm.prealloc_entries[0]) {
hm.allocator.free(Entry, hm.entries);
}
}
pub fn clear(hm: &Self) {
for (hm.entries) |*entry| {
entry.used = false;
}
hm.size = 0;
hm.max_distance_from_start_index = 0;
hm.incrementModificationCount();
}
pub fn put(hm: &Self, key: K, value: V) -> %void {
hm.incrementModificationCount();
const resize = if (hm.entries.ptr == &hm.prealloc_entries[0]) {
// preallocated entries table is full
hm.size == hm.entries.len
} else {
// if we get too full (60%), double the capacity
hm.size * 5 >= hm.entries.len * 3
};
if (resize) {
const old_entries = hm.entries;
%return hm.initCapacity(hm.entries.len * 2);
// dump all of the old elements into the new table
for (old_entries) |*old_entry| {
if (old_entry.used) {
hm.internalPut(old_entry.key, old_entry.value);
}
}
if (old_entries.ptr != &hm.prealloc_entries[0]) {
hm.allocator.free(Entry, old_entries);
}
}
hm.internalPut(key, value);
}
pub fn get(hm: &Self, key: K) -> ?&Entry {
return hm.internalGet(key);
}
pub fn remove(hm: &Self, key: K) {
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
{var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
const index = (start_index + roll_over) % hm.entries.len;
var entry = &hm.entries[index];
assert(entry.used); // key not found
if (!eql(entry.key, key)) continue;
while (roll_over < hm.entries.len; roll_over += 1) {
const next_index = (start_index + roll_over + 1) % hm.entries.len;
const next_entry = &hm.entries[next_index];
if (!next_entry.used || next_entry.distance_from_start_index == 0) {
entry.used = false;
hm.size -= 1;
return;
}
*entry = *next_entry;
entry.distance_from_start_index -= 1;
entry = next_entry;
}
@unreachable() // shifting everything in the table
}}
@unreachable() // key not found
}
pub fn entryIterator(hm: &Self) -> Iterator {
return Iterator {
.hm = hm,
.count = 0,
.index = 0,
.initial_modification_count = hm.modification_count,
};
}
fn initCapacity(hm: &Self, capacity: usize) -> %void {
hm.entries = %return hm.allocator.alloc(Entry, capacity);
hm.size = 0;
hm.max_distance_from_start_index = 0;
for (hm.entries) |*entry| {
entry.used = false;
}
}
fn incrementModificationCount(hm: &Self) {
if (want_modification_safety) {
hm.modification_count +%= 1;
}
}
fn internalPut(hm: &Self, orig_key: K, orig_value: V) {
var key = orig_key;
var value = orig_value;
const start_index = hm.keyToIndex(key);
var roll_over: usize = 0;
var distance_from_start_index: usize = 0;
while (roll_over < hm.entries.len; {roll_over += 1; distance_from_start_index += 1}) {
const index = (start_index + roll_over) % hm.entries.len;
const entry = &hm.entries[index];
if (entry.used && !eql(entry.key, key)) {
if (entry.distance_from_start_index < distance_from_start_index) {
// robin hood to the rescue
const tmp = *entry;
hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index,
distance_from_start_index);
*entry = Entry {
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
.value = value,
};
key = tmp.key;
value = tmp.value;
distance_from_start_index = tmp.distance_from_start_index;
}
continue;
}
if (!entry.used) {
// adding an entry. otherwise overwriting old value with
// same key
hm.size += 1;
}
hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index);
*entry = Entry {
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
.value = value,
};
return;
}
@unreachable() // put into a full map
}
fn internalGet(hm: &Self, key: K) -> ?&Entry {
const start_index = hm.keyToIndex(key);
{var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
const index = (start_index + roll_over) % hm.entries.len;
const entry = &hm.entries[index];
if (!entry.used) return null;
if (eql(entry.key, key)) return entry;
}}
return null;
}
fn keyToIndex(hm: &Self, key: K) -> usize {
return usize(hash(key)) % hm.entries.len;
}
}
}
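A hedged usage sketch of the migrated SmallHashMap; hashInt, eqlInt, and example are hypothetical helpers, while the init/put/get/deinit API is taken from the code above:

    fn hashInt(x: u32) -> u32 {
        return x *% 2654435769; // hypothetical multiplicative hash
    }
    fn eqlInt(a: u32, b: u32) -> bool {
        return a == b;
    }
    fn example(allocator: &Allocator) -> %void {
        var map: SmallHashMap(u32, u32, hashInt, eqlInt, 8) = zeroes;
        map.init(allocator); // init fills the map in place
        defer map.deinit();
        %return map.put(1, 100);
        const entry = map.get(1); // ?&Entry; null when the key is absent
    }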

std/list.zig

@@ -3,49 +3,51 @@ const assert = debug.assert;
const mem = @import("mem.zig");
const Allocator = mem.Allocator;
pub struct List(T: type) {
pub fn List(inline T: type) -> type{
struct {
const Self = this;
items: []T,
len: usize,
allocator: &Allocator,
pub fn init(allocator: &Allocator) -> Self {
Self {
.items = zeroes,
.len = 0,
.allocator = allocator,
}
}
pub fn deinit(l: &Self) {
l.allocator.free(T, l.items);
}
pub fn toSlice(l: &Self) -> []T {
return l.items[0...l.len];
}
pub fn append(l: &Self, item: T) -> %void {
const new_length = l.len + 1;
%return l.ensureCapacity(new_length);
l.items[l.len] = item;
l.len = new_length;
}
pub fn resize(l: &Self, new_len: usize) -> %void {
%return l.ensureCapacity(new_len);
l.len = new_len;
}
pub fn ensureCapacity(l: &Self, new_capacity: usize) -> %void {
var better_capacity = l.items.len;
if (better_capacity >= new_capacity) return;
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
l.items = %return l.allocator.realloc(T, l.items, better_capacity);
}
}
}
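List keeps value-style construction (init returns a Self rather than filling one in place, unlike the hash map), and std/debug.zig above already uses it this way with the global allocator. A small hypothetical sketch:

    fn example() -> %void {
        var buf = List(u8).init(&global_allocator);
        defer buf.deinit();
        %return buf.append('x');
        %return buf.resize(4);
        const bytes = buf.toSlice(); // []u8 of length 4
    }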

std/mem.zig

@@ -8,7 +8,7 @@ pub const Cmp = math.Cmp;
pub error NoMem;
pub type Context = u8;
pub struct Allocator {
pub const Allocator = struct {
allocFn: fn (self: &Allocator, n: usize) -> %[]u8,
reallocFn: fn (self: &Allocator, old_mem: []u8, new_size: usize) -> %[]u8,
freeFn: fn (self: &Allocator, mem: []u8),
@@ -39,7 +39,7 @@ pub struct Allocator {
fn free(self: &Allocator, inline T: type, mem: []T) {
self.freeFn(self, ([]u8)(mem));
}
}
};
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.

std/net.zig

@@ -13,7 +13,7 @@ pub error NoMem;
pub error NotSocket;
pub error BadFd;
struct Connection {
const Connection = struct {
socket_fd: i32,
pub fn send(c: Connection, buf: []const u8) -> %usize {
@@ -56,14 +56,14 @@ struct Connection {
else => return error.Unexpected,
}
}
}
};
struct Address {
const Address = struct {
family: u16,
scope_id: u32,
addr: [16]u8,
sort_key: i32,
}
};
pub fn lookup(hostname: []const u8, out_addrs: []Address) -> %[]Address {
if (hostname.len == 0) {

std/rand.zig

@@ -18,7 +18,7 @@ pub const MT19937_64 = MersenneTwister(
43, 6364136223846793005);
/// Use `init` to initialize this state.
pub struct Rand {
pub const Rand = struct {
const Rng = if (@sizeOf(usize) >= 8) MT19937_64 else MT19937_32;
rng: Rng,
@@ -91,65 +91,67 @@ pub struct Rand {
};
return T(r.rangeUnsigned(int_type, 0, precision)) / T(precision);
}
}
};
struct MersenneTwister(
int: type, n: usize, m: usize, r: int,
a: int,
u: int, d: int,
s: int, b: int,
t: int, c: int,
l: int, f: int)
fn MersenneTwister(
inline int: type, inline n: usize, inline m: usize, inline r: int,
inline a: int,
inline u: int, inline d: int,
inline s: int, inline b: int,
inline t: int, inline c: int,
inline l: int, inline f: int) -> type
{
struct {
const Self = this;
array: [n]int,
index: usize,
pub fn init(mt: &Self, seed: int) {
mt.index = n;
var prev_value = seed;
mt.array[0] = prev_value;
{var i: usize = 1; while (i < n; i += 1) {
prev_value = int(i) +% f *% (prev_value ^ (prev_value >> (int.bit_count - 2)));
mt.array[i] = prev_value;
}};
}
pub fn get(mt: &Self) -> int {
const mag01 = []int{0, a};
const LM: int = (1 << r) - 1;
const UM = ~LM;
if (mt.index >= n) {
var i: usize = 0;
while (i < n - m; i += 1) {
const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
mt.array[i] = mt.array[i + m] ^ (x >> 1) ^ mag01[x & 0x1];
}
while (i < n - 1; i += 1) {
const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
mt.array[i] = mt.array[i + m - n] ^ (x >> 1) ^ mag01[x & 0x1];
}
const x = (mt.array[i] & UM) | (mt.array[0] & LM);
mt.array[i] = mt.array[m - 1] ^ (x >> 1) ^ mag01[x & 0x1];
mt.index = 0;
}
var x = mt.array[mt.index];
mt.index += 1;
x ^= ((x >> u) & d);
x ^= ((x <<% s) & b);
x ^= ((x <<% t) & c);
x ^= (x >> l);
return x;
}
}
}
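The same migration turns parameterized types from dedicated struct declarations into plain inline functions that return a type, as MersenneTwister shows above; instantiating one is just a function call. A hypothetical minimal example:

    fn Pair(inline A: type, inline B: type) -> type {
        struct {
            first: A,
            second: B,
        }
    }

    const IntBoolPair = Pair(i32, bool);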