hash_map: rename to ArrayHashMap and add new HashMap implementation

Branch: master
Author: Sahnvour
Date: 2020-08-02 23:24:03 +02:00
Parent: 3f7cb14b26
Commit: 575fbd5e35
17 changed files with 2027 additions and 777 deletions
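Note: the new lib/std/array_hash_map.zig (diff suppressed below) keeps the old insertion-order-preserving implementation under its new name, while std.HashMap becomes a separate, non-ordered table. Most hunks in this commit accordingly move call sites off the ArrayHashMap-only items() slice and onto the iterator() API that both map types provide. A rough sketch of the two iteration styles after this change (the test, keys, and values are illustrative, not taken from the diff):

const std = @import("std");

test "iterating the two map flavors" {
    const gpa = std.testing.allocator;

    // std.AutoHashMap is now the non-ordered implementation; entries are
    // visited through an iterator rather than an items() slice.
    var map = std.AutoHashMap(u32, u32).init(gpa);
    defer map.deinit();
    try map.putNoClobber(1, 10);
    var it = map.iterator();
    while (it.next()) |entry| {
        std.debug.print("{} -> {}\n", .{ entry.key, entry.value });
    }

    // std.AutoArrayHashMap keeps the old behavior: insertion order is
    // preserved and the entry list is still available directly.
    var array_map = std.AutoArrayHashMap(u32, u32).init(gpa);
    defer array_map.deinit();
    try array_map.putNoClobber(1, 10);
    for (array_map.items()) |entry| {
        std.debug.print("{} -> {}\n", .{ entry.key, entry.value });
    }
}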

lib/std/array_hash_map.zig (new file, 1087 lines)

File diff suppressed because it is too large.


@@ -20,7 +20,8 @@ pub const BufSet = struct {
}
pub fn deinit(self: *BufSet) void {
for (self.hash_map.items()) |entry| {
var it = self.hash_map.iterator();
while (it.next()) |entry| {
self.free(entry.key);
}
self.hash_map.deinit();

File diff suppressed because it is too large.


@@ -325,7 +325,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
break;
}
}
for (self.large_allocations.items()) |*large_alloc| {
var it = self.large_allocations.iterator();
while (it.next()) |large_alloc| {
log.err("Memory leak detected: {}", .{large_alloc.value.getStackTrace()});
leaks = true;
}
@@ -584,7 +585,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureCapacity(
self.backing_allocator,
self.large_allocations.entries.items.len + 1,
self.large_allocations.count() + 1,
);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
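Note: this hunk also shows how the commit sizes unmanaged tables: ensureCapacity takes the allocator explicitly plus a u32 count, and count() replaces reaching into entries.items.len. A compressed sketch of the reserve-then-putAssumeCapacity flow (key/value types and numbers are arbitrary):

const std = @import("std");

test "reserve a slot before doing fallible work" {
    const gpa = std.testing.allocator;

    var table: std.AutoHashMapUnmanaged(usize, usize) = .{};
    defer table.deinit(gpa);

    // Reserve first so the insert below cannot fail after the work in
    // between has already happened.
    try table.ensureCapacity(gpa, table.count() + 1);
    // ... fallible work (e.g. the backing allocation above) goes here ...
    _ = table.putAssumeCapacity(0x1000, 64);
}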


@@ -123,9 +123,9 @@ pub const Headers = struct {
pub fn deinit(self: *Self) void {
{
for (self.index.items()) |*entry| {
const dex = &entry.value;
dex.deinit(self.allocator);
var it = self.index.iterator();
while (it.next()) |entry| {
entry.value.deinit(self.allocator);
self.allocator.free(entry.key);
}
self.index.deinit(self.allocator);
@@ -333,7 +333,8 @@ pub const Headers = struct {
fn rebuildIndex(self: *Self) void {
// clear out the indexes
for (self.index.items()) |*entry| {
var it = self.index.iterator();
while (it.next()) |entry| {
entry.value.shrinkRetainingCapacity(0);
}
// fill up indexes again; we know capacity is fine from before


@@ -3,11 +3,15 @@
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
pub const ArrayHashMap = array_hash_map.ArrayHashMap;
pub const ArrayHashMapUnmanaged = array_hash_map.ArrayHashMapUnmanaged;
pub const ArrayList = @import("array_list.zig").ArrayList;
pub const ArrayListAligned = @import("array_list.zig").ArrayListAligned;
pub const ArrayListAlignedUnmanaged = @import("array_list.zig").ArrayListAlignedUnmanaged;
pub const ArrayListSentineled = @import("array_list_sentineled.zig").ArrayListSentineled;
pub const ArrayListUnmanaged = @import("array_list.zig").ArrayListUnmanaged;
pub const AutoArrayHashMap = array_hash_map.AutoArrayHashMap;
pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap;
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BloomFilter = @import("bloom_filter.zig").BloomFilter;
@@ -32,10 +36,13 @@ pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
pub const SpinLock = @import("spinlock.zig").SpinLock;
pub const StringHashMap = hash_map.StringHashMap;
pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged;
pub const StringArrayHashMap = array_hash_map.StringArrayHashMap;
pub const StringArrayHashMapUnmanaged = array_hash_map.StringArrayHashMapUnmanaged;
pub const TailQueue = @import("linked_list.zig").TailQueue;
pub const Target = @import("target.zig").Target;
pub const Thread = @import("thread.zig").Thread;
pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig");
pub const base64 = @import("base64.zig");
pub const build = @import("build.zig");


@@ -36,17 +36,17 @@ bin_file_path: []const u8,
/// It's rare for a decl to be exported, so we save memory by having a sparse map of
/// Decl pointers to details about them being exported.
/// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
decl_exports: std.AutoHashMapUnmanaged(*Decl, []*Export) = .{},
decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// We track which export is associated with the given symbol name for quick
/// detection of symbol collisions.
symbol_exports: std.StringHashMapUnmanaged(*Export) = .{},
symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{},
/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl
/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
/// is performing the export of another Decl.
/// This table owns the Export memory.
export_owners: std.AutoHashMapUnmanaged(*Decl, []*Export) = .{},
export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// Maps fully qualified namespaced names to the Decl struct for them.
decl_table: std.HashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
link_error_flags: link.File.ErrorFlags = .{},
@@ -57,13 +57,13 @@ work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
/// The ErrorMsg memory is owned by the decl, using Module's allocator.
/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
/// a Decl can have a failed_decls entry but have analysis status of success.
failed_decls: std.AutoHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator.
failed_files: std.AutoHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
failed_exports: std.AutoHashMapUnmanaged(*Export, *ErrorMsg) = .{},
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
/// Incrementing integer used to compare against the corresponding Decl
/// field to determine whether a Decl's status applies to an ongoing update, or a
@@ -201,9 +201,9 @@ pub const Decl = struct {
/// typed_value may need to be regenerated.
dependencies: DepsTable = .{},
/// The reason this is not `std.AutoHashMapUnmanaged` is a workaround for
/// The reason this is not `std.AutoArrayHashMapUnmanaged` is a workaround for
/// stage1 compiler giving me: `error: struct 'Module.Decl' depends on itself`
pub const DepsTable = std.HashMapUnmanaged(*Decl, void, std.hash_map.getAutoHashFn(*Decl), std.hash_map.getAutoEqlFn(*Decl), false);
pub const DepsTable = std.ArrayHashMapUnmanaged(*Decl, void, std.array_hash_map.getAutoHashFn(*Decl), std.array_hash_map.getAutoEqlFn(*Decl), false);
pub fn destroy(self: *Decl, gpa: *Allocator) void {
gpa.free(mem.spanZ(self.name));
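Note: DepsTable keeps the fully spelled-out generic as a stage1 workaround, now pointing at the std.array_hash_map helpers. For reference, a sketch of the same shape as a standalone declaration, with a made-up Node type; the trailing false mirrors DepsTable and, as I read the API, says not to cache hashes alongside the entries:

const std = @import("std");

const Node = struct { id: u32 };

// Same shape as DepsTable above: keyed by pointer, with no stored values.
pub const NodeSet = std.ArrayHashMapUnmanaged(
    *Node,
    void,
    std.array_hash_map.getAutoHashFn(*Node),
    std.array_hash_map.getAutoEqlFn(*Node),
    false,
);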
@@ -933,7 +933,8 @@ pub fn deinit(self: *Module) void {
self.symbol_exports.deinit(gpa);
self.root_scope.destroy(gpa);
for (self.global_error_set.items()) |entry| {
var it = self.global_error_set.iterator();
while (it.next()) |entry| {
gpa.free(entry.key);
}
self.global_error_set.deinit(gpa);
@@ -1756,7 +1757,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
// Keep track of the decls that we expect to see in this file so that
// we know which ones have been deleted.
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.gpa);
var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
defer deleted_decls.deinit();
try deleted_decls.ensureCapacity(root_scope.decls.items.len);
for (root_scope.decls.items) |file_decl| {
@@ -1877,7 +1878,7 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
// Keep track of the decls that we expect to see in this file so that
// we know which ones have been deleted.
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.gpa);
var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
defer deleted_decls.deinit();
try deleted_decls.ensureCapacity(self.decl_table.items().len);
for (self.decl_table.items()) |entry| {
@@ -2087,7 +2088,7 @@ pub fn getErrorValue(self: *Module, name: []const u8) !std.StringHashMapUnmanage
errdefer self.global_error_set.removeAssertDiscard(name);
gop.entry.key = try self.gpa.dupe(u8, name);
gop.entry.value = @intCast(u16, self.global_error_set.items().len - 1);
gop.entry.value = @intCast(u16, self.global_error_set.count() - 1);
return gop.entry.*;
}
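Note: getErrorValue keeps its getOrPut flow: a newly inserted entry is assigned the next dense id, which is count() - 1 because the fresh entry is already included in count(). A reduced sketch of that interning pattern on a managed map (the real code also dupes the key so the table owns it; that step is omitted here):

const std = @import("std");

// Intern `name`, handing out dense u16 ids (one per distinct name).
fn internErrorName(set: *std.StringHashMap(u16), name: []const u8) !u16 {
    const gop = try set.getOrPut(name);
    if (!gop.found_existing) {
        gop.entry.value = @intCast(u16, set.count() - 1);
    }
    return gop.entry.value;
}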


@@ -359,7 +359,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
const Branch = struct {
inst_table: std.AutoHashMapUnmanaged(*ir.Inst, MCValue) = .{},
inst_table: std.AutoArrayHashMapUnmanaged(*ir.Inst, MCValue) = .{},
fn deinit(self: *Branch, gpa: *Allocator) void {
self.inst_table.deinit(gpa);
@@ -750,7 +750,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const ptr_bits = arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (abi_size <= ptr_bytes) {
try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1);
try self.registers.ensureCapacity(self.gpa, self.registers.count() + 1);
if (self.allocReg(inst)) |reg| {
return MCValue{ .register = registerAlias(reg, abi_size) };
}
@@ -788,7 +788,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue {
try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1);
try self.registers.ensureCapacity(self.gpa, @intCast(u32, self.registers.count() + 1));
const reg = self.allocReg(reg_owner) orelse b: {
// We'll take over the first register. Move the instruction that was previously
@@ -1247,7 +1247,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.base.isUnused())
return MCValue.dead;
try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1);
try self.registers.ensureCapacity(self.gpa, self.registers.count() + 1);
const result = self.args[self.arg_index];
self.arg_index += 1;


@@ -110,7 +110,8 @@ const Context = struct {
}
fn deinit(self: *Context) void {
for (self.inst_map.items()) |kv| {
var it = self.inst_map.iterator();
while (it.next()) |kv| {
self.file.base.allocator.free(kv.value);
}
self.inst_map.deinit();


@@ -47,7 +47,7 @@ pub const File = struct {
};
/// For DWARF .debug_info.
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, true);
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
/// For DWARF .debug_info.
pub const DbgInfoTypeReloc = struct {
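Note: the only change to this table is the final comptime parameter: the old array-based map ended with a hash-caching bool (the true above), while the new HashMapUnmanaged ends with a maximum load percentage and expects a 64-bit hash function. A standalone sketch of the new shape with a made-up Key type, mirroring the Wyhash pattern Type.hash switches to later in this diff:

const std = @import("std");

const Key = struct { id: u64 };

fn hashKey(k: Key) u64 {
    var hasher = std.hash.Wyhash.init(0);
    std.hash.autoHash(&hasher, k.id);
    return hasher.final();
}

fn eqlKey(a: Key, b: Key) bool {
    return a.id == b.id;
}

// Hash returns u64 and the last parameter is a maximum load percentage
// rather than the old trailing bool.
pub const KeyTable = std.HashMapUnmanaged(
    Key,
    u32,
    hashKey,
    eqlKey,
    std.hash_map.DefaultMaxLoadPercentage,
);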


@@ -1629,7 +1629,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
defer {
for (dbg_info_type_relocs.items()) |*entry| {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.relocs.deinit(self.base.allocator);
}
dbg_info_type_relocs.deinit(self.base.allocator);
@@ -1917,7 +1918,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
for (dbg_info_type_relocs.items()) |*entry| {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.off = @intCast(u32, dbg_info_buffer.items.len);
try self.addDbgInfoType(entry.key, &dbg_info_buffer);
}
@@ -1925,7 +1927,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len));
// Now that we have the offset assigned we can finally perform type relocations.
for (dbg_info_type_relocs.items()) |entry| {
it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
for (entry.value.relocs.items) |off| {
mem.writeInt(
u32,


@@ -15,7 +15,7 @@ pub fn analyze(
var table = std.AutoHashMap(*ir.Inst, void).init(gpa);
defer table.deinit();
try table.ensureCapacity(body.instructions.len);
try table.ensureCapacity(@intCast(u32, body.instructions.len));
try analyzeWithTable(arena, &table, null, body);
}
@@ -84,8 +84,11 @@ fn analyzeInst(
try analyzeWithTable(arena, table, &then_table, inst.then_body);
// Reset the table back to its state from before the branch.
for (then_table.items()) |entry| {
table.removeAssertDiscard(entry.key);
{
var it = then_table.iterator();
while (it.next()) |entry| {
table.removeAssertDiscard(entry.key);
}
}
var else_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
@@ -97,28 +100,36 @@ fn analyzeInst(
var else_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer else_entry_deaths.deinit();
for (else_table.items()) |entry| {
const else_death = entry.key;
if (!then_table.contains(else_death)) {
try then_entry_deaths.append(else_death);
{
var it = else_table.iterator();
while (it.next()) |entry| {
const else_death = entry.key;
if (!then_table.contains(else_death)) {
try then_entry_deaths.append(else_death);
}
}
}
// This loop is the same, except it's for the then branch, and it additionally
// has to put its items back into the table to undo the reset.
for (then_table.items()) |entry| {
const then_death = entry.key;
if (!else_table.contains(then_death)) {
try else_entry_deaths.append(then_death);
{
var it = then_table.iterator();
while (it.next()) |entry| {
const then_death = entry.key;
if (!else_table.contains(then_death)) {
try else_entry_deaths.append(then_death);
}
_ = try table.put(then_death, {});
}
_ = try table.put(then_death, {});
}
// Now we have to correctly populate new_set.
if (new_set) |ns| {
try ns.ensureCapacity(ns.items().len + then_table.items().len + else_table.items().len);
for (then_table.items()) |entry| {
try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count()));
var it = then_table.iterator();
while (it.next()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
}
for (else_table.items()) |entry| {
it = else_table.iterator();
while (it.next()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
}
}
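Note: several of the tables in this pass are really sets: the value type is void and only membership matters. A compressed sketch of the set-style calls the liveness code leans on (contains, removeAssertDiscard, ensureCapacity plus putAssumeCapacity), with arbitrary keys:

const std = @import("std");

test "AutoHashMap(K, void) as a scratch set" {
    const gpa = std.testing.allocator;

    var set = std.AutoHashMap(u32, void).init(gpa);
    defer set.deinit();

    try set.ensureCapacity(2);
    _ = set.putAssumeCapacity(1, {});
    _ = set.putAssumeCapacity(2, {});

    std.debug.assert(set.contains(1));
    set.removeAssertDiscard(1);
    std.debug.assert(!set.contains(1));
}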


@@ -19,23 +19,9 @@ pub const Error = error{OutOfMemory};
const TypeError = Error || error{UnsupportedType};
const TransError = TypeError || error{UnsupportedTranslation};
const DeclTable = std.HashMap(usize, []const u8, addrHash, addrEql, false);
const DeclTable = std.AutoArrayHashMap(usize, []const u8);
fn addrHash(x: usize) u32 {
switch (@typeInfo(usize).Int.bits) {
32 => return x,
// pointers are usually aligned so we ignore the bits that are probably all 0 anyway
// usually the larger bits of addr space are unused so we just chop em off
64 => return @truncate(u32, x >> 4),
else => @compileError("unreachable"),
}
}
fn addrEql(a: usize, b: usize) bool {
return a == b;
}
const SymbolTable = std.StringHashMap(*ast.Node);
const SymbolTable = std.StringArrayHashMap(*ast.Node);
const AliasList = std.ArrayList(struct {
alias: []const u8,
name: []const u8,
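Note: the hand-rolled addrHash/addrEql pair goes away because std.AutoArrayHashMap derives hashing for integer keys itself. A minimal usage sketch of a DeclTable-shaped map (addresses and names invented):

const std = @import("std");

test "DeclTable-style map keyed by address" {
    var table = std.AutoArrayHashMap(usize, []const u8).init(std.testing.allocator);
    defer table.deinit();

    try table.putNoClobber(0x1000, "foo");
    try table.putNoClobber(0x2000, "bar");

    // Entries stay in insertion order, so this prints foo before bar.
    for (table.items()) |entry| {
        std.debug.print("0x{x}: {}\n", .{ entry.key, entry.value });
    }
}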
@@ -285,7 +271,7 @@ pub const Context = struct {
/// a list of names that we found by visiting all the top level decls without
/// translating them. The other maps are updated as we translate; this one is updated
/// up front in a pre-processing step.
global_names: std.StringHashMap(void),
global_names: std.StringArrayHashMap(void),
fn getMangle(c: *Context) u32 {
c.mangle_count += 1;
@@ -380,7 +366,7 @@ pub fn translate(
.alias_list = AliasList.init(gpa),
.global_scope = try arena.allocator.create(Scope.Root),
.clang_context = ZigClangASTUnit_getASTContext(ast_unit).?,
.global_names = std.StringHashMap(void).init(gpa),
.global_names = std.StringArrayHashMap(void).init(gpa),
.token_ids = .{},
.token_locs = .{},
.errors = .{},
@@ -6424,7 +6410,8 @@ fn getFnProto(c: *Context, ref: *ast.Node) ?*ast.Node.FnProto {
}
fn addMacros(c: *Context) !void {
for (c.global_scope.macro_table.items()) |kv| {
var it = c.global_scope.macro_table.iterator();
while (it.next()) |kv| {
if (getFnProto(c, kv.value)) |proto_node| {
// If a macro aliases a global variable which is a function pointer, we conclude that
// the macro is intended to represent a function that assumes the function pointer


@@ -238,7 +238,7 @@ pub const Type = extern union {
}
}
pub fn hash(self: Type) u32 {
pub fn hash(self: Type) u64 {
var hasher = std.hash.Wyhash.init(0);
const zig_type_tag = self.zigTypeTag();
std.hash.autoHash(&hasher, zig_type_tag);
@@ -303,7 +303,7 @@
// TODO implement more type hashing
},
}
return @truncate(u32, hasher.final());
return hasher.final();
}
pub fn copy(self: Type, allocator: *Allocator) error{OutOfMemory}!Type {


@@ -358,7 +358,8 @@ pub const Value = extern union {
.error_set => {
const error_set = val.cast(Payload.ErrorSet).?;
try out_stream.writeAll("error{");
for (error_set.fields.items()) |entry| {
var it = error_set.fields.iterator();
while (it.next()) |entry| {
try out_stream.print("{},", .{entry.value});
}
return out_stream.writeAll("}");


@@ -1049,7 +1049,7 @@ pub const Module = struct {
defer write.loop_table.deinit();
// First, build a map of *Inst to @ or % indexes
try write.inst_table.ensureCapacity(self.decls.len);
try write.inst_table.ensureCapacity(@intCast(u32, self.decls.len));
for (self.decls) |decl, decl_i| {
try write.inst_table.putNoClobber(decl.inst, .{ .inst = decl.inst, .index = null, .name = decl.name });
@@ -1685,7 +1685,7 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
.arena = std.heap.ArenaAllocator.init(allocator),
.old_module = &old_module,
.next_auto_name = 0,
.names = std.StringHashMap(void).init(allocator),
.names = std.StringArrayHashMap(void).init(allocator),
.primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator),
.indent = 0,
.block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator),
@@ -1758,7 +1758,7 @@ const EmitZIR = struct {
arena: std.heap.ArenaAllocator,
old_module: *const IrModule,
decls: std.ArrayListUnmanaged(*Decl),
names: std.StringHashMap(void),
names: std.StringArrayHashMap(void),
next_auto_name: usize,
primitive_table: std.AutoHashMap(Inst.Primitive.Builtin, *Decl),
indent: usize,


@@ -812,7 +812,7 @@ fn analyzeInstErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) In
.fields = .{},
.decl = undefined, // populated below
};
try payload.fields.ensureCapacity(&new_decl_arena.allocator, inst.positionals.fields.len);
try payload.fields.ensureCapacity(&new_decl_arena.allocator, @intCast(u32, inst.positionals.fields.len));
for (inst.positionals.fields) |field_name| {
const entry = try mod.getErrorValue(field_name);