Merge pull request #5786 from ziglang/std-hash-map

reimplement std.HashMap
commit 289eab9177
Andrew Kelley 2020-07-05 21:12:20 +00:00 committed by GitHub
16 changed files with 1011 additions and 589 deletions
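The diffs below all follow the same migration: `put` is split into `put` (returns nothing) and `fetchPut` (returns the previous entry), `getValue` becomes `get`, the old entry-returning `get` becomes `getEntry`, `iterator()` loops become `items()` slices, the `size` field becomes `items().len`, and getOrPut results expose `.entry` instead of `.kv`. As a rough sketch distilled from the call sites in this commit (not part of the diff itself; it targets the std of July 2020 and will not compile with later Zig versions):

const std = @import("std");

test "lookup and iteration in the reimplemented HashMap" {
    var map = std.AutoHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);

    // `get` returns the value directly (the old `getValue`);
    // `getEntry` returns the key/value entry (the old `get`).
    std.debug.assert(map.get(1).? == 10);
    std.debug.assert(map.getEntry(1).?.value == 10);

    // `items()` returns a slice of entries, replacing most `iterator()` loops,
    // and `items().len` replaces the old `size` field.
    for (map.items()) |entry| {
        std.debug.assert(entry.key == 1);
    }
    std.debug.assert(map.items().len == 1);

    // getOrPut results now expose `.entry` instead of `.kv`.
    const gop = try map.getOrPut(2);
    if (!gop.found_existing) gop.entry.value = 20;
}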


@@ -392,7 +392,7 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
.n = header_stack_size,
},
});
-if (try urls.put(urlized, tag_token)) |entry| {
+if (try urls.fetchPut(urlized, tag_token)) |entry| {
parseError(tokenizer, tag_token, "duplicate header url: #{}", .{urlized}) catch {};
parseError(tokenizer, entry.value, "other tag here", .{}) catch {};
return error.ParseError;
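The `put` to `fetchPut` rename above reflects a semantic split in the new API: `put` now unconditionally inserts and returns nothing, while `fetchPut` also hands back the previous entry when the key was already present. A minimal sketch (hypothetical values, not from this diff):

const std = @import("std");

test "put vs fetchPut" {
    var urls = std.StringHashMap(usize).init(std.testing.allocator);
    defer urls.deinit();

    try urls.put("a", 1); // insert or overwrite; no previous entry returned
    if (try urls.fetchPut("a", 2)) |old| {
        std.debug.assert(old.value == 1); // the clobbered entry is handed back
    }
}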


@@ -5363,11 +5363,11 @@ const std = @import("std");
const assert = std.debug.assert;
test "turn HashMap into a set with void" {
-var map = std.HashMap(i32, void, hash_i32, eql_i32).init(std.testing.allocator);
+var map = std.AutoHashMap(i32, void).init(std.testing.allocator);
defer map.deinit();
-_ = try map.put(1, {});
-_ = try map.put(2, {});
+try map.put(1, {});
+try map.put(2, {});
assert(map.contains(2));
assert(!map.contains(3));
@@ -5375,14 +5375,6 @@ test "turn HashMap into a set with void" {
_ = map.remove(2);
assert(!map.contains(2));
}
-fn hash_i32(x: i32) u32 {
-return @bitCast(u32, x);
-}
-fn eql_i32(a: i32, b: i32) bool {
-return a == b;
-}
{#code_end#}
<p>Note that this is different from using a dummy value for the hash map value.
By using {#syntax#}void{#endsyntax#} as the type of the value, the hash map entry type has no value field, and


@@ -210,6 +210,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
self.capacity = new_len;
}
+/// Reduce length to `new_len`.
+/// Invalidates element pointers.
+/// Keeps capacity the same.
+pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+assert(new_len <= self.items.len);
+self.items.len = new_len;
+}
pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
@@ -432,6 +432,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
self.capacity = new_len;
}
+/// Reduce length to `new_len`.
+/// Invalidates element pointers.
+/// Keeps capacity the same.
+pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+assert(new_len <= self.items.len);
+self.items.len = new_len;
+}
pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
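For context, a minimal sketch of how the new helper behaves (hypothetical usage consistent with the doc comments above, not part of the diff):

const std = @import("std");

test "shrinkRetainingCapacity drops length but keeps capacity" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();
    try list.appendSlice("hello");

    const old_capacity = list.capacity;
    list.shrinkRetainingCapacity(2); // length drops; the allocation is untouched
    std.debug.assert(list.items.len == 2);
    std.debug.assert(list.capacity == old_capacity);
}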


@@ -33,10 +33,10 @@ pub const BufMap = struct {
pub fn setMove(self: *BufMap, key: []u8, value: []u8) !void {
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
-self.free(get_or_put.kv.key);
-get_or_put.kv.key = key;
+self.free(get_or_put.entry.key);
+get_or_put.entry.key = key;
}
-get_or_put.kv.value = value;
+get_or_put.entry.value = value;
}
/// `key` and `value` are copied into the BufMap.
@@ -45,19 +45,18 @@ pub const BufMap = struct {
errdefer self.free(value_copy);
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
-self.free(get_or_put.kv.value);
+self.free(get_or_put.entry.value);
} else {
-get_or_put.kv.key = self.copy(key) catch |err| {
+get_or_put.entry.key = self.copy(key) catch |err| {
_ = self.hash_map.remove(key);
return err;
};
}
-get_or_put.kv.value = value_copy;
+get_or_put.entry.value = value_copy;
}
pub fn get(self: BufMap, key: []const u8) ?[]const u8 {
-const entry = self.hash_map.get(key) orelse return null;
-return entry.value;
+return self.hash_map.get(key);
}
pub fn delete(self: *BufMap, key: []const u8) void {


@@ -14,14 +14,12 @@ pub const BufSet = struct {
return self;
}
-pub fn deinit(self: *const BufSet) void {
-var it = self.hash_map.iterator();
-while (true) {
-const entry = it.next() orelse break;
+pub fn deinit(self: *BufSet) void {
+for (self.hash_map.items()) |entry| {
self.free(entry.key);
}
self.hash_map.deinit();
+self.* = undefined;
}
pub fn put(self: *BufSet, key: []const u8) !void {


@@ -422,12 +422,12 @@ pub const Builder = struct {
.type_id = type_id,
.description = description,
};
-if ((self.available_options_map.put(name, available_option) catch unreachable) != null) {
+if ((self.available_options_map.fetchPut(name, available_option) catch unreachable) != null) {
panic("Option '{}' declared twice", .{name});
}
self.available_options_list.append(available_option) catch unreachable;
-const entry = self.user_input_options.get(name) orelse return null;
+const entry = self.user_input_options.getEntry(name) orelse return null;
entry.value.used = true;
switch (type_id) {
TypeId.Bool => switch (entry.value.value) {
@@ -634,7 +634,7 @@ pub const Builder = struct {
pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) !bool {
const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
-gop.kv.value = UserInputOption{
+gop.entry.value = UserInputOption{
.name = name,
.value = UserValue{ .Scalar = value },
.used = false,
@@ -643,7 +643,7 @@ pub const Builder = struct {
}
// option already exists
-switch (gop.kv.value.value) {
+switch (gop.entry.value.value) {
UserValue.Scalar => |s| {
// turn it into a list
var list = ArrayList([]const u8).init(self.allocator);
@@ -675,7 +675,7 @@ pub const Builder = struct {
pub fn addUserInputFlag(self: *Builder, name: []const u8) !bool {
const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
-gop.kv.value = UserInputOption{
+gop.entry.value = UserInputOption{
.name = name,
.value = UserValue{ .Flag = {} },
.used = false,
@@ -684,7 +684,7 @@ pub const Builder = struct {
}
// option already exists
-switch (gop.kv.value.value) {
+switch (gop.entry.value.value) {
UserValue.Scalar => |s| {
warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", .{ name, name, s });
return true;


@@ -1132,7 +1132,7 @@ pub const DebugInfo = struct {
const seg_end = seg_start + segment_cmd.vmsize;
if (rebased_address >= seg_start and rebased_address < seg_end) {
-if (self.address_map.getValue(base_address)) |obj_di| {
+if (self.address_map.get(base_address)) |obj_di| {
return obj_di;
}
@@ -1204,7 +1204,7 @@ pub const DebugInfo = struct {
const seg_end = seg_start + info.SizeOfImage;
if (address >= seg_start and address < seg_end) {
-if (self.address_map.getValue(seg_start)) |obj_di| {
+if (self.address_map.get(seg_start)) |obj_di| {
return obj_di;
}
@@ -1278,7 +1278,7 @@ pub const DebugInfo = struct {
else => return error.MissingDebugInfo,
}
-if (self.address_map.getValue(ctx.base_address)) |obj_di| {
+if (self.address_map.get(ctx.base_address)) |obj_di| {
return obj_di;
}
@@ -1441,7 +1441,7 @@ pub const ModuleDebugInfo = switch (builtin.os.tag) {
const o_file_path = mem.spanZ(self.strings[symbol.ofile.?.n_strx..]);
// Check if its debug infos are already in the cache
-var o_file_di = self.ofiles.getValue(o_file_path) orelse
+var o_file_di = self.ofiles.get(o_file_path) orelse
(self.loadOFile(o_file_path) catch |err| switch (err) {
error.FileNotFound,
error.MissingDebugInfo,

(File diff suppressed because it is too large.)


@@ -118,13 +118,12 @@ pub const Headers = struct {
};
}
-pub fn deinit(self: Self) void {
+pub fn deinit(self: *Self) void {
{
-var it = self.index.iterator();
-while (it.next()) |kv| {
-var dex = &kv.value;
+for (self.index.items()) |*entry| {
+const dex = &entry.value;
dex.deinit();
-self.allocator.free(kv.key);
+self.allocator.free(entry.key);
}
self.index.deinit();
}
@@ -134,6 +133,7 @@ pub const Headers = struct {
}
self.data.deinit();
}
+self.* = undefined;
}
pub fn clone(self: Self, allocator: *Allocator) !Self {
@@ -155,10 +155,10 @@ pub const Headers = struct {
const n = self.data.items.len + 1;
try self.data.ensureCapacity(n);
var entry: HeaderEntry = undefined;
-if (self.index.get(name)) |kv| {
+if (self.index.getEntry(name)) |kv| {
entry = try HeaderEntry.init(self.allocator, kv.key, value, never_index);
errdefer entry.deinit();
-var dex = &kv.value;
+const dex = &kv.value;
try dex.append(n - 1);
} else {
const name_dup = try self.allocator.dupe(u8, name);
@@ -195,7 +195,7 @@ pub const Headers = struct {
/// Returns boolean indicating if something was deleted.
pub fn delete(self: *Self, name: []const u8) bool {
if (self.index.remove(name)) |kv| {
-var dex = &kv.value;
+const dex = &kv.value;
// iterate backwards
var i = dex.items.len;
while (i > 0) {
@@ -207,7 +207,7 @@ pub const Headers = struct {
}
dex.deinit();
self.allocator.free(kv.key);
-self.rebuild_index();
+self.rebuildIndex();
return true;
} else {
return false;
@@ -216,45 +216,52 @@ pub const Headers = struct {
/// Removes the element at the specified index.
/// Moves items down to fill the empty space.
+/// TODO this implementation can be replaced by adding
+/// orderedRemove to the new hash table implementation as an
+/// alternative to swapRemove.
pub fn orderedRemove(self: *Self, i: usize) void {
const removed = self.data.orderedRemove(i);
-const kv = self.index.get(removed.name).?;
-var dex = &kv.value;
+const kv = self.index.getEntry(removed.name).?;
+const dex = &kv.value;
if (dex.items.len == 1) {
// was last item; delete the index
-_ = self.index.remove(kv.key);
dex.deinit();
removed.deinit();
-self.allocator.free(kv.key);
+const key = kv.key;
+_ = self.index.remove(key); // invalidates `kv` and `dex`
+self.allocator.free(key);
} else {
dex.shrink(dex.items.len - 1);
removed.deinit();
}
// if it was the last item; no need to rebuild index
if (i != self.data.items.len) {
-self.rebuild_index();
+self.rebuildIndex();
}
}
/// Removes the element at the specified index.
/// The empty slot is filled from the end of the list.
+/// TODO this implementation can be replaced by simply using the
+/// new hash table which does swap removal.
pub fn swapRemove(self: *Self, i: usize) void {
const removed = self.data.swapRemove(i);
-const kv = self.index.get(removed.name).?;
-var dex = &kv.value;
+const kv = self.index.getEntry(removed.name).?;
+const dex = &kv.value;
if (dex.items.len == 1) {
// was last item; delete the index
-_ = self.index.remove(kv.key);
dex.deinit();
removed.deinit();
-self.allocator.free(kv.key);
+const key = kv.key;
+_ = self.index.remove(key); // invalidates `kv` and `dex`
+self.allocator.free(key);
} else {
dex.shrink(dex.items.len - 1);
removed.deinit();
}
// if it was the last item; no need to rebuild index
if (i != self.data.items.len) {
-self.rebuild_index();
+self.rebuildIndex();
}
}
@@ -266,11 +273,7 @@ pub const Headers = struct {
/// Returns a list of indices containing headers with the given name.
/// The returned list should not be modified by the caller.
pub fn getIndices(self: Self, name: []const u8) ?HeaderIndexList {
-if (self.index.get(name)) |kv| {
-return kv.value;
-} else {
-return null;
-}
+return self.index.get(name);
}
/// Returns a slice containing each header with the given name.
@@ -325,25 +328,20 @@ pub const Headers = struct {
return buf;
}
-fn rebuild_index(self: *Self) void {
-{ // clear out the indexes
-var it = self.index.iterator();
-while (it.next()) |kv| {
-var dex = &kv.value;
-dex.items.len = 0; // keeps capacity available
-}
+fn rebuildIndex(self: *Self) void {
+// clear out the indexes
+for (self.index.items()) |*entry| {
+entry.value.shrinkRetainingCapacity(0);
}
-{ // fill up indexes again; we know capacity is fine from before
-for (self.data.span()) |entry, i| {
-var dex = &self.index.get(entry.name).?.value;
-dex.appendAssumeCapacity(i);
-}
+// fill up indexes again; we know capacity is fine from before
+for (self.data.items) |entry, i| {
+self.index.getEntry(entry.name).?.value.appendAssumeCapacity(i);
}
}
pub fn sort(self: *Self) void {
std.sort.sort(HeaderEntry, self.data.items, {}, HeaderEntry.compare);
-self.rebuild_index();
+self.rebuildIndex();
}
pub fn format(


@@ -2149,27 +2149,27 @@ test "json.parser.dynamic" {
var root = tree.root;
-var image = root.Object.get("Image").?.value;
+var image = root.Object.get("Image").?;
-const width = image.Object.get("Width").?.value;
+const width = image.Object.get("Width").?;
testing.expect(width.Integer == 800);
-const height = image.Object.get("Height").?.value;
+const height = image.Object.get("Height").?;
testing.expect(height.Integer == 600);
-const title = image.Object.get("Title").?.value;
+const title = image.Object.get("Title").?;
testing.expect(mem.eql(u8, title.String, "View from 15th Floor"));
-const animated = image.Object.get("Animated").?.value;
+const animated = image.Object.get("Animated").?;
testing.expect(animated.Bool == false);
-const array_of_object = image.Object.get("ArrayOfObject").?.value;
+const array_of_object = image.Object.get("ArrayOfObject").?;
testing.expect(array_of_object.Array.items.len == 1);
-const obj0 = array_of_object.Array.items[0].Object.get("n").?.value;
+const obj0 = array_of_object.Array.items[0].Object.get("n").?;
testing.expect(mem.eql(u8, obj0.String, "m"));
-const double = image.Object.get("double").?.value;
+const double = image.Object.get("double").?;
testing.expect(double.Float == 1.3412);
}
@@ -2217,12 +2217,12 @@ test "write json then parse it" {
var tree = try parser.parse(fixed_buffer_stream.getWritten());
defer tree.deinit();
-testing.expect(tree.root.Object.get("f").?.value.Bool == false);
-testing.expect(tree.root.Object.get("t").?.value.Bool == true);
-testing.expect(tree.root.Object.get("int").?.value.Integer == 1234);
-testing.expect(tree.root.Object.get("array").?.value.Array.items[0].Null == {});
-testing.expect(tree.root.Object.get("array").?.value.Array.items[1].Float == 12.34);
-testing.expect(mem.eql(u8, tree.root.Object.get("str").?.value.String, "hello"));
+testing.expect(tree.root.Object.get("f").?.Bool == false);
+testing.expect(tree.root.Object.get("t").?.Bool == true);
+testing.expect(tree.root.Object.get("int").?.Integer == 1234);
+testing.expect(tree.root.Object.get("array").?.Array.items[0].Null == {});
+testing.expect(tree.root.Object.get("array").?.Array.items[1].Float == 12.34);
+testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello"));
}
fn test_parse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
@@ -2245,7 +2245,7 @@ test "integer after float has proper type" {
\\ "ints": [1, 2, 3]
\\}
);
-std.testing.expect(json.Object.getValue("ints").?.Array.items[0] == .Integer);
+std.testing.expect(json.Object.get("ints").?.Array.items[0] == .Integer);
}
test "escaped characters" {
@@ -2271,16 +2271,16 @@ test "escaped characters" {
const obj = (try test_parse(&arena_allocator.allocator, input)).Object;
-testing.expectEqualSlices(u8, obj.get("backslash").?.value.String, "\\");
-testing.expectEqualSlices(u8, obj.get("forwardslash").?.value.String, "/");
-testing.expectEqualSlices(u8, obj.get("newline").?.value.String, "\n");
-testing.expectEqualSlices(u8, obj.get("carriagereturn").?.value.String, "\r");
-testing.expectEqualSlices(u8, obj.get("tab").?.value.String, "\t");
-testing.expectEqualSlices(u8, obj.get("formfeed").?.value.String, "\x0C");
-testing.expectEqualSlices(u8, obj.get("backspace").?.value.String, "\x08");
-testing.expectEqualSlices(u8, obj.get("doublequote").?.value.String, "\"");
-testing.expectEqualSlices(u8, obj.get("unicode").?.value.String, "ą");
-testing.expectEqualSlices(u8, obj.get("surrogatepair").?.value.String, "😂");
+testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
+testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
+testing.expectEqualSlices(u8, obj.get("newline").?.String, "\n");
+testing.expectEqualSlices(u8, obj.get("carriagereturn").?.String, "\r");
+testing.expectEqualSlices(u8, obj.get("tab").?.String, "\t");
+testing.expectEqualSlices(u8, obj.get("formfeed").?.String, "\x0C");
+testing.expectEqualSlices(u8, obj.get("backspace").?.String, "\x08");
+testing.expectEqualSlices(u8, obj.get("doublequote").?.String, "\"");
+testing.expectEqualSlices(u8, obj.get("unicode").?.String, "ą");
+testing.expectEqualSlices(u8, obj.get("surrogatepair").?.String, "😂");
}
test "string copy option" {
@@ -2306,11 +2306,11 @@ test "string copy option" {
const obj_copy = tree_copy.root.Object;
for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| {
-testing.expectEqualSlices(u8, obj_nocopy.getValue(field_name).?.String, obj_copy.getValue(field_name).?.String);
+testing.expectEqualSlices(u8, obj_nocopy.get(field_name).?.String, obj_copy.get(field_name).?.String);
}
-const nocopy_addr = &obj_nocopy.getValue("noescape").?.String[0];
-const copy_addr = &obj_copy.getValue("noescape").?.String[0];
+const nocopy_addr = &obj_nocopy.get("noescape").?.String[0];
+const copy_addr = &obj_copy.get("noescape").?.String[0];
var found_nocopy = false;
for (input) |_, index| {


@@ -75,7 +75,7 @@ deletion_set: std.ArrayListUnmanaged(*Decl) = .{},
keep_source_files_loaded: bool,
-const DeclTable = std.HashMap(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql);
+const DeclTable = std.HashMap(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false);
const WorkItem = union(enum) {
/// Write the machine code for a Decl to the output file.
@@ -795,49 +795,38 @@ pub fn deinit(self: *Module) void {
const allocator = self.allocator;
self.deletion_set.deinit(allocator);
self.work_queue.deinit();
-{
-var it = self.decl_table.iterator();
-while (it.next()) |kv| {
-kv.value.destroy(allocator);
-}
-self.decl_table.deinit();
+for (self.decl_table.items()) |entry| {
+entry.value.destroy(allocator);
}
-{
-var it = self.failed_decls.iterator();
-while (it.next()) |kv| {
-kv.value.destroy(allocator);
-}
-self.failed_decls.deinit();
+self.decl_table.deinit();
+for (self.failed_decls.items()) |entry| {
+entry.value.destroy(allocator);
}
-{
-var it = self.failed_files.iterator();
-while (it.next()) |kv| {
-kv.value.destroy(allocator);
-}
-self.failed_files.deinit();
+self.failed_decls.deinit();
+for (self.failed_files.items()) |entry| {
+entry.value.destroy(allocator);
}
-{
-var it = self.failed_exports.iterator();
-while (it.next()) |kv| {
-kv.value.destroy(allocator);
-}
-self.failed_exports.deinit();
+self.failed_files.deinit();
+for (self.failed_exports.items()) |entry| {
+entry.value.destroy(allocator);
}
-{
-var it = self.decl_exports.iterator();
-while (it.next()) |kv| {
-const export_list = kv.value;
-allocator.free(export_list);
-}
-self.decl_exports.deinit();
+self.failed_exports.deinit();
+for (self.decl_exports.items()) |entry| {
+const export_list = entry.value;
+allocator.free(export_list);
}
-{
-var it = self.export_owners.iterator();
-while (it.next()) |kv| {
-freeExportList(allocator, kv.value);
-}
-self.export_owners.deinit();
+self.decl_exports.deinit();
+for (self.export_owners.items()) |entry| {
+freeExportList(allocator, entry.value);
}
+self.export_owners.deinit();
self.symbol_exports.deinit();
self.root_scope.destroy(allocator);
self.* = undefined;
@@ -918,9 +907,9 @@ pub fn makeBinFileWritable(self: *Module) !void {
}
pub fn totalErrorCount(self: *Module) usize {
-const total = self.failed_decls.size +
-self.failed_files.size +
-self.failed_exports.size;
+const total = self.failed_decls.items().len +
+self.failed_files.items().len +
+self.failed_exports.items().len;
return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total;
}
@@ -931,32 +920,23 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
var errors = std.ArrayList(AllErrors.Message).init(self.allocator);
defer errors.deinit();
-{
-var it = self.failed_files.iterator();
-while (it.next()) |kv| {
-const scope = kv.key;
-const err_msg = kv.value;
-const source = try scope.getSource(self);
-try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
-}
+for (self.failed_files.items()) |entry| {
+const scope = entry.key;
+const err_msg = entry.value;
+const source = try scope.getSource(self);
+try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
}
-{
-var it = self.failed_decls.iterator();
-while (it.next()) |kv| {
-const decl = kv.key;
-const err_msg = kv.value;
-const source = try decl.scope.getSource(self);
-try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
-}
+for (self.failed_decls.items()) |entry| {
+const decl = entry.key;
+const err_msg = entry.value;
+const source = try decl.scope.getSource(self);
+try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
}
-{
-var it = self.failed_exports.iterator();
-while (it.next()) |kv| {
-const decl = kv.key.owner_decl;
-const err_msg = kv.value;
-const source = try decl.scope.getSource(self);
-try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
-}
+for (self.failed_exports.items()) |entry| {
+const decl = entry.key.owner_decl;
+const err_msg = entry.value;
+const source = try decl.scope.getSource(self);
+try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
}
if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
@@ -1016,7 +996,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
decl.analysis = .dependency_failure;
},
else => {
-try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
+try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1);
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
self.allocator,
decl.src(),
@@ -1086,7 +1066,7 @@ fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return error.AnalysisFail,
else => {
-try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
+try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1);
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
self.allocator,
decl.src(),
@@ -1636,7 +1616,7 @@ fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void
fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
switch (root_scope.status) {
.never_loaded, .unloaded_success => {
-try self.failed_files.ensureCapacity(self.failed_files.size + 1);
+try self.failed_files.ensureCapacity(self.failed_files.items().len + 1);
const source = try root_scope.getSource(self);
@@ -1677,7 +1657,7 @@ fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
switch (root_scope.status) {
.never_loaded, .unloaded_success => {
-try self.failed_files.ensureCapacity(self.failed_files.size + 1);
+try self.failed_files.ensureCapacity(self.failed_files.items().len + 1);
const source = try root_scope.getSource(self);
@@ -1745,8 +1725,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
const name = tree.tokenSliceLoc(name_loc);
const name_hash = root_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
-if (self.decl_table.get(name_hash)) |kv| {
-const decl = kv.value;
+if (self.decl_table.get(name_hash)) |decl| {
// Update the AST Node index of the decl, even if its contents are unchanged, it may
// have been re-ordered.
decl.src_index = decl_i;
@@ -1774,14 +1753,11 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
// TODO also look for global variable declarations
// TODO also look for comptime blocks and exported globals
}
-{
-// Handle explicitly deleted decls from the source code. Not to be confused
-// with when we delete decls because they are no longer referenced.
-var it = deleted_decls.iterator();
-while (it.next()) |kv| {
-//std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name});
-try self.deleteDecl(kv.key);
-}
+// Handle explicitly deleted decls from the source code. Not to be confused
+// with when we delete decls because they are no longer referenced.
+for (deleted_decls.items()) |entry| {
+//std.debug.warn("noticed '{}' deleted from source\n", .{entry.key.name});
+try self.deleteDecl(entry.key);
}
}
@@ -1800,18 +1776,14 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
// we know which ones have been deleted.
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
defer deleted_decls.deinit();
-try deleted_decls.ensureCapacity(self.decl_table.size);
-{
-var it = self.decl_table.iterator();
-while (it.next()) |kv| {
-deleted_decls.putAssumeCapacityNoClobber(kv.value, {});
-}
+try deleted_decls.ensureCapacity(self.decl_table.items().len);
+for (self.decl_table.items()) |entry| {
+deleted_decls.putAssumeCapacityNoClobber(entry.value, {});
}
for (src_module.decls) |src_decl, decl_i| {
const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name);
-if (self.decl_table.get(name_hash)) |kv| {
-const decl = kv.value;
+if (self.decl_table.get(name_hash)) |decl| {
deleted_decls.removeAssertDiscard(decl);
//std.debug.warn("'{}' contents: '{}'\n", .{ src_decl.name, src_decl.contents });
if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) {
@@ -1835,14 +1807,11 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
for (exports_to_resolve.items) |export_decl| {
_ = try self.resolveZirDecl(&root_scope.base, export_decl);
}
-{
-// Handle explicitly deleted decls from the source code. Not to be confused
-// with when we delete decls because they are no longer referenced.
-var it = deleted_decls.iterator();
-while (it.next()) |kv| {
-//std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name});
-try self.deleteDecl(kv.key);
-}
+// Handle explicitly deleted decls from the source code. Not to be confused
+// with when we delete decls because they are no longer referenced.
+for (deleted_decls.items()) |entry| {
+//std.debug.warn("noticed '{}' deleted from source\n", .{entry.key.name});
+try self.deleteDecl(entry.key);
}
}
@@ -1888,7 +1857,7 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
const kv = self.export_owners.remove(decl) orelse return;
for (kv.value) |exp| {
-if (self.decl_exports.get(exp.exported_decl)) |decl_exports_kv| {
+if (self.decl_exports.getEntry(exp.exported_decl)) |decl_exports_kv| {
// Remove exports with owner_decl matching the regenerating decl.
const list = decl_exports_kv.value;
var i: usize = 0;
@@ -1983,7 +1952,7 @@ fn createNewDecl(
name_hash: Scope.NameHash,
contents_hash: std.zig.SrcHash,
) !*Decl {
-try self.decl_table.ensureCapacity(self.decl_table.size + 1);
+try self.decl_table.ensureCapacity(self.decl_table.items().len + 1);
const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash);
errdefer self.allocator.destroy(new_decl);
new_decl.name = try mem.dupeZ(self.allocator, u8, decl_name);
@@ -2043,7 +2012,7 @@ fn resolveZirDecl(self: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError!
fn resolveZirDeclHavingIndex(self: *Module, scope: *Scope, src_decl: *zir.Decl, src_index: usize) InnerError!*Decl {
const name_hash = scope.namespace().fullyQualifiedNameHash(src_decl.name);
-const decl = self.decl_table.getValue(name_hash).?;
+const decl = self.decl_table.get(name_hash).?;
decl.src_index = src_index;
try self.ensureDeclAnalyzed(decl);
return decl;
@@ -2148,8 +2117,8 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}),
}
-try self.decl_exports.ensureCapacity(self.decl_exports.size + 1);
-try self.export_owners.ensureCapacity(self.export_owners.size + 1);
+try self.decl_exports.ensureCapacity(self.decl_exports.items().len + 1);
+try self.export_owners.ensureCapacity(self.export_owners.items().len + 1);
const new_export = try self.allocator.create(Export);
errdefer self.allocator.destroy(new_export);
@@ -2168,23 +2137,23 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
// Add to export_owners table.
const eo_gop = self.export_owners.getOrPut(owner_decl) catch unreachable;
if (!eo_gop.found_existing) {
-eo_gop.kv.value = &[0]*Export{};
+eo_gop.entry.value = &[0]*Export{};
}
-eo_gop.kv.value = try self.allocator.realloc(eo_gop.kv.value, eo_gop.kv.value.len + 1);
-eo_gop.kv.value[eo_gop.kv.value.len - 1] = new_export;
-errdefer eo_gop.kv.value = self.allocator.shrink(eo_gop.kv.value, eo_gop.kv.value.len - 1);
+eo_gop.entry.value = try self.allocator.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1);
+eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export;
+errdefer eo_gop.entry.value = self.allocator.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1);
// Add to exported_decl table.
const de_gop = self.decl_exports.getOrPut(exported_decl) catch unreachable;
if (!de_gop.found_existing) {
-de_gop.kv.value = &[0]*Export{};
+de_gop.entry.value = &[0]*Export{};
}
-de_gop.kv.value = try self.allocator.realloc(de_gop.kv.value, de_gop.kv.value.len + 1);
-de_gop.kv.value[de_gop.kv.value.len - 1] = new_export;
-errdefer de_gop.kv.value = self.allocator.shrink(de_gop.kv.value, de_gop.kv.value.len - 1);
+de_gop.entry.value = try self.allocator.realloc(de_gop.entry.value, de_gop.entry.value.len + 1);
+de_gop.entry.value[de_gop.entry.value.len - 1] = new_export;
+errdefer de_gop.entry.value = self.allocator.shrink(de_gop.entry.value, de_gop.entry.value.len - 1);
if (self.symbol_exports.get(symbol_name)) |_| {
-try self.failed_exports.ensureCapacity(self.failed_exports.size + 1);
+try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1);
self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
self.allocator,
src,
@@ -2197,10 +2166,10 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
}
try self.symbol_exports.putNoClobber(symbol_name, new_export);
-self.bin_file.updateDeclExports(self, exported_decl, de_gop.kv.value) catch |err| switch (err) {
+self.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
-try self.failed_exports.ensureCapacity(self.failed_exports.size + 1);
+try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1);
self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
self.allocator,
src,
@@ -2494,7 +2463,7 @@ fn getNextAnonNameIndex(self: *Module) usize {
fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*Decl {
const namespace = scope.namespace();
const name_hash = namespace.fullyQualifiedNameHash(ident_name);
-return self.decl_table.getValue(name_hash);
+return self.decl_table.get(name_hash);
}
fn analyzeInstExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!*Inst {
@@ -3489,8 +3458,8 @@ fn failNode(
fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError {
{
errdefer err_msg.destroy(self.allocator);
-try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
-try self.failed_files.ensureCapacity(self.failed_files.size + 1);
+try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1);
+try self.failed_files.ensureCapacity(self.failed_files.items().len + 1);
}
switch (scope.tag) {
.decl => {


@@ -705,7 +705,7 @@ const Function = struct {
}
fn resolveInst(self: *Function, inst: *ir.Inst) !MCValue {
-if (self.inst_table.getValue(inst)) |mcv| {
+if (self.inst_table.get(inst)) |mcv| {
return mcv;
}
if (inst.cast(ir.Inst.Constant)) |const_inst| {
@@ -713,7 +713,7 @@ const Function = struct {
try self.inst_table.putNoClobber(inst, mcvalue);
return mcvalue;
} else {
-return self.inst_table.getValue(inst).?;
+return self.inst_table.get(inst).?;
}
}


@@ -1071,7 +1071,7 @@ pub const ElfFile = struct {
try self.file.?.pwriteAll(code, file_offset);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
-const decl_exports = module.decl_exports.getValue(decl) orelse &[0]*Module.Export{};
+const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
}
@@ -1093,7 +1093,7 @@ pub const ElfFile = struct {
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
-try module.failed_exports.ensureCapacity(module.failed_exports.size + 1);
+try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
@@ -1111,7 +1111,7 @@ pub const ElfFile = struct {
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
-try module.failed_exports.ensureCapacity(module.failed_exports.size + 1);
+try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),


@@ -720,7 +720,7 @@ fn fmtPathDir(
defer dir.close();
const stat = try dir.stat();
-if (try fmt.seen.put(stat.inode, {})) |_| return;
+if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
@@ -768,7 +768,7 @@ fn fmtPathFile(
defer fmt.gpa.free(source_code);
// Add to set after no longer possible to get error.IsDir.
-if (try fmt.seen.put(stat.inode, {})) |_| return;
+if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
const tree = try std.zig.parse(fmt.gpa, source_code);
defer tree.deinit();


@@ -20,7 +20,7 @@ pub const Error = error{OutOfMemory};
const TypeError = Error || error{UnsupportedType};
const TransError = TypeError || error{UnsupportedTranslation};
-const DeclTable = std.HashMap(usize, []const u8, addrHash, addrEql);
+const DeclTable = std.HashMap(usize, []const u8, addrHash, addrEql, false);
fn addrHash(x: usize) u32 {
switch (@typeInfo(usize).Int.bits) {
@@ -776,8 +776,8 @@ fn checkForBuiltinTypedef(checked_name: []const u8) ?[]const u8 {
}
fn transTypeDef(c: *Context, typedef_decl: *const ZigClangTypedefNameDecl, top_level_visit: bool) Error!?*ast.Node {
-if (c.decl_table.get(@ptrToInt(ZigClangTypedefNameDecl_getCanonicalDecl(typedef_decl)))) |kv|
-return transCreateNodeIdentifier(c, kv.value); // Avoid processing this decl twice
+if (c.decl_table.get(@ptrToInt(ZigClangTypedefNameDecl_getCanonicalDecl(typedef_decl)))) |name|
+return transCreateNodeIdentifier(c, name); // Avoid processing this decl twice
const rp = makeRestorePoint(c);
const typedef_name = try c.str(ZigClangNamedDecl_getName_bytes_begin(@ptrCast(*const ZigClangNamedDecl, typedef_decl)));
@@ -818,8 +818,8 @@ fn transCreateNodeTypedef(rp: RestorePoint, typedef_decl: *const ZigClangTypedef
}
fn transRecordDecl(c: *Context, record_decl: *const ZigClangRecordDecl) Error!?*ast.Node {
-if (c.decl_table.get(@ptrToInt(ZigClangRecordDecl_getCanonicalDecl(record_decl)))) |kv|
-return try transCreateNodeIdentifier(c, kv.value); // Avoid processing this decl twice
+if (c.decl_table.get(@ptrToInt(ZigClangRecordDecl_getCanonicalDecl(record_decl)))) |name|
+return try transCreateNodeIdentifier(c, name); // Avoid processing this decl twice
const record_loc = ZigClangRecordDecl_getLocation(record_decl);
var bare_name = try c.str(ZigClangNamedDecl_getName_bytes_begin(@ptrCast(*const ZigClangNamedDecl, record_decl)));
@@ -969,7 +969,7 @@ fn transRecordDecl(c: *Context, record_decl: *const ZigClangRecordDecl) Error!?*
fn transEnumDecl(c: *Context, enum_decl: *const ZigClangEnumDecl) Error!?*ast.Node {
if (c.decl_table.get(@ptrToInt(ZigClangEnumDecl_getCanonicalDecl(enum_decl)))) |name|
-return try transCreateNodeIdentifier(c, name.value); // Avoid processing this decl twice
+return try transCreateNodeIdentifier(c, name); // Avoid processing this decl twice
const rp = makeRestorePoint(c);
const enum_loc = ZigClangEnumDecl_getLocation(enum_decl);
@@ -2130,7 +2130,7 @@ fn transInitListExprRecord(
var raw_name = try rp.c.str(ZigClangNamedDecl_getName_bytes_begin(@ptrCast(*const ZigClangNamedDecl, field_decl)));
if (ZigClangFieldDecl_isAnonymousStructOrUnion(field_decl)) {
const name = rp.c.decl_table.get(@ptrToInt(ZigClangFieldDecl_getCanonicalDecl(field_decl))).?;
-raw_name = try mem.dupe(rp.c.arena, u8, name.value);
+raw_name = try mem.dupe(rp.c.arena, u8, name);
}
const field_name_tok = try appendIdentifier(rp.c, raw_name);
@@ -2855,7 +2855,7 @@ fn transMemberExpr(rp: RestorePoint, scope: *Scope, stmt: *const ZigClangMemberE
const field_decl = @ptrCast(*const struct_ZigClangFieldDecl, member_decl);
if (ZigClangFieldDecl_isAnonymousStructOrUnion(field_decl)) {
const name = rp.c.decl_table.get(@ptrToInt(ZigClangFieldDecl_getCanonicalDecl(field_decl))).?;
-break :blk try mem.dupe(rp.c.arena, u8, name.value);
+break :blk try mem.dupe(rp.c.arena, u8, name);
}
}
const decl = @ptrCast(*const ZigClangNamedDecl, member_decl);
@@ -6040,8 +6040,8 @@ fn getContainer(c: *Context, node: *ast.Node) ?*ast.Node {
} else if (node.id == .PrefixOp) {
return node;
} else if (node.cast(ast.Node.Identifier)) |ident| {
-if (c.global_scope.sym_table.get(tokenSlice(c, ident.token))) |kv| {
-if (kv.value.cast(ast.Node.VarDecl)) |var_decl|
+if (c.global_scope.sym_table.get(tokenSlice(c, ident.token))) |value| {
+if (value.cast(ast.Node.VarDecl)) |var_decl|
return getContainer(c, var_decl.init_node.?);
}
} else if (node.cast(ast.Node.InfixOp)) |infix| {
@@ -6064,8 +6064,8 @@ fn getContainer(c: *Context, node: *ast.Node) ?*ast.Node {
fn getContainerTypeOf(c: *Context, ref: *ast.Node) ?*ast.Node {
if (ref.cast(ast.Node.Identifier)) |ident| {
-if (c.global_scope.sym_table.get(tokenSlice(c, ident.token))) |kv| {
-if (kv.value.cast(ast.Node.VarDecl)) |var_decl| {
+if (c.global_scope.sym_table.get(tokenSlice(c, ident.token))) |value| {
+if (value.cast(ast.Node.VarDecl)) |var_decl| {
if (var_decl.type_node) |ty|
return getContainer(c, ty);
}
@@ -6104,8 +6104,7 @@ fn getFnProto(c: *Context, ref: *ast.Node) ?*ast.Node.FnProto {
}
fn addMacros(c: *Context) !void {
-var macro_it = c.global_scope.macro_table.iterator();
-while (macro_it.next()) |kv| {
+for (c.global_scope.macro_table.items()) |kv| {
if (getFnProto(c, kv.value)) |proto_node| {
// If a macro aliases a global variable which is a function pointer, we conclude that
// the macro is intended to represent a function that assumes the function pointer


@@ -758,7 +758,7 @@ pub const Module = struct {
}
fn writeInstParamToStream(self: Module, stream: var, inst: *Inst, inst_table: *const InstPtrTable) !void {
-if (inst_table.getValue(inst)) |info| {
+if (inst_table.get(inst)) |info| {
if (info.index) |i| {
try stream.print("%{}", .{info.index});
} else {
@@ -843,7 +843,7 @@ const Parser = struct {
skipSpace(self);
const decl = try parseInstruction(self, &body_context, ident);
const ident_index = body_context.instructions.items.len;
-if (try body_context.name_map.put(ident, decl.inst)) |_| {
+if (try body_context.name_map.fetchPut(ident, decl.inst)) |_| {
return self.fail("redefinition of identifier '{}'", .{ident});
}
try body_context.instructions.append(decl.inst);
@@ -929,7 +929,7 @@ const Parser = struct {
skipSpace(self);
const decl = try parseInstruction(self, null, ident);
const ident_index = self.decls.items.len;
-if (try self.global_name_map.put(ident, decl.inst)) |_| {
+if (try self.global_name_map.fetchPut(ident, decl.inst)) |_| {
return self.fail("redefinition of identifier '{}'", .{ident});
}
try self.decls.append(self.allocator, decl);
@@ -1153,7 +1153,7 @@ const Parser = struct {
else => continue,
};
const ident = self.source[name_start..self.i];
-const kv = map.get(ident) orelse {
+return map.get(ident) orelse {
const bad_name = self.source[name_start - 1 .. self.i];
const src = name_start - 1;
if (local_ref) {
@@ -1172,7 +1172,6 @@ const Parser = struct {
return &declval.base;
}
};
-return kv.value;
}
fn generateName(self: *Parser) ![]u8 {
@@ -1219,13 +1218,12 @@ const EmitZIR = struct {
// by the hash table.
var src_decls = std.ArrayList(*IrModule.Decl).init(self.allocator);
defer src_decls.deinit();
-try src_decls.ensureCapacity(self.old_module.decl_table.size);
-try self.decls.ensureCapacity(self.allocator, self.old_module.decl_table.size);
-try self.names.ensureCapacity(self.old_module.decl_table.size);
+try src_decls.ensureCapacity(self.old_module.decl_table.items().len);
+try self.decls.ensureCapacity(self.allocator, self.old_module.decl_table.items().len);
+try self.names.ensureCapacity(self.old_module.decl_table.items().len);
-var decl_it = self.old_module.decl_table.iterator();
-while (decl_it.next()) |kv| {
-const decl = kv.value;
+for (self.old_module.decl_table.items()) |entry| {
+const decl = entry.value;
src_decls.appendAssumeCapacity(decl);
self.names.putAssumeCapacityNoClobber(mem.spanZ(decl.name), {});
}
@@ -1248,7 +1246,7 @@ const EmitZIR = struct {
.codegen_failure,
.dependency_failure,
.codegen_failure_retryable,
-=> if (self.old_module.failed_decls.getValue(ir_decl)) |err_msg| {
+=> if (self.old_module.failed_decls.get(ir_decl)) |err_msg| {
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
@@ -1270,7 +1268,7 @@ const EmitZIR = struct {
continue;
},
}
-if (self.old_module.export_owners.getValue(ir_decl)) |exports| {
+if (self.old_module.export_owners.get(ir_decl)) |exports| {
for (exports) |module_export| {
const symbol_name = try self.emitStringLiteral(module_export.src, module_export.options.name);
const export_inst = try self.arena.allocator.create(Inst.Export);
@@ -1314,7 +1312,7 @@ const EmitZIR = struct {
try new_body.inst_table.putNoClobber(inst, new_inst);
return new_inst;
} else {
-return new_body.inst_table.getValue(inst).?;
+return new_body.inst_table.get(inst).?;
}
}
@@ -1424,7 +1422,7 @@ const EmitZIR = struct {
try self.emitBody(body, &inst_table, &instructions);
},
.sema_failure => {
-const err_msg = self.old_module.failed_decls.getValue(module_fn.owner_decl).?;
+const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?;
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
@@ -1841,7 +1839,7 @@ const EmitZIR = struct {
self.next_auto_name += 1;
const gop = try self.names.getOrPut(proposed_name);
if (!gop.found_existing) {
-gop.kv.value = {};
+gop.entry.value = {};
return proposed_name;
}
}
@@ -1861,9 +1859,9 @@ const EmitZIR = struct {
},
.kw_args = .{},
};
-gop.kv.value = try self.emitUnnamedDecl(&primitive_inst.base);
+gop.entry.value = try self.emitUnnamedDecl(&primitive_inst.base);
}
-return gop.kv.value;
+return gop.entry.value;
}
fn emitStringLiteral(self: *EmitZIR, src: usize, str: []const u8) !*Decl {