Switch to unordered_map, slightly better performance
parent ac3ecbba14
commit 100b6d1816
@@ -114,9 +114,9 @@ VoxelDataBlock *VoxelDataMap::get_block(Vector3i bpos) {
 	if (_last_accessed_block && _last_accessed_block->position == bpos) {
 		return _last_accessed_block;
 	}
-	unsigned int *iptr = _blocks_map.getptr(bpos);
-	if (iptr != nullptr) {
-		const unsigned int i = *iptr;
+	auto it = _blocks_map.find(bpos);
+	if (it != _blocks_map.end()) {
+		const unsigned int i = it->second;
 #ifdef DEBUG_ENABLED
 		CRASH_COND(i >= _blocks.size());
 #endif
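Every lookup site in this commit follows the same mechanical translation: Godot's HashMap::getptr() returns a pointer to the mapped value or nullptr, while std::unordered_map::find() returns an iterator that has to be compared against end() and dereferenced through ->second. A minimal sketch of the correspondence, using generic names rather than the module's actual types:

```cpp
#include <unordered_map>

// Old Godot HashMap idiom (as removed above):
//   unsigned int *iptr = map.getptr(key);
//   if (iptr != nullptr) { use(*iptr); }
//
// Equivalent std::unordered_map idiom (as added above), wrapped in a
// helper that restores the pointer-or-null shape:
unsigned int *find_value(std::unordered_map<int, unsigned int> &map, int key) {
    auto it = map.find(key);   // iterator instead of a raw value pointer
    if (it == map.end()) {
        return nullptr;        // key absent
    }
    return &it->second;        // address of the mapped value
}
```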
@@ -132,9 +132,9 @@ const VoxelDataBlock *VoxelDataMap::get_block(Vector3i bpos) const {
 	if (_last_accessed_block != nullptr && _last_accessed_block->position == bpos) {
 		return _last_accessed_block;
 	}
-	const unsigned int *iptr = _blocks_map.getptr(bpos);
-	if (iptr != nullptr) {
-		const unsigned int i = *iptr;
+	auto it = _blocks_map.find(bpos);
+	if (it != _blocks_map.end()) {
+		const unsigned int i = it->second;
 #ifdef DEBUG_ENABLED
 		CRASH_COND(i >= _blocks.size());
 #endif
@@ -153,11 +153,11 @@ void VoxelDataMap::set_block(Vector3i bpos, VoxelDataBlock *block) {
 		_last_accessed_block = block;
 	}
 #ifdef DEBUG_ENABLED
-	CRASH_COND(_blocks_map.has(bpos));
+	CRASH_COND(_blocks_map.find(bpos) != _blocks_map.end());
 #endif
 	unsigned int i = _blocks.size();
 	_blocks.push_back(block);
-	_blocks_map.set(bpos, i);
+	_blocks_map.insert(std::make_pair(bpos, i));
 }
 
 void VoxelDataMap::remove_block_internal(Vector3i bpos, unsigned int index) {
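On the insertion side, `insert(std::make_pair(bpos, i))` is one of several equivalent spellings; `insert({bpos, i})` and `emplace(bpos, i)` do the same thing, and all of them are no-ops when the key already exists. A tiny hedged sketch with a plain int key (the real key is Vector3i, whose hasher is not shown in this diff):

```cpp
#include <unordered_map>
#include <utility>

void insertion_spellings() {
    std::unordered_map<int, unsigned int> map;
    // All three insert {42, 7} only if key 42 is not present yet:
    map.insert(std::make_pair(42, 7u));
    map.insert({42, 7u});
    map.emplace(42, 7u);
}
```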
@@ -176,9 +176,9 @@ void VoxelDataMap::remove_block_internal(Vector3i bpos, unsigned int index) {
 	_blocks.pop_back();
 
 	if (index < _blocks.size()) {
-		unsigned int *moved_block_index = _blocks_map.getptr(moved_block->position);
-		CRASH_COND(moved_block_index == nullptr);
-		*moved_block_index = index;
+		auto it = _blocks_map.find(moved_block->position);
+		CRASH_COND(it == _blocks_map.end());
+		it->second = index;
 	}
 }
 
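remove_block_internal() keeps the dense _blocks vector compact with a swap-and-pop: the last block is moved into the freed slot and its index entry in the hash map is patched afterwards, which is why the lookup for moved_block->position must succeed. A standalone sketch of that pattern, with assumed generic names rather than the module's actual types:

```cpp
#include <cassert>
#include <unordered_map>
#include <vector>

// Dense payload vector plus a key -> index map, illustrating the
// swap-and-pop removal used above (int keys, illustrative only).
struct DenseIndexedSet {
    std::vector<int> values;                      // dense storage
    std::vector<int> keys;                        // key of each slot, same order
    std::unordered_map<int, unsigned int> index;  // key -> slot in `values`

    void add(int key, int value) {
        index.insert(std::make_pair(key, (unsigned int)values.size()));
        values.push_back(value);
        keys.push_back(key);
    }

    void remove(int key) {
        auto it = index.find(key);
        assert(it != index.end());
        const unsigned int i = it->second;
        // Move the last element into the freed slot, then shrink.
        values[i] = values.back();
        keys[i] = keys.back();
        values.pop_back();
        keys.pop_back();
        index.erase(it);
        if (i < values.size()) {
            // The element that was moved now lives at slot i; patch its
            // index entry, mirroring `it->second = index;` in the diff.
            auto moved = index.find(keys[i]);
            assert(moved != index.end());
            moved->second = i;
        }
    }
};
```

The diff gets away without the extra keys vector because each VoxelDataBlock already stores its own position.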
@@ -195,7 +195,8 @@ VoxelDataBlock *VoxelDataMap::set_block_buffer(Vector3i bpos, std::shared_ptr<Vo
 }
 
 bool VoxelDataMap::has_block(Vector3i pos) const {
-	return /*(_last_accessed_block != nullptr && _last_accessed_block->pos == pos) ||*/ _blocks_map.has(pos);
+	return /*(_last_accessed_block != nullptr && _last_accessed_block->pos == pos) ||*/
+			_blocks_map.find(pos) != _blocks_map.end();
 }
 
 bool VoxelDataMap::is_block_surrounded(Vector3i pos) const {
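has_block() now spells membership as find() != end(), the portable pre-C++20 idiom; if the codebase could assume C++20, the same check could be written with contains(). A two-line hedged sketch:

```cpp
#include <unordered_map>

bool has_key(const std::unordered_map<int, unsigned int> &map, int key) {
    // Pre-C++20 membership test; with C++20 this could be map.contains(key).
    return map.find(key) != map.end();
}
```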
@@ -5,8 +5,8 @@
 #include "../util/profiling.h"
 #include "voxel_data_block.h"
 
-#include <core/hash_map.h>
 #include <scene/main/node.h>
+#include <unordered_map>
 
 // Infinite voxel storage by means of octants like Gridmap, within a constant LOD.
 // Convenience functions to access VoxelBuffers internally will lock them to protect against multithreaded access.
@@ -74,9 +74,9 @@ public:
 		if (_last_accessed_block && _last_accessed_block->position == bpos) {
 			_last_accessed_block = nullptr;
 		}
-		unsigned int *iptr = _blocks_map.getptr(bpos);
-		if (iptr != nullptr) {
-			const unsigned int i = *iptr;
+		auto it = _blocks_map.find(bpos);
+		if (it != _blocks_map.end()) {
+			const unsigned int i = it->second;
 #ifdef DEBUG_ENABLED
 			CRASH_COND(i >= _blocks.size());
 #endif
@@ -162,8 +162,10 @@ private:
 	FixedArray<uint64_t, VoxelBufferInternal::MAX_CHANNELS> _default_voxel;
 
 	// Blocks stored with a spatial hash in all 3D directions.
-	// RELATIONSHIP = 2 because it delivers better performance with this kind of key and hash (less collisions).
-	HashMap<Vector3i, unsigned int, Vector3iHasher, HashMapComparatorDefault<Vector3i>, 3, 2> _blocks_map;
+	// Before I used Godot's HashMap with RELATIONSHIP = 2 because that delivers better performance compared to
+	// defaults, but it sometimes has very long stalls on removal, which std::unordered_map doesn't seem to have
+	// (not as badly). Also overall performance is slightly better.
+	std::unordered_map<Vector3i, unsigned int> _blocks_map;
 	std::vector<VoxelDataBlock *> _blocks;
 
 	// Voxel access will most frequently be in contiguous areas, so the same blocks are accessed.
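Declaring `std::unordered_map<Vector3i, unsigned int>` without an explicit hasher only compiles if a `std::hash<Vector3i>` specialization is visible; that piece is not part of this diff, so the codebase presumably provides it elsewhere (the old code's Vector3iHasher plays the same role for Godot's HashMap). Purely as an illustration, a specialization for a Vector3i-like key could look like the sketch below; the key type, member names, and mixing constants are assumptions, not taken from the module.

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_map>

// Hypothetical stand-in for the module's Vector3i key type.
struct Vec3iKey {
    int32_t x, y, z;
    bool operator==(const Vec3iKey &o) const {
        return x == o.x && y == o.y && z == o.z;
    }
};

namespace std {
template <>
struct hash<Vec3iKey> {
    size_t operator()(const Vec3iKey &v) const noexcept {
        // Classic 3D spatial hash: XOR the coordinates scaled by large primes.
        const uint64_t h = uint64_t(uint32_t(v.x)) * 73856093u ^
                uint64_t(uint32_t(v.y)) * 19349663u ^
                uint64_t(uint32_t(v.z)) * 83492791u;
        return size_t(h);
    }
};
} // namespace std

// With the specialization in scope, the default hasher is enough,
// mirroring the `_blocks_map` declaration above:
static std::unordered_map<Vec3iKey, unsigned int> example_map;
```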