Compare commits

...

5 Commits

Author SHA1 Message Date
Marc Gilleron 3606424947 Fix bool used as pointer 2022-09-03 18:13:26 +01:00
Marc Gilleron dda396bb00 Fix narrowing conversion warning 2022-09-03 18:12:37 +01:00
Marc Gilleron 08bc5095d1 Removed old code 2022-09-03 17:51:47 +01:00
Marc Gilleron fbceed3666 Fix errors in unused template functions 2022-09-03 17:46:20 +01:00
Marc Gilleron 1e3a4abe08 Fix tests compilation 2022-09-03 17:41:34 +01:00
6 changed files with 17 additions and 199 deletions

View File

@@ -109,7 +109,12 @@ inline real_t raw_voxel_to_real(uint64_t value, VoxelBufferInternal::Depth depth
namespace {
uint64_t g_default_values[VoxelBufferInternal::MAX_CHANNELS] = {
0, // TYPE
snorm_to_s16(1.f), // SDF
// Cast explicitly to avoid warning about narrowing conversion, the intent is to store all bits of the value
// as-is in a type that can store them all. The interpretation of the type is meaningless (depends on its use). It
// should be possible to cast it back to the actual type with no loss of data, as long as all bits are preserved.
uint16_t(snorm_to_s16(1.f)), // SDF
encode_indices_to_packed_u16(0, 1, 2, 3), // INDICES
encode_weights_to_packed_u16(15, 0, 0, 0), // WEIGHTS
0, 0, 0, 0 //
@@ -128,7 +133,7 @@ VoxelBufferInternal::VoxelBufferInternal() {
// 16-bit is better on average to handle large worlds
_channels[CHANNEL_SDF].depth = DEFAULT_SDF_CHANNEL_DEPTH;
_channels[CHANNEL_SDF].defval = snorm_to_s16(1.f);
_channels[CHANNEL_SDF].defval = uint16_t(snorm_to_s16(1.f));
_channels[CHANNEL_INDICES].depth = DEPTH_16_BIT;
_channels[CHANNEL_INDICES].defval = encode_indices_to_packed_u16(0, 1, 2, 3);

View File

@@ -460,7 +460,7 @@ VoxelDataBlock *VoxelData::try_set_block_buffer(Vector3i block_position, unsigned
if (buffer->get_size() != Vector3iUtil::create(get_block_size())) {
// Voxel block size is incorrect, drop it
ERR_PRINT("Block is different from expected size");
return false;
return nullptr;
}
// Store buffer

View File

@@ -110,60 +110,6 @@ public:
// This is necessary for editing destructively.
bool is_area_loaded(const Box3i p_voxels_box) const;
// Executes a read+write operation on all voxels in the given area, on a specific channel.
// If the area intersects the boundaries of the volume, it will be clipped.
// If the area intersects blocks that aren't loaded, the operation will be cancelled.
// Returns the box of voxels which were effectively processed.
template <typename F>
Box3i write_box(const Box3i &p_voxel_box, unsigned int channel_index, F action) {
const Box3i voxel_box = p_voxel_box.clipped(get_bounds());
if (!is_area_loaded(voxel_box)) {
ZN_PRINT_VERBOSE("Area not editable");
return Box3i();
}
Ref<VoxelGenerator> generator = _generator;
VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
{
// New blocks can be created in the map so we have to lock for writing
RWLockWrite wlock(data_lod0.map_lock);
data_lod0.map.write_box(
voxel_box, channel_index, action, [&generator](VoxelBufferInternal &voxels, Vector3i pos) {
if (generator.is_valid()) {
VoxelGenerator::VoxelQueryData q{ voxels, pos, 0 };
generator->generate_block(q);
}
});
}
return voxel_box;
}
// Executes a read+write operation on all voxels in the given area, on two specific channels.
// If the area intersects the boundaries of the volume, it will be clipped.
// If the area intersects blocks that aren't loaded, the operation will be cancelled.
// Returns the box of voxels which were effectively processed.
template <typename F>
Box3i write_box_2(const Box3i &p_voxel_box, unsigned int channel1_index, unsigned int channel2_index, F action) {
const Box3i voxel_box = p_voxel_box.clipped(get_bounds());
if (!is_area_loaded(voxel_box)) {
ZN_PRINT_VERBOSE("Area not editable");
return Box3i();
}
Ref<VoxelGenerator> generator = _generator;
VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
{
// New blocks can be created in the map so we have to lock for writing
RWLockWrite wlock(data_lod0.map_lock);
data_lod0.map.write_box_2(voxel_box, channel1_index, channel2_index, action,
[&generator](VoxelBufferInternal &voxels, Vector3i pos) {
if (generator.is_valid()) {
VoxelGenerator::VoxelQueryData q{ voxels, pos, 0 };
generator->generate_block(q);
}
});
}
return voxel_box;
}
// Generates all non-present blocks in preparation for an edit.
// Every block intersecting with the box at every LOD will be checked.
// This function runs sequentially and should be thread-safe. May be used if blocks are immediately needed.

View File

@@ -317,114 +317,4 @@ bool VoxelDataMap::is_area_fully_loaded(const Box3i voxels_box) const {
});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator, bool is_streaming) {
ZN_PROFILE_SCOPE();
//ERR_FAIL_COND_MSG(_full_load_mode == false, nullptr, "This function can only be used in full load mode");
struct Task {
Vector3i block_pos;
uint32_t lod_index;
std::shared_ptr<VoxelBufferInternal> voxels;
};
std::vector<Task> todo;
// We'll pack tasks per LOD so we'll have less locking to do
std::vector<unsigned int> count_per_lod;
const unsigned int data_block_size = data.lods[0].map.get_block_size();
// Find empty slots
for (unsigned int lod_index = 0; lod_index < data.lod_count; ++lod_index) {
const Box3i block_box = voxel_box.downscaled(data_block_size << lod_index);
//ZN_PRINT_VERBOSE(format("Preloading box {} at lod {} synchronously", block_box, lod_index));
VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
const unsigned int prev_size = todo.size();
{
RWLockRead rlock(data_lod.map_lock);
block_box.for_each_cell([&data_lod, lod_index, &todo, is_streaming](Vector3i block_pos) {
// We don't check "loading blocks", because this function wants to complete the task right now.
const VoxelDataBlock *block = data_lod.map.get_block(block_pos);
if (is_streaming) {
// Non-resident blocks must not be touched because we don't know what's in them.
// We can generate caches if resident ones have no voxel data.
if (block != nullptr && !block->has_voxels()) {
todo.push_back(Task{ block_pos, lod_index, nullptr });
}
} else {
// We can generate anywhere voxel data is not in memory
if (block == nullptr || !block->has_voxels()) {
todo.push_back(Task{ block_pos, lod_index, nullptr });
}
}
});
}
count_per_lod.push_back(todo.size() - prev_size);
}
const Vector3i block_size = Vector3iUtil::create(data_block_size);
// Generate
for (unsigned int i = 0; i < todo.size(); ++i) {
Task &task = todo[i];
task.voxels = make_shared_instance<VoxelBufferInternal>();
task.voxels->create(block_size);
// TODO Format?
if (generator != nullptr) {
ZN_PROFILE_SCOPE_NAMED("Generate");
VoxelGenerator::VoxelQueryData q{ *task.voxels, task.block_pos * (data_block_size << task.lod_index),
task.lod_index };
generator->generate_block(q);
data.modifiers.apply(q.voxel_buffer, AABB(q.origin_in_voxels, q.voxel_buffer.get_size() << q.lod));
}
}
// Populate slots
unsigned int task_index = 0;
for (unsigned int lod_index = 0; lod_index < data.lod_count; ++lod_index) {
ZN_ASSERT(lod_index < count_per_lod.size());
const unsigned int count = count_per_lod[lod_index];
if (count > 0) {
const unsigned int end_task_index = task_index + count;
VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
RWLockWrite wlock(data_lod.map_lock);
for (; task_index < end_task_index; ++task_index) {
Task &task = todo[task_index];
ZN_ASSERT(task.lod_index == lod_index);
const VoxelDataBlock *prev_block = data_lod.map.get_block(task.block_pos);
if (prev_block != nullptr && prev_block->has_voxels()) {
// Sorry, that block has been set in the meantime by another thread.
// We'll assume the block we just generated is redundant and discard it.
continue;
}
data_lod.map.set_block_buffer(task.block_pos, task.voxels, true);
}
}
}
}
void clear_cached_blocks_in_voxel_area(VoxelDataLodMap &data, Box3i p_voxel_box) {
for (unsigned int lod_index = 0; lod_index < data.lod_count; ++lod_index) {
VoxelDataLodMap::Lod &lod = data.lods[lod_index];
RWLockRead rlock(lod.map_lock);
const Box3i blocks_box = p_voxel_box.downscaled(lod.map.get_block_size() << lod_index);
blocks_box.for_each_cell_zxy([&lod](const Vector3i bpos) {
VoxelDataBlock *block = lod.map.get_block(bpos);
if (block == nullptr || block->is_edited() || block->is_modified()) {
return;
}
block->clear_voxels();
});
}
}
*/
} // namespace zylann::voxel

View File

@@ -206,29 +206,6 @@ private:
unsigned int _lod_index = 0;
};
/*struct VoxelDataLodMap {
struct Lod {
VoxelDataMap map;
// This lock should be locked in write mode only when the map gets modified (adding or removing blocks).
// Otherwise it may be locked in read mode.
// It is possible to unlock it after we are done querying the map.
RWLock map_lock;
};
// Each LOD works in a set of coordinates spanning 2x more voxels the higher their index is
FixedArray<Lod, constants::MAX_LOD> lods;
unsigned int lod_count = 1;
VoxelModifierStack modifiers;
};
// Generates all non-present blocks in preparation for an edit.
// Every block intersecting with the box at every LOD will be checked.
// This function runs sequentially and should be thread-safe. May be used if blocks are immediately needed.
// It will block if other threads are accessing the same data.
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator, bool is_streaming);
// Clears voxel data from blocks that are pure results of generators and modifiers.
void clear_cached_blocks_in_voxel_area(VoxelDataLodMap &data, Box3i p_voxel_box);*/
} // namespace zylann::voxel
#endif // VOXEL_MAP_H

View File

@@ -5,6 +5,7 @@
#include "../meshers/blocky/voxel_blocky_library.h"
#include "../meshers/cubes/voxel_mesher_cubes.h"
#include "../storage/voxel_buffer_gd.h"
#include "../storage/voxel_data.h"
#include "../storage/voxel_data_map.h"
#include "../storage/voxel_metadata_variant.h"
#include "../streams/instance_data.h"
@@ -98,7 +99,7 @@ void test_voxel_data_map_paste_fill() {
buffer.fill(voxel_value, channel);
VoxelDataMap map;
map.create(4, 0);
map.create(0);
const Box3i box(Vector3i(10, 10, 10), buffer.get_size());
@@ -141,7 +142,7 @@ void test_voxel_data_map_paste_mask() {
}
VoxelDataMap map;
map.create(4, 0);
map.create(0);
const Box3i box(Vector3i(10, 10, 10), buffer.get_size());
@@ -200,7 +201,7 @@ void test_voxel_data_map_copy() {
static const int channel = VoxelBufferInternal::CHANNEL_TYPE;
VoxelDataMap map;
map.create(4, 0);
map.create(0);
Box3i box(10, 10, 10, 32, 16, 32);
VoxelBufferInternal buffer;
@@ -1572,13 +1573,12 @@ void test_run_blocky_random_tick() {
tickable_voxel->set_random_tickable(true);
// Create test map
VoxelDataMap map;
map.create(constants::DEFAULT_BLOCK_SIZE_PO2, 0);
VoxelData data;
{
// All blocks of this map will be the same,
// an interleaving of all block types
VoxelBufferInternal model_buffer;
model_buffer.create(Vector3iUtil::create(map.get_block_size()));
model_buffer.create(Vector3iUtil::create(data.get_block_size()));
for (int z = 0; z < model_buffer.get_size().z; ++z) {
for (int x = 0; x < model_buffer.get_size().x; ++x) {
for (int y = 0; y < model_buffer.get_size().y; ++y) {
@@ -1589,11 +1589,11 @@ void test_run_blocky_random_tick() {
}
const Box3i world_blocks_box(-4, -4, -4, 8, 8, 8);
world_blocks_box.for_each_cell_zxy([&map, &model_buffer](Vector3i block_pos) {
world_blocks_box.for_each_cell_zxy([&data, &model_buffer](Vector3i block_pos) {
std::shared_ptr<VoxelBufferInternal> buffer = make_shared_instance<VoxelBufferInternal>();
buffer->create(model_buffer.get_size());
buffer->copy_from(model_buffer);
map.set_block_buffer(block_pos, buffer, false);
ZYLANN_TEST_ASSERT(data.try_set_block_buffer(block_pos, 0, buffer, true, false) != nullptr);
});
}
@@ -1630,7 +1630,7 @@ void test_run_blocky_random_tick() {
Math::seed(131183);
VoxelToolTerrain::run_blocky_random_tick_static(
map, voxel_box, **library, 1000, 4, &cb, [](void *self, Vector3i pos, int64_t val) {
data, voxel_box, **library, 1000, 4, &cb, [](void *self, Vector3i pos, int64_t val) {
Callback *cb = (Callback *)self;
return cb->exec(pos, val);
});