Replaced VoxelDataMap with VoxelData in VoxelLodTerrain

master
Marc Gilleron 2022-08-28 20:45:42 +01:00
parent dfbd7abfc1
commit 414aea52f3
23 changed files with 1407 additions and 329 deletions


@ -28,7 +28,7 @@ VoxelToolLodTerrain::VoxelToolLodTerrain(VoxelLodTerrain *terrain) : _terrain(te
bool VoxelToolLodTerrain::is_area_editable(const Box3i &box) const {
ERR_FAIL_COND_V(_terrain == nullptr, false);
return _terrain->is_area_editable(box);
return _terrain->get_storage().is_area_loaded(box);
}
// Binary search can be more accurate than linear regression because the SDF can be inaccurate in the first place.
@ -80,17 +80,22 @@ float approximate_distance_to_isosurface_binary_search(
Ref<VoxelRaycastResult> VoxelToolLodTerrain::raycast(
Vector3 pos, Vector3 dir, float max_distance, uint32_t collision_mask) {
// TODO Transform input if the terrain is rotated
// TODO Optimization: implement broad-phase on blocks to minimize locking and increase performance
// TODO Implement reverse raycast? (going from inside ground to air, could be useful for undigging)
// TODO Optimization: voxel raycast uses `get_voxel` which is the slowest, but could be made faster.
// Instead, do a broad-phase on blocks. If a block's voxels need to be parsed, get all positions the ray could go
// through in that block, then query them all at once (better for bulk processing without going again through
// locking and data structures, and allows SIMD). Then check results in order.
// If no hit is found, carry on with next blocks.
struct RaycastPredicate {
VoxelLodTerrain *terrain;
VoxelData &data;
bool operator()(const VoxelRaycastState &rs) {
// This is not particularly optimized, but runs fast enough for player raycasts
VoxelSingleValue defval;
defval.f = 1.f;
const VoxelSingleValue v = terrain->get_voxel(rs.hit_position, VoxelBufferInternal::CHANNEL_SDF, defval);
const VoxelSingleValue v = data.get_voxel(rs.hit_position, VoxelBufferInternal::CHANNEL_SDF, defval);
return v.f < 0;
}
};
@ -98,7 +103,7 @@ Ref<VoxelRaycastResult> VoxelToolLodTerrain::raycast(
Ref<VoxelRaycastResult> res;
// We use grid-raycast as a middle-phase to roughly detect where the hit will be
RaycastPredicate predicate = { _terrain };
RaycastPredicate predicate = { _terrain->get_storage() };
Vector3i hit_pos;
Vector3i prev_pos;
float hit_distance;
@ -127,17 +132,17 @@ Ref<VoxelRaycastResult> VoxelToolLodTerrain::raycast(
if (_raycast_binary_search_iterations > 0) {
// This is not particularly optimized, but runs fast enough for player raycasts
struct VolumeSampler {
VoxelLodTerrain *terrain;
VoxelData &data;
inline float operator()(const Vector3i &pos) const {
VoxelSingleValue defval;
defval.f = 1.f;
const VoxelSingleValue value = terrain->get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, defval);
const VoxelSingleValue value = data.get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, defval);
return value.f;
}
};
VolumeSampler sampler{ _terrain };
VolumeSampler sampler{ _terrain->get_storage() };
d = hit_distance_prev +
approximate_distance_to_isosurface_binary_search(sampler, pos + dir * hit_distance_prev, dir,
hit_distance - hit_distance_prev, _raycast_binary_search_iterations);
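// Illustrative sketch of what a refinement helper like `approximate_distance_to_isosurface_binary_search`
// can look like, assuming the sampler returns signed distance (negative inside matter), the segment
// [0, distance] along `dir` from `origin` brackets the surface, and positions are rounded down to voxel
// coordinates (the actual function in this module may sample and round differently).
template <typename VolumeSampler_T>
float binary_search_isosurface_sketch(
		const VolumeSampler_T &sampler, Vector3 origin, Vector3 dir, float distance, int iterations) {
	float d_air = 0.f; // Distance along the ray known to be outside matter (SDF >= 0)
	float d_solid = distance; // Distance along the ray known to be inside matter (SDF < 0)
	for (int i = 0; i < iterations; ++i) {
		const float d_mid = 0.5f * (d_air + d_solid);
		const Vector3 p = origin + dir * d_mid;
		const float sd = sampler(Vector3i(p.floor()));
		if (sd < 0.f) {
			d_solid = d_mid; // Surface crossing is before the midpoint
		} else {
			d_air = d_mid; // Surface crossing is after the midpoint
		}
	}
	return 0.5f * (d_air + d_solid);
}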
@ -172,17 +177,11 @@ void VoxelToolLodTerrain::do_sphere(Vector3 center, float radius) {
return;
}
std::shared_ptr<VoxelDataLodMap> data = _terrain->get_storage();
ERR_FAIL_COND(data == nullptr);
VoxelDataLodMap::Lod &data_lod = data->lods[0];
VoxelData &data = _terrain->get_storage();
preload_box(*data, op.box, _terrain->get_generator().ptr(), !_terrain->is_full_load_mode_enabled());
{
RWLockRead rlock(data_lod.map_lock);
op.blocks.reference_area(data_lod.map, op.box);
op();
}
data.pre_generate_box(op.box);
data.get_blocks_grid(op.blocks, op.box, 0);
op();
_post_edit(op.box);
}
@ -210,17 +209,11 @@ void VoxelToolLodTerrain::do_hemisphere(Vector3 center, float radius, Vector3 fl
return;
}
std::shared_ptr<VoxelDataLodMap> data = _terrain->get_storage();
ERR_FAIL_COND(data == nullptr);
VoxelDataLodMap::Lod &data_lod = data->lods[0];
VoxelData &data = _terrain->get_storage();
preload_box(*data, op.box, _terrain->get_generator().ptr(), !_terrain->is_full_load_mode_enabled());
{
RWLockRead rlock(data_lod.map_lock);
op.blocks.reference_area(data_lod.map, op.box);
op();
}
data.pre_generate_box(op.box);
data.get_blocks_grid(op.blocks, op.box, 0);
op();
_post_edit(op.box);
}
@ -228,25 +221,20 @@ void VoxelToolLodTerrain::do_hemisphere(Vector3 center, float radius, Vector3 fl
template <typename Op_T>
class VoxelToolAsyncEdit : public IThreadedTask {
public:
VoxelToolAsyncEdit(Op_T op, std::shared_ptr<VoxelDataLodMap> data) : _op(op), _data(data) {
VoxelToolAsyncEdit(Op_T op, std::shared_ptr<VoxelData> data) : _op(op), _data(data) {
_tracker = make_shared_instance<AsyncDependencyTracker>(1);
}
void run(ThreadedTaskContext ctx) override {
ZN_PROFILE_SCOPE();
CRASH_COND(_data == nullptr);
VoxelDataLodMap::Lod &data_lod = _data->lods[0];
{
// TODO Prefer a spatial lock?
// We want blocks inside the edited area to not be accessed by other threads,
// but this locks the entire map, not just our area. If we used a spatial lock we would only need to lock
// the map for the duration of `reference_area`.
RWLockRead rlock(data_lod.map_lock);
// TODO May want to fail if not all blocks were found
_op.blocks.reference_area(data_lod.map, _op.box);
// TODO Need to apply modifiers
_op();
}
ZN_ASSERT(_data != nullptr);
// TODO Thread-safety: not sure if this is entirely safe, VoxelDataBlock members aren't protected.
// Only the map and VoxelBuffers are. To fix this we could migrate to a spatial lock.
// TODO May want to fail if not all blocks were found
// TODO Need to apply modifiers
_data->get_blocks_grid(_op.blocks, _op.box, 0);
_op();
_tracker->post_complete();
}
@ -257,7 +245,7 @@ public:
private:
Op_T _op;
// We reference this just to keep map pointers alive
std::shared_ptr<VoxelDataLodMap> _data;
std::shared_ptr<VoxelData> _data;
std::shared_ptr<AsyncDependencyTracker> _tracker;
};
@ -280,8 +268,7 @@ void VoxelToolLodTerrain::do_sphere_async(Vector3 center, float radius) {
return;
}
std::shared_ptr<VoxelDataLodMap> data = _terrain->get_storage();
ERR_FAIL_COND(data == nullptr);
std::shared_ptr<VoxelData> data = _terrain->get_storage_shared();
VoxelToolAsyncEdit<ops::DoSphere> *task = memnew(VoxelToolAsyncEdit<ops::DoSphere>(op, data));
_terrain->push_async_edit(task, op.box, task->get_tracker());
@ -293,19 +280,20 @@ void VoxelToolLodTerrain::copy(Vector3i pos, Ref<gd::VoxelBuffer> dst, uint8_t c
if (channels_mask == 0) {
channels_mask = (1 << _channel);
}
_terrain->copy(pos, dst->get_buffer(), channels_mask);
_terrain->get_storage().copy(pos, dst->get_buffer(), channels_mask);
}
float VoxelToolLodTerrain::get_voxel_f_interpolated(Vector3 position) const {
ZN_PROFILE_SCOPE();
ERR_FAIL_COND_V(_terrain == nullptr, 0);
const int channel = get_channel();
VoxelLodTerrain *terrain = _terrain;
VoxelData &data = _terrain->get_storage();
// TODO Optimization: is it worth making a fast-path for this?
return get_sdf_interpolated(
[terrain, channel](Vector3i ipos) {
[&data, channel](Vector3i ipos) {
VoxelSingleValue defval;
defval.f = 1.f;
VoxelSingleValue value = terrain->get_voxel(ipos, channel, defval);
VoxelSingleValue value = data.get_voxel(ipos, channel, defval);
return value.f;
},
position);
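// Minimal sketch of the kind of helper `get_sdf_interpolated` is assumed to be here: trilinear
// interpolation of the 8 voxel corners surrounding the queried position, each corner obtained through
// the provided functor (the actual implementation may differ in details such as rounding or clamping).
template <typename VoxelSampler_T>
float get_sdf_interpolated_sketch(const VoxelSampler_T &get_voxel, Vector3 pos) {
	const Vector3i c000 = Vector3i(pos.floor());
	const Vector3 t = pos - Vector3(c000); // Fractional position inside the cell, in [0..1)
	// Sample the 8 corners of the cell containing `pos`
	const float s000 = get_voxel(c000);
	const float s100 = get_voxel(c000 + Vector3i(1, 0, 0));
	const float s010 = get_voxel(c000 + Vector3i(0, 1, 0));
	const float s110 = get_voxel(c000 + Vector3i(1, 1, 0));
	const float s001 = get_voxel(c000 + Vector3i(0, 0, 1));
	const float s101 = get_voxel(c000 + Vector3i(1, 0, 1));
	const float s011 = get_voxel(c000 + Vector3i(0, 1, 1));
	const float s111 = get_voxel(c000 + Vector3i(1, 1, 1));
	// Interpolate along X, then Y, then Z
	const float s00 = Math::lerp(s000, s100, float(t.x));
	const float s10 = Math::lerp(s010, s110, float(t.x));
	const float s01 = Math::lerp(s001, s101, float(t.x));
	const float s11 = Math::lerp(s011, s111, float(t.x));
	const float s0 = Math::lerp(s00, s10, float(t.y));
	const float s1 = Math::lerp(s01, s11, float(t.y));
	return Math::lerp(s0, s1, float(t.z));
}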
@ -315,25 +303,27 @@ uint64_t VoxelToolLodTerrain::_get_voxel(Vector3i pos) const {
ERR_FAIL_COND_V(_terrain == nullptr, 0);
VoxelSingleValue defval;
defval.i = 0;
return _terrain->get_voxel(pos, _channel, defval).i;
return _terrain->get_storage().get_voxel(pos, _channel, defval).i;
}
float VoxelToolLodTerrain::_get_voxel_f(Vector3i pos) const {
ERR_FAIL_COND_V(_terrain == nullptr, 0);
VoxelSingleValue defval;
defval.f = 1.f;
return _terrain->get_voxel(pos, _channel, defval).f;
return _terrain->get_storage().get_voxel(pos, _channel, defval).f;
}
void VoxelToolLodTerrain::_set_voxel(Vector3i pos, uint64_t v) {
ERR_FAIL_COND(_terrain == nullptr);
_terrain->try_set_voxel_without_update(pos, _channel, v);
_terrain->get_storage().try_set_voxel(v, pos, _channel);
// No post_update, the parent class does it, it's a generic slow implementation
}
void VoxelToolLodTerrain::_set_voxel_f(Vector3i pos, float v) {
ERR_FAIL_COND(_terrain == nullptr);
// TODO Format should be accessible from terrain
_terrain->try_set_voxel_without_update(pos, _channel, snorm_to_s16(v));
_terrain->get_storage().try_set_voxel_f(v, pos, _channel);
// No post_update, the parent class does it, it's a generic slow implementation
}
void VoxelToolLodTerrain::_post_edit(const Box3i &box) {
@ -729,11 +719,9 @@ void VoxelToolLodTerrain::stamp_sdf(
return;
}
std::shared_ptr<VoxelDataLodMap> data = _terrain->get_storage();
ERR_FAIL_COND(data == nullptr);
VoxelDataLodMap::Lod &data_lod = data->lods[0];
VoxelData &data = _terrain->get_storage();
preload_box(*data, voxel_box, _terrain->get_generator().ptr(), !_terrain->is_full_load_mode_enabled());
data.pre_generate_box(voxel_box);
// TODO Maybe more efficient to "rasterize" the box? We're going to iterate voxels the box doesn't intersect
// TODO Maybe we should scale SDF values based on the scale of the transform too
@ -753,11 +741,8 @@ void VoxelToolLodTerrain::stamp_sdf(
ZN_ASSERT_RETURN(buffer.get_channel_data(channel, op.shape.buffer));
VoxelDataGrid grid;
{
RWLockRead rlock(data_lod.map_lock);
grid.reference_area(data_lod.map, voxel_box);
grid.write_box(voxel_box, VoxelBufferInternal::CHANNEL_SDF, op);
}
data.get_blocks_grid(grid, voxel_box, 0);
grid.write_box(voxel_box, VoxelBufferInternal::CHANNEL_SDF, op);
_post_edit(voxel_box);
}
@ -784,16 +769,15 @@ void VoxelToolLodTerrain::do_graph(Ref<VoxelGeneratorGraph> graph, Transform3D t
return;
}
std::shared_ptr<VoxelDataLodMap> data = _terrain->get_storage();
ERR_FAIL_COND(data == nullptr);
VoxelData &data = _terrain->get_storage();
preload_box(*data, box, _terrain->get_generator().ptr(), !_terrain->is_full_load_mode_enabled());
data.pre_generate_box(box);
const unsigned int channel_index = VoxelBufferInternal::CHANNEL_SDF;
VoxelBufferInternal buffer;
buffer.create(box.size);
_terrain->copy(box.pos, buffer, 1 << channel_index);
data.copy(box.pos, buffer, 1 << channel_index);
buffer.decompress_channel(channel_index);
@ -880,7 +864,7 @@ void VoxelToolLodTerrain::do_graph(Ref<VoxelGeneratorGraph> graph, Transform3D t
scale_and_store_sdf(buffer, in_sdf_full);
_terrain->paste(box.pos, buffer, 1 << channel_index);
data.paste(box.pos, buffer, 1 << channel_index, false, 0, false);
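// (In the call above, the trailing arguments map to `use_mask = false`, `mask_value = 0`,
// `create_new_blocks = false`, following the `VoxelData::paste` declaration.)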
_post_edit(box);
}


@ -1,8 +1,8 @@
#include "distance_normalmaps.h"
#include "../edition/funcs.h"
#include "../generators/voxel_generator.h"
#include "../storage/voxel_data.h"
#include "../storage/voxel_data_grid.h"
#include "../storage/voxel_data_map.h"
#include "../util/math/conv.h"
#include "../util/math/triangle.h"
#include "../util/profiling.h"
@ -165,7 +165,7 @@ inline Vector3f encode_normal_xyz(const Vector3f n) {
return Vector3f(0.5f) + 0.5f * n;
}
void query_sdf_with_edits(VoxelGenerator &generator, const VoxelDataLodMap &voxel_data, const VoxelDataGrid &grid,
void query_sdf_with_edits(VoxelGenerator &generator, const VoxelData &voxel_data, const VoxelDataGrid &grid,
Span<const float> query_x_buffer, Span<const float> query_y_buffer, Span<const float> query_z_buffer,
Span<float> query_sdf_buffer, Vector3f query_min_pos, Vector3f query_max_pos) {
ZN_PROFILE_SCOPE();
@ -228,8 +228,8 @@ void query_sdf_with_edits(VoxelGenerator &generator, const VoxelDataLodMap &voxe
generator.generate_series(to_span(x_gen, gen_count), to_span(y_gen, gen_count), to_span(z_gen, gen_count),
channel, to_span(gen_samples, gen_count), query_min_pos, query_max_pos);
voxel_data.modifiers.apply(to_span(x_gen, gen_count), to_span(y_gen, gen_count), to_span(z_gen, gen_count),
to_span(gen_samples, gen_count), query_min_pos, query_max_pos);
voxel_data.get_modifiers().apply(to_span(x_gen, gen_count), to_span(y_gen, gen_count),
to_span(z_gen, gen_count), to_span(gen_samples, gen_count), query_min_pos, query_max_pos);
for (unsigned int j = 0; j < gen_count; ++j) {
sd_samples[i_gen[j]] = gen_samples[j];
@ -244,9 +244,9 @@ void query_sdf_with_edits(VoxelGenerator &generator, const VoxelDataLodMap &voxe
}
}
bool try_query_sdf_with_edits(VoxelGenerator &generator, const VoxelDataLodMap &voxel_data,
Span<const float> query_x_buffer, Span<const float> query_y_buffer, Span<const float> query_z_buffer,
Span<float> query_sdf_buffer, Vector3f query_min_pos, Vector3f query_max_pos) {
bool try_query_sdf_with_edits(VoxelGenerator &generator, const VoxelData &voxel_data, Span<const float> query_x_buffer,
Span<const float> query_y_buffer, Span<const float> query_z_buffer, Span<float> query_sdf_buffer,
Vector3f query_min_pos, Vector3f query_max_pos) {
ZN_PROFILE_SCOPE();
// Pad by 1 in case there are neighboring edited voxels. If not done, it creates a grid pattern following LOD0 block
@ -275,9 +275,10 @@ bool try_query_sdf_with_edits(VoxelGenerator &generator, const VoxelDataLodMap &
return false;
}
const VoxelDataLodMap::Lod &lod0 = voxel_data.lods[0];
RWLockRead rlock(lod0.map_lock);
tls_grid.reference_area(lod0.map, voxel_box);
voxel_data.get_blocks_grid(tls_grid, voxel_box, 0);
// const VoxelDataLodMap::Lod &lod0 = voxel_data.lods[0];
// RWLockRead rlock(lod0.map_lock);
// tls_grid.reference_area(lod0.map, voxel_box);
}
if (!tls_grid.has_any_block()) {
@ -291,7 +292,7 @@ bool try_query_sdf_with_edits(VoxelGenerator &generator, const VoxelDataLodMap &
return true;
}
inline void query_sdf(VoxelGenerator &generator, const VoxelDataLodMap *voxel_data, Span<const float> query_x_buffer,
inline void query_sdf(VoxelGenerator &generator, const VoxelData *voxel_data, Span<const float> query_x_buffer,
Span<const float> query_y_buffer, Span<const float> query_z_buffer, Span<float> query_sdf_buffer,
Vector3f query_min_pos, Vector3f query_max_pos) {
ZN_PROFILE_SCOPE();
@ -310,7 +311,7 @@ inline void query_sdf(VoxelGenerator &generator, const VoxelDataLodMap *voxel_da
query_sdf_buffer, query_min_pos, query_max_pos);
if (voxel_data != nullptr) {
voxel_data->modifiers.apply(
voxel_data->get_modifiers().apply(
query_x_buffer, query_y_buffer, query_z_buffer, query_sdf_buffer, query_min_pos, query_max_pos);
}
}
@ -326,8 +327,8 @@ inline void query_sdf(VoxelGenerator &generator, const VoxelDataLodMap *voxel_da
// Sample voxels inside the cell to compute a tile of world space normals from the SDF.
void compute_normalmap(ICellIterator &cell_iterator, Span<const Vector3f> mesh_vertices,
Span<const Vector3f> mesh_normals, Span<const int> mesh_indices, NormalMapData &normal_map_data,
unsigned int tile_resolution, VoxelGenerator &generator, const VoxelDataLodMap *voxel_data,
Vector3i origin_in_voxels, unsigned int lod_index, bool octahedral_encoding) {
unsigned int tile_resolution, VoxelGenerator &generator, const VoxelData *voxel_data, Vector3i origin_in_voxels,
unsigned int lod_index, bool octahedral_encoding) {
ZN_PROFILE_SCOPE();
ZN_ASSERT_RETURN(generator.supports_series_generation());


@ -19,7 +19,7 @@ class Image;
namespace zylann::voxel {
class VoxelGenerator;
struct VoxelDataLodMap;
class VoxelData;
// TODO This system could be extended to more than just normals
// - Texturing data
@ -85,8 +85,8 @@ public:
// Sample voxels inside the cell to compute a tile of world space normals from the SDF.
void compute_normalmap(ICellIterator &cell_iterator, Span<const Vector3f> mesh_vertices,
Span<const Vector3f> mesh_normals, Span<const int> mesh_indices, NormalMapData &normal_map_data,
unsigned int tile_resolution, VoxelGenerator &generator, const VoxelDataLodMap *voxel_data,
Vector3i origin_in_voxels, unsigned int lod_index, bool octahedral_encoding);
unsigned int tile_resolution, VoxelGenerator &generator, const VoxelData *voxel_data, Vector3i origin_in_voxels,
unsigned int lod_index, bool octahedral_encoding);
struct NormalMapImages {
#ifdef VOXEL_VIRTUAL_TEXTURE_USE_TEXTURE_ARRAY


@ -1,6 +1,6 @@
#include "generate_block_task.h"
#include "../storage/voxel_buffer_internal.h"
#include "../storage/voxel_data_map.h"
#include "../storage/voxel_data.h"
#include "../util/godot/funcs.h"
#include "../util/log.h"
#include "../util/profiling.h"
@ -45,7 +45,7 @@ void GenerateBlockTask::run(zylann::ThreadedTaskContext ctx) {
max_lod_hint = result.max_lod_hint;
if (data != nullptr) {
data->modifiers.apply(
data->get_modifiers().apply(
query_data.voxel_buffer, AABB(query_data.origin_in_voxels, query_data.voxel_buffer.get_size() << lod));
}


@ -8,7 +8,7 @@
namespace zylann::voxel {
struct VoxelDataLodMap;
class VoxelData;
class GenerateBlockTask : public IThreadedTask {
public:
@ -33,7 +33,7 @@ public:
bool drop_beyond_max_distance = true;
PriorityDependency priority_dependency;
std::shared_ptr<StreamingDependency> stream_dependency;
std::shared_ptr<VoxelDataLodMap> data;
std::shared_ptr<VoxelData> data;
std::shared_ptr<AsyncDependencyTracker> tracker;
};


@ -24,7 +24,7 @@ public:
std::vector<Vector3f> mesh_normals;
std::vector<int> mesh_indices;
Ref<VoxelGenerator> generator;
std::shared_ptr<VoxelDataLodMap> voxel_data;
std::shared_ptr<VoxelData> voxel_data;
Vector3i mesh_block_size;
uint8_t lod_index;
NormalMapSettings virtual_texture_settings;


@ -1,6 +1,6 @@
#include "mesh_block_task.h"
#include "../meshers/transvoxel/voxel_mesher_transvoxel.h"
#include "../storage/voxel_data_map.h"
#include "../storage/voxel_data.h"
#include "../terrain/voxel_mesh_block.h"
#include "../util/dstack.h"
#include "../util/godot/mesh.h"
@ -133,7 +133,7 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
for (unsigned int box_index = 0; box_index < count; ++box_index) {
Box3i box = boxes_to_generate[box_index];
box.difference(block_box, boxes_to_generate);
box.difference_to_vec(block_box, boxes_to_generate);
#ifdef DEBUG_ENABLED
CRASH_COND(box_index >= boxes_to_generate.size());
#endif
@ -259,7 +259,7 @@ void MeshBlockTask::run(zylann::ThreadedTaskContext ctx) {
const unsigned int min_padding = mesher->get_minimum_padding();
const unsigned int max_padding = mesher->get_maximum_padding();
const VoxelModifierStack *modifiers = data != nullptr ? &data->modifiers : nullptr;
const VoxelModifierStack *modifiers = data != nullptr ? &data->get_modifiers() : nullptr;
VoxelBufferInternal voxels;
copy_block_and_neighbors(to_span(blocks, blocks_count), voxels, min_padding, max_padding,


@ -10,6 +10,8 @@
namespace zylann::voxel {
class VoxelData;
// Asynchronous task generating a mesh from voxel blocks and their neighbors, in a particular volume
class MeshBlockTask : public IThreadedTask {
public:
@ -39,7 +41,7 @@ public:
bool require_virtual_texture = false;
PriorityDependency priority_dependency;
std::shared_ptr<MeshingDependency> meshing_dependency;
std::shared_ptr<VoxelDataLodMap> data;
std::shared_ptr<VoxelData> data;
NormalMapSettings virtual_texture_settings;
private:


@ -36,6 +36,7 @@ public:
std::vector<uint8_t> mesh_material_indices;
// In mesh block coordinates
Vector3i position;
// TODO Rename lod_index
uint8_t lod;
// Tells if the mesh resource was built as part of the task. If not, you need to build it on the main thread.
bool has_mesh_resource;
@ -57,6 +58,7 @@ public:
std::shared_ptr<VoxelBufferInternal> voxels;
UniquePtr<InstanceBlockData> instances;
Vector3i position;
// TODO Rename lod_index
uint8_t lod;
bool dropped;
bool max_lod_hint;


@ -2,7 +2,7 @@
#include "../../engine/voxel_engine.h"
#include "../../generators/voxel_generator.h"
#include "../../storage/voxel_buffer_gd.h"
#include "../../storage/voxel_data_map.h"
#include "../../storage/voxel_data.h"
#include "../../thirdparty/meshoptimizer/meshoptimizer.h"
#include "../../util/godot/funcs.h"
#include "../../util/math/conv.h"
@ -166,17 +166,18 @@ static void simplify(const transvoxel::MeshArrays &src_mesh, transvoxel::MeshArr
struct DeepSampler : transvoxel::IDeepSDFSampler {
VoxelGenerator &generator;
const VoxelDataLodMap &data;
const VoxelData &data;
const VoxelBufferInternal::ChannelId sdf_channel;
const Vector3i origin;
DeepSampler(VoxelGenerator &p_generator, const VoxelDataLodMap &p_data,
VoxelBufferInternal::ChannelId p_sdf_channel, Vector3i p_origin) :
DeepSampler(VoxelGenerator &p_generator, const VoxelData &p_data, VoxelBufferInternal::ChannelId p_sdf_channel,
Vector3i p_origin) :
generator(p_generator), data(p_data), sdf_channel(p_sdf_channel), origin(p_origin) {}
float get_single(Vector3i position_in_voxels, uint32_t lod_index) const override {
position_in_voxels += origin;
const Vector3i lod_pos = position_in_voxels >> lod_index;
return data.get_voxel_f(position_in_voxels, sdf_channel);
/*const Vector3i lod_pos = position_in_voxels >> lod_index;
const VoxelDataLodMap::Lod &lod = data.lods[lod_index];
unsigned int bsm = 0;
std::shared_ptr<VoxelBufferInternal> voxels;
@ -197,7 +198,7 @@ struct DeepSampler : transvoxel::IDeepSDFSampler {
return voxels->get_voxel_f(lod_pos & bsm, sdf_channel);
} else {
return generator.generate_single(position_in_voxels, sdf_channel).f;
}
}*/
}
};


@ -16,7 +16,7 @@ class VoxelBuffer;
class VoxelBufferInternal;
class VoxelGenerator;
struct VoxelDataLodMap;
class VoxelData;
// Base class for algorithms that generate meshes from voxels.
class VoxelMesher : public Resource {
@ -28,7 +28,7 @@ public:
// When using LOD, some meshers can use the generator and edited voxels to refine results.
// If not provided, the mesher will only use `voxels`.
VoxelGenerator *generator = nullptr;
const VoxelDataLodMap *data = nullptr;
const VoxelData *data = nullptr;
// Origin of the block is required when doing deep sampling.
Vector3i origin_in_voxels;
// LOD index. 0 means highest detail. 1 means half detail etc.


@ -234,6 +234,7 @@ void initialize_voxel_module(ModuleInitializationLevel p_level) {
ZN_PRINT_VERBOSE(format("Size of VoxelLodTerrain: {}", sizeof(VoxelLodTerrain)));
ZN_PRINT_VERBOSE(format("Size of VoxelInstancer: {}", sizeof(VoxelInstancer)));
ZN_PRINT_VERBOSE(format("Size of VoxelDataMap: {}", sizeof(VoxelDataMap)));
ZN_PRINT_VERBOSE(format("Size of VoxelData: {}", sizeof(VoxelData)));
ZN_PRINT_VERBOSE(format("Size of VoxelMesher::Output: {}", sizeof(VoxelMesher::Output)));
ZN_PRINT_VERBOSE(format("Size of VoxelEngine::BlockMeshOutput: {}", sizeof(VoxelEngine::BlockMeshOutput)));
if (RenderingDevice::get_singleton() != nullptr) {


@ -42,8 +42,8 @@ void VoxelModifier::set_operation(Operation op) {
if (_volume == nullptr) {
return;
}
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
VoxelData &data = _volume->get_storage();
VoxelModifierStack &modifiers = data.get_modifiers();
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN(modifier != nullptr);
ZN_ASSERT_RETURN(modifier->is_sdf());
@ -64,8 +64,8 @@ void VoxelModifier::set_smoothness(float s) {
if (_volume == nullptr) {
return;
}
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
VoxelData &data = _volume->get_storage();
VoxelModifierStack &modifiers = data.get_modifiers();
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN(modifier != nullptr);
ZN_ASSERT_RETURN(modifier->is_sdf());
@ -91,8 +91,8 @@ void VoxelModifier::_notification(int p_what) {
_volume = volume;
if (_volume != nullptr) {
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
VoxelData &data = _volume->get_storage();
VoxelModifierStack &modifiers = data.get_modifiers();
const uint32_t id = modifiers.allocate_id();
zylann::voxel::VoxelModifier *modifier = create(modifiers, id);
@ -113,8 +113,8 @@ void VoxelModifier::_notification(int p_what) {
case Node::NOTIFICATION_UNPARENTED: {
if (_volume != nullptr) {
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
VoxelData &data = _volume->get_storage();
VoxelModifierStack &modifiers = data.get_modifiers();
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN_MSG(modifier != nullptr, "The modifier node wasn't linked properly");
post_edit_modifier(*_volume, modifier->get_aabb());
@ -126,8 +126,8 @@ void VoxelModifier::_notification(int p_what) {
case Node3D::NOTIFICATION_LOCAL_TRANSFORM_CHANGED: {
if (_volume != nullptr && is_inside_tree()) {
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
VoxelData &data = _volume->get_storage();
VoxelModifierStack &modifiers = data.get_modifiers();
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN(modifier != nullptr);
@ -163,8 +163,8 @@ void VoxelModifier::_bind_methods() {
template <typename T>
T *get_modifier(VoxelLodTerrain &volume, uint32_t id, zylann::voxel::VoxelModifier::Type type) {
std::shared_ptr<VoxelDataLodMap> data = volume.get_storage();
VoxelModifierStack &modifiers = data->modifiers;
VoxelData &data = volume.get_storage();
VoxelModifierStack &modifiers = data.get_modifiers();
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(id);
ZN_ASSERT_RETURN_V(modifier != nullptr, nullptr);
ZN_ASSERT_RETURN_V(modifier->get_type() == type, nullptr);

storage/voxel_data.cpp (new file, 640 lines)

@ -0,0 +1,640 @@
#include "voxel_data.h"
#include "../util/dstack.h"
#include "../util/math/conv.h"
#include "voxel_data_grid.h"
namespace zylann::voxel {
VoxelData::VoxelData() {}
VoxelData::~VoxelData() {}
void VoxelData::set_lod_count(unsigned int p_lod_count) {
ZN_ASSERT(p_lod_count < constants::MAX_LOD);
ZN_ASSERT(p_lod_count >= 1);
RWLockWrite wlock(_rw_lock);
if (p_lod_count == _lod_count) {
return;
}
_lod_count = p_lod_count;
// Not entirely required, but changing LOD count at runtime is rarely needed
reset_maps_no_lock();
}
void VoxelData::reset_maps() {
RWLockWrite wlock(_rw_lock);
reset_maps_no_lock();
}
void VoxelData::reset_maps_no_lock() {
for (unsigned int lod_index = 0; lod_index < _lods.size(); ++lod_index) {
Lod &data_lod = _lods[lod_index];
// Instance new maps if we have more lods, or clear them otherwise
if (lod_index < _lod_count) {
data_lod.map.create(data_lod.map.get_block_size_pow2(), lod_index);
} else {
data_lod.map.clear();
}
}
}
void VoxelData::set_bounds(Box3i bounds) {
RWLockWrite wlock(_rw_lock);
_bounds_in_voxels = bounds;
}
void VoxelData::set_generator(Ref<VoxelGenerator> generator) {
RWLockWrite wlock(_rw_lock);
_generator = generator;
}
void VoxelData::set_stream(Ref<VoxelStream> stream) {
RWLockWrite wlock(_rw_lock);
_stream = stream;
}
void VoxelData::set_streaming_enabled(bool enabled) {
_streaming_enabled = enabled;
}
inline VoxelSingleValue get_voxel_with_lock(VoxelBufferInternal &vb, Vector3i pos, unsigned int channel) {
VoxelSingleValue v;
if (channel == VoxelBufferInternal::CHANNEL_SDF) {
RWLockRead rlock(vb.get_lock());
v.f = vb.get_voxel_f(pos.x, pos.y, pos.z, channel);
} else {
RWLockRead rlock(vb.get_lock());
v.i = vb.get_voxel(pos, channel);
}
return v;
}
// TODO Piggyback on `copy`? The implementation is quite complex, and it's not supposed to be an efficient use case
VoxelSingleValue VoxelData::get_voxel(Vector3i pos, unsigned int channel_index, VoxelSingleValue defval) const {
ZN_PROFILE_SCOPE();
if (!_bounds_in_voxels.contains(pos)) {
return defval;
}
Vector3i block_pos = pos >> get_block_size_po2();
bool generate = false;
if (_streaming_enabled) {
const Lod &data_lod0 = _lods[0];
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod0, block_pos, generate);
if (voxels == nullptr) {
// TODO We should be able to get a value if modifiers are used but not a base generator
Ref<VoxelGenerator> generator = get_generator();
if (generator.is_valid()) {
VoxelSingleValue value = generator->generate_single(pos, channel_index);
if (channel_index == VoxelBufferInternal::CHANNEL_SDF) {
float sdf = value.f;
_modifiers.apply(sdf, to_vec3(pos));
value.f = sdf;
}
return value;
}
} else {
const Vector3i rpos = data_lod0.map.to_local(pos);
return get_voxel_with_lock(*voxels, rpos, channel_index);
}
return defval;
} else {
// We might hit places where data isn't loaded; in this case we try to fall back on higher LOD indices
Vector3i voxel_pos = pos;
Ref<VoxelGenerator> generator = get_generator();
for (unsigned int lod_index = 0; lod_index < _lod_count; ++lod_index) {
const Lod &data_lod = _lods[lod_index];
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod, block_pos, generate);
if (voxels != nullptr) {
return get_voxel_with_lock(*voxels, data_lod.map.to_local(voxel_pos), channel_index);
} else if (generate) {
// TODO We should be able to get a value if modifiers are used but not a base generator
if (generator.is_valid()) {
VoxelSingleValue value = generator->generate_single(pos, channel_index);
if (channel_index == VoxelBufferInternal::CHANNEL_SDF) {
float sdf = value.f;
_modifiers.apply(sdf, to_vec3(pos));
value.f = sdf;
}
return value;
} else {
return defval;
}
}
// Fallback on lower LOD
block_pos = block_pos >> 1;
voxel_pos = voxel_pos >> 1;
}
return defval;
}
}
// TODO Piggyback on `paste`? The implementation is quite complex, and it's not supposed to be an efficient use case
bool VoxelData::try_set_voxel(uint64_t value, Vector3i pos, unsigned int channel_index) {
const Vector3i block_pos_lod0 = pos >> get_block_size_po2();
Lod &data_lod0 = _lods[0];
const Vector3i block_pos = data_lod0.map.voxel_to_block(pos);
bool can_generate = false;
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod0, block_pos, can_generate);
if (voxels == nullptr) {
if (_streaming_enabled && !can_generate) {
return false;
}
Ref<VoxelGenerator> generator = get_generator();
if (generator.is_valid()) {
voxels = make_shared_instance<VoxelBufferInternal>();
voxels->create(Vector3iUtil::create(get_block_size()));
VoxelGenerator::VoxelQueryData q{ *voxels, pos, 0 };
generator->generate_block(q);
_modifiers.apply(q.voxel_buffer, AABB(pos, q.voxel_buffer.get_size()));
RWLockWrite wlock(data_lod0.map_lock);
if (data_lod0.map.has_block(block_pos_lod0)) {
// A block was loaded by another thread, cancel our edit.
return false;
}
data_lod0.map.set_block_buffer(block_pos_lod0, voxels, true);
}
}
// If it turns out to be a problem, use CoW?
RWLockWrite lock(voxels->get_lock());
voxels->set_voxel(value, data_lod0.map.to_local(pos), channel_index);
// We don't update mips, this must be done by the caller
return true;
}
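// Illustrative sketch of a typical LOD0 edit sequence built on this class's API, since mips are not
// updated here: mark the edited area as modified, then propagate it to the other LODs. (`to_span_const`
// is assumed to be the usual helper viewing a vector as a read-only span.)
//
//   VoxelData &data = ...;
//   if (data.try_set_voxel(value, pos, VoxelBufferInternal::CHANNEL_TYPE)) {
//       std::vector<Vector3i> modified_lod0_blocks;
//       data.mark_area_modified(Box3i(pos, Vector3i(1, 1, 1)), &modified_lod0_blocks);
//       data.update_lods(to_span_const(modified_lod0_blocks), nullptr);
//   }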
float VoxelData::get_voxel_f(Vector3i pos, unsigned int channel_index) const {
VoxelSingleValue defval;
defval.f = 1.f;
return get_voxel(pos, channel_index, defval).f;
}
bool VoxelData::try_set_voxel_f(real_t value, Vector3i pos, unsigned int channel_index) {
// TODO Handle format instead of hardcoding 16-bits
return try_set_voxel(snorm_to_s16(value), pos, channel_index);
}
void VoxelData::copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask) const {
ZN_PROFILE_SCOPE();
const Lod &data_lod0 = _lods[0];
const VoxelModifierStack &modifiers = _modifiers;
Ref<VoxelGenerator> generator = get_generator();
if (is_streaming_enabled() && generator.is_valid()) {
struct GenContext {
VoxelGenerator &generator;
const VoxelModifierStack &modifiers;
};
GenContext gctx{ **generator, modifiers };
RWLockRead rlock(data_lod0.map_lock);
data_lod0.map.copy(min_pos, dst_buffer, channels_mask, &gctx,
[](void *callback_data, VoxelBufferInternal &voxels, Vector3i pos) {
// Suffixed with `2` because GCC warns it shadows a previous local...
GenContext *gctx2 = reinterpret_cast<GenContext *>(callback_data);
VoxelGenerator::VoxelQueryData q{ voxels, pos, 0 };
gctx2->generator.generate_block(q);
gctx2->modifiers.apply(voxels, AABB(pos, voxels.get_size()));
});
} else {
RWLockRead rlock(data_lod0.map_lock);
// TODO Apply modifiers
data_lod0.map.copy(min_pos, dst_buffer, channels_mask);
}
}
void VoxelData::paste(Vector3i min_pos, const VoxelBufferInternal &src_buffer, unsigned int channels_mask,
bool use_mask, uint64_t mask_value, bool create_new_blocks) {
ZN_PROFILE_SCOPE();
Lod &data_lod0 = _lods[0];
data_lod0.map.paste(min_pos, src_buffer, channels_mask, use_mask, mask_value, create_new_blocks);
}
bool VoxelData::is_area_loaded(const Box3i p_voxels_box) const {
if (is_streaming_enabled() == false) {
return true;
}
const Box3i voxel_box = p_voxels_box.clipped(get_bounds());
const Lod &data_lod0 = _lods[0];
{
RWLockRead rlock(data_lod0.map_lock);
const bool all_blocks_present = data_lod0.map.is_area_fully_loaded(voxel_box);
return all_blocks_present;
}
}
void VoxelData::pre_generate_box(Box3i voxel_box, Span<Lod> lods, unsigned int data_block_size, bool streaming,
unsigned int lod_count, Ref<VoxelGenerator> generator, VoxelModifierStack &modifiers) {
ZN_PROFILE_SCOPE();
//ERR_FAIL_COND_MSG(_full_load_mode == false, nullptr, "This function can only be used in full load mode");
struct Task {
Vector3i block_pos;
uint32_t lod_index;
std::shared_ptr<VoxelBufferInternal> voxels;
};
// TODO Optimize: thread_local pooling?
std::vector<Task> todo;
// We'll pack tasks per LOD so we'll have less locking to do
// TODO Optimize: thread_local pooling?
std::vector<unsigned int> count_per_lod;
// Find empty slots
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
const Box3i block_box = voxel_box.downscaled(data_block_size << lod_index);
//ZN_PRINT_VERBOSE(format("Preloading box {} at lod {} synchronously", block_box, lod_index));
Lod &data_lod = lods[lod_index];
const unsigned int prev_size = todo.size();
{
RWLockRead rlock(data_lod.map_lock);
block_box.for_each_cell([&data_lod, lod_index, &todo, streaming](Vector3i block_pos) {
// We don't check "loading blocks", because this function wants to complete the task right now.
const VoxelDataBlock *block = data_lod.map.get_block(block_pos);
if (streaming) {
// Non-resident blocks must not be touched because we don't know what's in them.
// We can generate caches if resident ones have no voxel data.
if (block != nullptr && !block->has_voxels()) {
todo.push_back(Task{ block_pos, lod_index, nullptr });
}
} else {
// We can generate anywhere voxel data is not in memory
if (block == nullptr || !block->has_voxels()) {
todo.push_back(Task{ block_pos, lod_index, nullptr });
}
}
});
}
count_per_lod.push_back(todo.size() - prev_size);
}
const Vector3i block_size = Vector3iUtil::create(data_block_size);
// Generate
for (unsigned int i = 0; i < todo.size(); ++i) {
Task &task = todo[i];
task.voxels = make_shared_instance<VoxelBufferInternal>();
task.voxels->create(block_size);
// TODO Format?
if (generator.is_valid()) {
ZN_PROFILE_SCOPE_NAMED("Generate");
VoxelGenerator::VoxelQueryData q{ //
*task.voxels, task.block_pos * (data_block_size << task.lod_index), task.lod_index
};
generator->generate_block(q);
modifiers.apply(q.voxel_buffer, AABB(q.origin_in_voxels, q.voxel_buffer.get_size() << q.lod));
}
}
// Populate slots
unsigned int task_index = 0;
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
ZN_ASSERT(lod_index < count_per_lod.size());
const unsigned int count = count_per_lod[lod_index];
if (count > 0) {
const unsigned int end_task_index = task_index + count;
Lod &data_lod = lods[lod_index];
RWLockWrite wlock(data_lod.map_lock);
for (; task_index < end_task_index; ++task_index) {
Task &task = todo[task_index];
ZN_ASSERT(task.lod_index == lod_index);
const VoxelDataBlock *prev_block = data_lod.map.get_block(task.block_pos);
if (prev_block != nullptr && prev_block->has_voxels()) {
// Sorry, that block has been set in the meantime by another thread.
// We'll assume the block we just generated is redundant and discard it.
continue;
}
data_lod.map.set_block_buffer(task.block_pos, task.voxels, true);
}
}
}
}
void VoxelData::pre_generate_box(Box3i voxel_box) {
const unsigned int data_block_size = get_block_size();
const bool streaming = is_streaming_enabled();
const unsigned int lod_count = get_lod_count();
pre_generate_box(voxel_box, to_span(_lods), data_block_size, streaming, lod_count, get_generator(), _modifiers);
}
void VoxelData::clear_cached_blocks_in_voxel_area(Box3i p_voxel_box) {
const unsigned int lod_count = get_lod_count();
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
Lod &lod = _lods[lod_index];
RWLockRead rlock(lod.map_lock);
const Box3i blocks_box = p_voxel_box.downscaled(lod.map.get_block_size() << lod_index);
blocks_box.for_each_cell_zxy([&lod](const Vector3i bpos) {
VoxelDataBlock *block = lod.map.get_block(bpos);
if (block == nullptr || block->is_edited() || block->is_modified()) {
return;
}
block->clear_voxels();
});
}
}
void VoxelData::mark_area_modified(Box3i p_voxel_box, std::vector<Vector3i> *lod0_new_blocks_to_lod) {
const Box3i bbox = p_voxel_box.downscaled(get_block_size());
Lod &data_lod0 = _lods[0];
{
RWLockRead rlock(data_lod0.map_lock);
bbox.for_each_cell([this, &data_lod0, lod0_new_blocks_to_lod](Vector3i block_pos_lod0) {
VoxelDataBlock *block = data_lod0.map.get_block(block_pos_lod0);
// We can get null blocks due to the added padding...
//ERR_FAIL_COND(block == nullptr);
if (block == nullptr) {
return;
}
// We can get blocks without voxels in them due to the added padding...
if (!block->has_voxels()) {
return;
}
//RWLockWrite wlock(block->get_voxels_shared()->get_lock());
block->set_modified(true);
// TODO call `set_edited(true)` as well? Apparently it wasn't needed so far, but it's a bit confusing
// TODO That boolean is also modified by the threaded update task (always set to false)
if (!block->get_needs_lodding()) {
block->set_needs_lodding(true);
// This is what indirectly causes remeshing
if (lod0_new_blocks_to_lod != nullptr) {
lod0_new_blocks_to_lod->push_back(block_pos_lod0);
}
}
});
}
}
bool VoxelData::try_set_block_buffer(
Vector3i block_position, unsigned int lod_index, std::shared_ptr<VoxelBufferInternal> buffer, bool edited) {
Lod &data_lod = _lods[lod_index];
if (buffer->get_size() != Vector3iUtil::create(get_block_size())) {
// Voxel block size is incorrect, drop it
ERR_PRINT("Block is different from expected size");
return false;
}
// Store buffer
RWLockWrite wlock(data_lod.map_lock);
// TODO Expose `overwrite` as parameter?
VoxelDataBlock *block = data_lod.map.set_block_buffer(block_position, buffer, false);
CRASH_COND(block == nullptr);
block->set_edited(edited);
return true;
}
void VoxelData::set_empty_block_buffer(Vector3i block_position, unsigned int lod_index) {
Lod &data_lod = _lods[lod_index];
RWLockWrite wlock(data_lod.map_lock);
// TODO Expose `overwrite` as parameter?
VoxelDataBlock *block = data_lod.map.set_empty_block(block_position, false);
ZN_ASSERT(block != nullptr);
}
bool VoxelData::has_block(Vector3i bpos, unsigned int lod_index) const {
const Lod &data_lod = _lods[lod_index];
RWLockRead rlock(data_lod.map_lock);
return data_lod.map.has_block(bpos);
}
unsigned int VoxelData::get_block_count() const {
unsigned int sum = 0;
const unsigned int lod_count = get_lod_count();
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
const Lod &lod = _lods[lod_index];
RWLockRead rlock(lod.map_lock);
sum += lod.map.get_block_count();
}
return sum;
}
void VoxelData::update_lods(Span<const Vector3i> modified_lod0_blocks, std::vector<BlockLocation> *out_updated_blocks) {
ZN_DSTACK();
ZN_PROFILE_SCOPE();
// Propagates edits performed so far to other LODs.
// These LODs must be currently in memory, otherwise terrain data will miss it.
// This is currently ensured by the fact we load blocks in a "pyramidal" way,
// i.e. there is no way for a block to be loaded if its parent LOD isn't loaded already.
// In the future we may implement storing of edits to be applied later if blocks can't be found.
const int data_block_size = get_block_size();
const int data_block_size_po2 = get_block_size_po2();
const unsigned int lod_count = get_lod_count();
const bool streaming_enabled = is_streaming_enabled();
Ref<VoxelGenerator> generator = get_generator();
static thread_local FixedArray<std::vector<Vector3i>, constants::MAX_LOD> tls_blocks_to_process_per_lod;
// Make sure LOD0 gets updates even if _lod_count is 1
{
std::vector<Vector3i> &dst_lod0 = tls_blocks_to_process_per_lod[0];
dst_lod0.resize(modified_lod0_blocks.size());
memcpy(dst_lod0.data(), modified_lod0_blocks.data(), dst_lod0.size() * sizeof(Vector3i));
}
{
Lod &data_lod0 = _lods[0];
RWLockRead rlock(data_lod0.map_lock);
std::vector<Vector3i> &blocks_pending_lodding_lod0 = tls_blocks_to_process_per_lod[0];
for (unsigned int i = 0; i < blocks_pending_lodding_lod0.size(); ++i) {
const Vector3i data_block_pos = blocks_pending_lodding_lod0[i];
VoxelDataBlock *data_block = data_lod0.map.get_block(data_block_pos);
ERR_CONTINUE(data_block == nullptr);
data_block->set_needs_lodding(false);
if (out_updated_blocks != nullptr) {
out_updated_blocks->push_back(BlockLocation{ data_block_pos, 0 });
}
}
}
const int half_bs = data_block_size >> 1;
// Process downscales upwards in pairs of consecutive LODs.
// This ensures we don't process multiple times the same blocks.
// Only LOD0 is editable at the moment, so we'll downscale from there
for (uint8_t dst_lod_index = 1; dst_lod_index < lod_count; ++dst_lod_index) {
const uint8_t src_lod_index = dst_lod_index - 1;
std::vector<Vector3i> &src_lod_blocks_to_process = tls_blocks_to_process_per_lod[src_lod_index];
std::vector<Vector3i> &dst_lod_blocks_to_process = tls_blocks_to_process_per_lod[dst_lod_index];
//VoxelLodTerrainUpdateData::Lod &dst_lod = state.lods[dst_lod_index];
Lod &src_data_lod = _lods[src_lod_index];
RWLockRead src_data_lod_map_rlock(src_data_lod.map_lock);
Lod &dst_data_lod = _lods[dst_lod_index];
// TODO Could take long locking this, we may generate things first and assign to the map at the end.
// Besides, in per-block streaming mode, it is not needed because blocks are supposed to be present
RWLockRead wlock(dst_data_lod.map_lock);
for (unsigned int i = 0; i < src_lod_blocks_to_process.size(); ++i) {
const Vector3i src_bpos = src_lod_blocks_to_process[i];
const Vector3i dst_bpos = src_bpos >> 1;
VoxelDataBlock *src_block = src_data_lod.map.get_block(src_bpos);
VoxelDataBlock *dst_block = dst_data_lod.map.get_block(dst_bpos);
src_block->set_needs_lodding(false);
if (dst_block == nullptr) {
if (!streaming_enabled) {
// TODO Doing this on the main thread can be very demanding and cause a stall.
// We should find a way to make it asynchronous, not need mips, or not edit outside viewers area.
std::shared_ptr<VoxelBufferInternal> voxels = make_shared_instance<VoxelBufferInternal>();
voxels->create(Vector3iUtil::create(data_block_size));
VoxelGenerator::VoxelQueryData q{ //
*voxels, //
dst_bpos << (dst_lod_index + data_block_size_po2), //
dst_lod_index
};
if (generator.is_valid()) {
ZN_PROFILE_SCOPE_NAMED("Generate");
generator->generate_block(q);
}
_modifiers.apply(
q.voxel_buffer, AABB(q.origin_in_voxels, q.voxel_buffer.get_size() << dst_lod_index));
dst_block = dst_data_lod.map.set_block_buffer(dst_bpos, voxels, true);
} else {
ERR_PRINT(String("Destination block {0} not found when cascading edits on LOD {1}")
.format(varray(dst_bpos, dst_lod_index)));
continue;
}
}
// The block and its lower LOD indices are expected to be available.
// Otherwise it means the function was called too late?
ZN_ASSERT(src_block != nullptr);
//ZN_ASSERT(dst_block != nullptr);
// The block should have voxels if it has been edited or mipped.
ZN_ASSERT(src_block->has_voxels());
if (out_updated_blocks != nullptr) {
out_updated_blocks->push_back(BlockLocation{ dst_bpos, dst_lod_index });
}
dst_block->set_modified(true);
if (dst_lod_index != lod_count - 1 && !dst_block->get_needs_lodding()) {
dst_block->set_needs_lodding(true);
dst_lod_blocks_to_process.push_back(dst_bpos);
}
const Vector3i rel = src_bpos - (dst_bpos << 1);
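// `rel` is the octant of the source block inside the destination block (each component is 0 or 1).
// For example with 16-voxel blocks (half_bs = 8): src_bpos = (5, 2, 7) gives dst_bpos = (2, 1, 3),
// so rel = (1, 0, 1) and the downscaled result is written at offset (8, 0, 8) in the destination block.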
// Update lower LOD
// This must always be done after an edit before it gets saved, otherwise LODs won't match and it will look
// ugly.
// TODO Optimization: try to narrow to edited region instead of taking whole block
{
ZN_PROFILE_SCOPE_NAMED("Downscale");
RWLockRead rlock(src_block->get_voxels().get_lock());
src_block->get_voxels().downscale_to(
dst_block->get_voxels(), Vector3i(), src_block->get_voxels_const().get_size(), rel * half_bs);
}
}
src_lod_blocks_to_process.clear();
// No need to clear the last list because we never add blocks to it
}
// uint64_t time_spent = profiling_clock.restart();
// if (time_spent > 10) {
// print_line(String("Took {0} us to update lods").format(varray(time_spent)));
// }
}
void VoxelData::get_missing_blocks(
Span<const Vector3i> block_positions, unsigned int lod_index, std::vector<Vector3i> &out_missing) const {
const Lod &lod = _lods[lod_index];
RWLockRead rlock(lod.map_lock);
for (const Vector3i &pos : block_positions) {
if (!lod.map.has_block(pos)) {
out_missing.push_back(pos);
}
}
}
void VoxelData::get_missing_blocks(
Box3i p_blocks_box, unsigned int lod_index, std::vector<Vector3i> &out_missing) const {
const Lod &data_lod = _lods[lod_index];
const Box3i bounds_in_blocks = _bounds_in_voxels.downscaled(get_block_size());
const Box3i blocks_box = p_blocks_box.clipped(bounds_in_blocks);
RWLockRead rlock(data_lod.map_lock);
blocks_box.for_each_cell_zxy([&data_lod, &out_missing](Vector3i bpos) {
if (!data_lod.map.has_block(bpos)) {
out_missing.push_back(bpos);
}
});
}
unsigned int VoxelData::get_blocks_with_voxel_data(
Box3i p_blocks_box, unsigned int lod_index, Span<std::shared_ptr<VoxelBufferInternal>> out_blocks) const {
ZN_ASSERT(int64_t(out_blocks.size()) >= Vector3iUtil::get_volume(p_blocks_box.size));
const Lod &data_lod = _lods[lod_index];
RWLockRead rlock(data_lod.map_lock);
unsigned int count = 0;
// Iteration order matters for thread access.
p_blocks_box.for_each_cell_zxy([&count, &data_lod, &out_blocks](Vector3i data_block_pos) {
const VoxelDataBlock *nblock = data_lod.map.get_block(data_block_pos);
// The block can actually be null on some occasions. Not sure yet if it's that bad
//CRASH_COND(nblock == nullptr);
if (nblock != nullptr && nblock->has_voxels()) {
out_blocks[count] = nblock->get_voxels_shared();
}
++count;
});
return count;
}
void VoxelData::get_blocks_grid(VoxelDataGrid &grid, Box3i box_in_voxels, unsigned int lod_index) const {
const Lod &data_lod = _lods[lod_index];
RWLockRead rlock(data_lod.map_lock);
grid.reference_area(data_lod.map, box_in_voxels);
}
} // namespace zylann::voxel

storage/voxel_data.h (new file, 304 lines)

@ -0,0 +1,304 @@
#ifndef VOXEL_DATA_H
#define VOXEL_DATA_H
#include "../generators/voxel_generator.h"
#include "../streams/voxel_stream.h"
#include "modifiers.h"
#include "voxel_data_map.h"
namespace zylann::voxel {
class VoxelDataGrid;
// Generic storage containing everything needed to access voxel data.
// Contains edits, procedural sources and file stream so voxels not physically stored in memory can be obtained.
// This does not contain meshing or instancing information, only voxels.
// Individual calls should be thread-safe.
class VoxelData {
public:
VoxelData();
~VoxelData();
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Configuration.
// Changing these settings while data is already loaded can be expensive, or cause data to be reset.
// If threaded tasks are still working on the data while this happens, they should be cancelled or ignored.
inline unsigned int get_block_size() const {
return _lods[0].map.get_block_size();
}
inline unsigned int get_block_size_po2() const {
return _lods[0].map.get_block_size_pow2();
}
inline Vector3i voxel_to_block(Vector3i pos) const {
return _lods[0].map.voxel_to_block(pos);
}
void set_lod_count(unsigned int p_lod_count);
// Clears voxel data. Keeps modifiers, generator and settings.
void reset_maps();
inline unsigned int get_lod_count() const {
RWLockRead rlock(_rw_lock);
return _lod_count;
}
void set_bounds(Box3i bounds);
inline Box3i get_bounds() const {
RWLockRead rlock(_rw_lock);
return _bounds_in_voxels;
}
void set_generator(Ref<VoxelGenerator> generator);
inline Ref<VoxelGenerator> get_generator() const {
RWLockRead rlock(_rw_lock);
return _generator;
}
void set_stream(Ref<VoxelStream> stream);
inline Ref<VoxelStream> get_stream() const {
RWLockRead rlock(_rw_lock);
return _stream;
}
inline VoxelModifierStack &get_modifiers() {
return _modifiers;
}
inline const VoxelModifierStack &get_modifiers() const {
return _modifiers;
}
void set_streaming_enabled(bool enabled);
inline bool is_streaming_enabled() const {
return _streaming_enabled;
}
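// Typical configuration before use (sketch; `generator` and `stream` are assumed to be valid
// references obtained elsewhere):
//
//   VoxelData data;
//   data.set_lod_count(4);
//   data.set_bounds(Box3i::from_center_extents(Vector3i(), Vector3iUtil::create(constants::MAX_VOLUME_EXTENT)));
//   data.set_generator(generator);
//   data.set_stream(stream);
//   data.set_streaming_enabled(true);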
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Queries.
// When not specified, the used LOD index is 0.
VoxelSingleValue get_voxel(Vector3i pos, unsigned int channel_index, VoxelSingleValue defval) const;
bool try_set_voxel(uint64_t value, Vector3i pos, unsigned int channel_index);
float get_voxel_f(Vector3i pos, unsigned int channel_index) const;
bool try_set_voxel_f(real_t value, Vector3i pos, unsigned int channel_index);
void copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask) const;
void paste(Vector3i min_pos, const VoxelBufferInternal &src_buffer, unsigned int channels_mask, bool use_mask,
uint64_t mask_value, bool create_new_blocks);
bool is_area_loaded(const Box3i p_voxels_box) const;
// Executes a read+write operation on all voxels in the given area, on a specific channel.
// If the area intersects the boundaries of the volume, it will be clipped.
// If the area intersects blocks that aren't loaded, the operation will be cancelled.
// Returns the box of voxels which were effectively processed.
template <typename F>
Box3i write_box(const Box3i &p_voxel_box, unsigned int channel_index, F action) {
const Box3i voxel_box = p_voxel_box.clipped(get_bounds());
if (!is_area_loaded(voxel_box)) {
ZN_PRINT_VERBOSE("Area not editable");
return Box3i();
}
Ref<VoxelGenerator> generator = _generator;
Lod &data_lod0 = _lods[0];
{
RWLockWrite wlock(data_lod0.map_lock);
data_lod0.map.write_box(
voxel_box, channel_index, action, [&generator](VoxelBufferInternal &voxels, Vector3i pos) {
if (generator.is_valid()) {
VoxelGenerator::VoxelQueryData q{ voxels, pos, 0 };
generator->generate_block(q);
}
});
}
return voxel_box;
}
// Executes a read+write operation on all voxels in the given area, on two specific channels.
// If the area intersects the boundaries of the volume, it will be clipped.
// If the area intersects blocks that aren't loaded, the operation will be cancelled.
// Returns the box of voxels which were effectively processed.
template <typename F>
Box3i write_box_2(const Box3i &p_voxel_box, unsigned int channel1_index, unsigned int channel2_index, F action) {
const Box3i voxel_box = p_voxel_box.clipped(get_bounds());
if (!is_area_loaded(voxel_box)) {
ZN_PRINT_VERBOSE("Area not editable");
return Box3i();
}
Ref<VoxelGenerator> generator = _generator;
Lod &data_lod0 = _lods[0];
{
RWLockWrite wlock(data_lod0.map_lock);
data_lod0.map.write_box_2(voxel_box, channel1_index, channel2_index, action,
[&generator](VoxelBufferInternal &voxels, Vector3i pos) {
if (generator.is_valid()) {
VoxelGenerator::VoxelQueryData q{ voxels, pos, 0 };
generator->generate_block(q);
}
});
}
return voxel_box;
}
// Generates all non-present blocks in preparation for an edit.
// Every block intersecting with the box at every LOD will be checked.
// This function runs sequentially and should be thread-safe. May be used if blocks are immediately needed.
// It will block if other threads are accessing the same data.
// WARNING: this does not check if the area is editable.
void pre_generate_box(Box3i voxel_box);
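// Sketch of the pattern used by editing tools in this commit (see VoxelToolLodTerrain::do_sphere):
//
//   data.pre_generate_box(op.box);
//   data.get_blocks_grid(op.blocks, op.box, 0);
//   op();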
// Clears voxel data from blocks that are pure results of generators and modifiers.
// WARNING: this does not check if the area is editable.
void clear_cached_blocks_in_voxel_area(Box3i p_voxel_box);
// Flags all blocks in the given area as modified at LOD0.
// Also marks them as requiring LOD updates (if lod count is 1 this has no effect).
// Optionally, returns a list of affected block positions which did not require LOD updates before.
void mark_area_modified(Box3i p_voxel_box, std::vector<Vector3i> *lod0_new_blocks_to_lod);
// Sets voxel data at a block position. Also sets whether this is edited data (otherwise it is cached generator
// results).
// If the block has different size than expected, returns false and doesn't set the data.
// If the block already exists, it will not be overwritten, but still returns true.
// Otherwise, returns true.
// TODO Might need to expose a parameter for the overwriting behavior.
bool try_set_block_buffer(
Vector3i block_position, unsigned int lod_index, std::shared_ptr<VoxelBufferInternal> buffer, bool edited);
// Sets empty voxel data at a block position. It means this block is known to have no edits and no cached generator
// data.
// If the block already exists, it is not overwritten.
// TODO Might need to expose a parameter for the overwriting behavior.
void set_empty_block_buffer(Vector3i block_position, unsigned int lod_index);
template <typename F>
void for_each_block(F op) {
for (unsigned int lod_index = 0; lod_index < _lod_count; ++lod_index) {
Lod &lod = _lods[lod_index];
RWLockRead rlock(lod.map_lock);
lod.map.for_each_block(op);
}
}
template <typename F>
void for_each_block_at_lod(F op, unsigned int lod_index) const {
const Lod &lod = _lods[lod_index];
RWLockRead rlock(lod.map_lock);
lod.map.for_each_block(op);
}
// Tests if a block exists at the specified block position and LOD index.
// This is mainly used for debugging so it isn't optimal, don't use this if you plan to query many blocks.
bool has_block(Vector3i bpos, unsigned int lod_index) const;
// Gets the total amount of allocated blocks. This includes blocks having no voxel data.
unsigned int get_block_count() const;
struct BlockLocation {
Vector3i position;
uint32_t lod_index;
};
// Updates the LODs of all blocks at given positions, and resets their flags telling that they need LOD updates.
// Optionally, returns a list of affected block positions.
void update_lods(Span<const Vector3i> modified_lod0_blocks, std::vector<BlockLocation> *out_updated_blocks);
// void action(VoxelDataBlock &block, Vector3i bpos)
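// Sketch of a possible use, collecting modified blocks for saving while unloading an area
// (`to_save` and its element type are hypothetical):
//
//   data.unload_blocks(blocks_box, lod_index, [&to_save](VoxelDataBlock &block, Vector3i bpos) {
//       if (block.is_modified() && block.has_voxels()) {
//           to_save.push_back(BlockToSave{ bpos, block.get_voxels_shared() });
//       }
//   });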
template <typename F>
void unload_blocks(Box3i bbox, unsigned int lod_index, F action) {
Lod &lod = _lods[lod_index];
RWLockWrite wlock(lod.map_lock);
bbox.for_each_cell_zxy([&lod, &action](Vector3i bpos) {
lod.map.remove_block(bpos, [&action, bpos](VoxelDataBlock &block) { action(block, bpos); });
});
}
// Gets missing blocks out of the given block positions.
// WARNING: positions outside bounds will be considered missing too.
// TODO Don't consider positions outside bounds to be missing? This is only a byproduct of migrating old code.
// It doesn't check this because the code using this function already does it (a bit more efficiently, but still).
void get_missing_blocks(
Span<const Vector3i> block_positions, unsigned int lod_index, std::vector<Vector3i> &out_missing) const;
// Gets missing blocks out of the given area in block coordinates.
// If the area intersects the outside of the bounds, it will be clipped.
void get_missing_blocks(Box3i p_blocks_box, unsigned int lod_index, std::vector<Vector3i> &out_missing) const;
unsigned int get_blocks_with_voxel_data(
Box3i p_blocks_box, unsigned int lod_index, Span<std::shared_ptr<VoxelBufferInternal>> out_blocks) const;
void get_blocks_grid(VoxelDataGrid &grid, Box3i box_in_voxels, unsigned int lod_index) const;
private:
void reset_maps_no_lock();
struct Lod {
// Storage for edited and cached voxels.
VoxelDataMap map;
// This lock should be locked in write mode only when the map gets modified (adding or removing blocks).
// Otherwise it may be locked in read mode.
// It is possible to unlock it after we are done querying the map.
RWLock map_lock;
};
static void pre_generate_box(Box3i voxel_box, Span<Lod> lods, unsigned int data_block_size, bool streaming,
unsigned int lod_count, Ref<VoxelGenerator> generator, VoxelModifierStack &modifiers);
static inline std::shared_ptr<VoxelBufferInternal> try_get_voxel_buffer_with_lock(
const Lod &data_lod, Vector3i block_pos, bool &out_generate) {
RWLockRead rlock(data_lod.map_lock);
const VoxelDataBlock *block = data_lod.map.get_block(block_pos);
if (block == nullptr) {
return nullptr;
}
// TODO Thread-safety: checking the presence of voxels here is not safe.
// It can change while meshing takes place if a modifier is moved in the same area,
// because it invalidates cached data (that doesn't require locking the map, and doesn't lock a VoxelBuffer,
// so there is no sync going on). One way to fix this is to implement a spatial lock.
if (!block->has_voxels()) {
out_generate = true;
return nullptr;
}
return block->get_voxels_shared();
}
// Each LOD works in a coordinate space where one unit spans twice as many voxels as the LOD below it.
// LOD 0 is the primary storage for edited data. Higher indices are "mip-maps".
// A fixed array is used because it's usually small, and it doesn't require locking between threads.
// Note that these LODs do not update automatically; it is up to users of the class to do this job.
FixedArray<Lod, constants::MAX_LOD> _lods;
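// Example of the coordinate convention (a sketch; `bpos_lod0` is an assumed LOD-0 block position):
//   const Vector3i ancestor_bpos = bpos_lod0 >> lod_index; // same region, expressed at `lod_index`
//   const Vector3i snapped_back = ancestor_bpos << lod_index; // back to LOD-0 coordinates, snapped to the ancestor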
Box3i _bounds_in_voxels;
uint8_t _lod_count = 1;
// If enabled, some data blocks can be in a "not loaded" or "loaded" state, which means we can't assume what they
// contain until we load them from the stream.
// If disabled, all edits are kept in memory, and if a block isn't stored we know we can use the
// generator and modifiers to obtain its data.
// This mostly changes how this class is used; streaming itself is not implemented directly in this class.
bool _streaming_enabled = true;
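// Example of how callers are expected to branch on this flag (a sketch, not part of the API):
//   if (data.is_streaming_enabled()) {
//       // A missing block may still exist on disk, so it has to be requested from the stream first.
//   } else {
//       // Full load mode: a missing block can be produced from the generator and modifiers right away.
//   }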
// Procedural generation stack
VoxelModifierStack _modifiers;
Ref<VoxelGenerator> _generator;
// Persistent storage (file(s)).
Ref<VoxelStream> _stream;
// This should be locked when accessing configuration members.
RWLock _rw_lock;
};
} // namespace zylann::voxel
#endif // VOXEL_DATA_H

View File

@ -334,7 +334,7 @@ bool VoxelDataMap::is_area_fully_loaded(const Box3i voxels_box) const {
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator, bool is_streaming) {
ZN_PROFILE_SCOPE();
//ERR_FAIL_COND_MSG(_full_load_mode == false, nullptr, "This function can only be used in full load mode");
@ -442,5 +442,5 @@ void clear_cached_blocks_in_voxel_area(VoxelDataLodMap &data, Box3i p_voxel_box)
});
}
}
*/
} // namespace zylann::voxel

View File

@ -212,7 +212,7 @@ private:
unsigned int _lod_index = 0;
};
struct VoxelDataLodMap {
/*struct VoxelDataLodMap {
struct Lod {
VoxelDataMap map;
// This lock should be locked in write mode only when the map gets modified (adding or removing blocks).
@ -233,7 +233,7 @@ struct VoxelDataLodMap {
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator, bool is_streaming);
// Clears voxel data from blocks that are pure results of generators and modifiers.
void clear_cached_blocks_in_voxel_area(VoxelDataLodMap &data, Box3i p_voxel_box);
void clear_cached_blocks_in_voxel_area(VoxelDataLodMap &data, Box3i p_voxel_box);*/
} // namespace zylann::voxel

View File

@ -131,7 +131,7 @@ VoxelLodTerrain::VoxelLodTerrain() {
ZN_PRINT_VERBOSE("Construct VoxelLodTerrain");
_data = make_shared_instance<VoxelDataLodMap>();
_data = make_shared_instance<VoxelData>();
_update_data = make_shared_instance<VoxelLodTerrainUpdateData>();
_update_data->task_is_complete = true;
_streaming_dependency = make_shared_instance<StreamingDependency>();
@ -143,8 +143,7 @@ VoxelLodTerrain::VoxelLodTerrain() {
set_process_callback(_process_callback);
// Infinite by default
_update_data->settings.bounds_in_voxels =
Box3i::from_center_extents(Vector3i(), Vector3iUtil::create(constants::MAX_VOLUME_EXTENT));
_data->set_bounds(Box3i::from_center_extents(Vector3i(), Vector3iUtil::create(constants::MAX_VOLUME_EXTENT)));
// Mesh updates are spread over frames by scheduling them in a task runner of VoxelEngine,
// but instead of using a reception buffer we use a callback,
@ -231,11 +230,11 @@ void VoxelLodTerrain::set_material(Ref<Material> p_material) {
}
unsigned int VoxelLodTerrain::get_data_block_size() const {
return _data->lods[0].map.get_block_size();
return _data->get_block_size();
}
unsigned int VoxelLodTerrain::get_data_block_size_pow2() const {
return _data->lods[0].map.get_block_size_pow2();
return _data->get_block_size_po2();
}
unsigned int VoxelLodTerrain::get_mesh_block_size_pow2() const {
@ -247,13 +246,13 @@ unsigned int VoxelLodTerrain::get_mesh_block_size() const {
}
void VoxelLodTerrain::set_stream(Ref<VoxelStream> p_stream) {
if (p_stream == _stream) {
if (p_stream == get_stream()) {
return;
}
_stream = p_stream;
_data->set_stream(p_stream);
StreamingDependency::reset(_streaming_dependency, _stream, _generator);
StreamingDependency::reset(_streaming_dependency, p_stream, get_generator());
#ifdef TOOLS_ENABLED
if (p_stream.is_valid()) {
@ -273,18 +272,18 @@ void VoxelLodTerrain::set_stream(Ref<VoxelStream> p_stream) {
}
Ref<VoxelStream> VoxelLodTerrain::get_stream() const {
return _stream;
return _data->get_stream();
}
void VoxelLodTerrain::set_generator(Ref<VoxelGenerator> p_generator) {
if (p_generator == _generator) {
if (p_generator == get_generator()) {
return;
}
_generator = p_generator;
_data->set_generator(p_generator);
MeshingDependency::reset(_meshing_dependency, _mesher, p_generator);
StreamingDependency::reset(_streaming_dependency, _stream, p_generator);
StreamingDependency::reset(_streaming_dependency, get_stream(), p_generator);
#ifdef TOOLS_ENABLED
if (p_generator.is_valid()) {
@ -304,7 +303,7 @@ void VoxelLodTerrain::set_generator(Ref<VoxelGenerator> p_generator) {
}
Ref<VoxelGenerator> VoxelLodTerrain::get_generator() const {
return _generator;
return _data->get_generator();
}
void VoxelLodTerrain::_on_gi_mode_changed() {
@ -335,7 +334,7 @@ void VoxelLodTerrain::set_mesher(Ref<VoxelMesher> p_mesher) {
update_shader_material_pool_template();
MeshingDependency::reset(_meshing_dependency, _mesher, _generator);
MeshingDependency::reset(_meshing_dependency, _mesher, get_generator());
if (_mesher.is_valid()) {
start_updater();
@ -353,7 +352,9 @@ void VoxelLodTerrain::_on_stream_params_changed() {
stop_streamer();
stop_updater();
if (_stream.is_valid()) {
Ref<VoxelStream> stream = get_stream();
if (stream.is_valid()) {
//const int stream_block_size_po2 = _stream->get_block_size_po2();
//_set_block_size_po2(stream_block_size_po2);
@ -361,10 +362,10 @@ void VoxelLodTerrain::_on_stream_params_changed() {
// const int stream_lod_count = _stream->get_lod_count();
// _set_lod_count(min(stream_lod_count, get_lod_count()));
if (_update_data->settings.full_load_mode && !_stream->supports_loading_all_blocks()) {
if (is_full_load_mode_enabled() && !stream->supports_loading_all_blocks()) {
ERR_PRINT("The chosen stream does not supports loading all blocks. Full load mode cannot be used.");
_update_data->wait_for_end_of_task();
_update_data->settings.full_load_mode = false;
_data->set_streaming_enabled(true);
#ifdef TOOLS_ENABLED
notify_property_list_changed();
#endif
@ -379,7 +380,9 @@ void VoxelLodTerrain::_on_stream_params_changed() {
// also this code isn't right, it doesn't update the other lods
//_data->lods[0].map.create(p_block_size_po2, 0);
if ((_stream.is_valid() || _generator.is_valid()) &&
Ref<VoxelGenerator> generator = get_generator();
if ((stream.is_valid() || generator.is_valid()) &&
(Engine::get_singleton()->is_editor_hint() == false || _update_data->settings.run_stream_in_editor)) {
start_streamer();
start_updater();
@ -389,7 +392,8 @@ void VoxelLodTerrain::_on_stream_params_changed() {
_update_data->state.force_update_octrees_next_update = true;
// The whole map might change, so mark the whole area dirty
for (unsigned int i = 0; i < _update_data->settings.lod_count; ++i) {
const unsigned int lod_count = get_lod_count();
for (unsigned int i = 0; i < lod_count; ++i) {
VoxelLodTerrainUpdateData::Lod &lod = _update_data->state.lods[i];
lod.last_view_distance_data_blocks = 0;
lod.last_view_distance_mesh_blocks = 0;
@ -434,20 +438,22 @@ void VoxelLodTerrain::set_mesh_block_size(unsigned int mesh_block_size) {
}
// Update voxel bounds because block size change can affect octree size
set_voxel_bounds(_update_data->settings.bounds_in_voxels);
set_voxel_bounds(_data->get_bounds());
}
void VoxelLodTerrain::set_full_load_mode_enabled(bool enabled) {
if (enabled != _update_data->settings.full_load_mode) {
const bool streaming_enabled = !enabled;
if (streaming_enabled != _data->is_streaming_enabled()) {
_update_data->wait_for_end_of_task();
_update_data->settings.full_load_mode = enabled;
//_update_data->settings.full_load_mode = enabled;
_data->set_streaming_enabled(streaming_enabled);
_update_data->state.force_update_octrees_next_update = true;
_on_stream_params_changed();
}
}
bool VoxelLodTerrain::is_full_load_mode_enabled() const {
return _update_data->settings.full_load_mode;
return !_data->is_streaming_enabled();
}
void VoxelLodTerrain::set_threaded_update_enabled(bool enabled) {
@ -523,7 +529,8 @@ void VoxelLodTerrain::set_mesh_block_active(VoxelMeshBlockVLT &block, bool activ
}
}
bool VoxelLodTerrain::is_area_editable(Box3i p_voxel_box) const {
/*bool VoxelLodTerrain::is_area_editable(Box3i p_voxel_box) const {
return _data->is_area_loaded(p_voxel_box);
if (_update_data->settings.full_load_mode) {
return true;
}
@ -534,9 +541,9 @@ bool VoxelLodTerrain::is_area_editable(Box3i p_voxel_box) const {
const bool all_blocks_present = data_lod0.map.is_area_fully_loaded(voxel_box);
return all_blocks_present;
}
}
}*/
inline std::shared_ptr<VoxelBufferInternal> try_get_voxel_buffer_with_lock(
/*inline std::shared_ptr<VoxelBufferInternal> try_get_voxel_buffer_with_lock(
const VoxelDataLodMap::Lod &data_lod, Vector3i block_pos, bool &out_generate) {
RWLockRead rlock(data_lod.map_lock);
const VoxelDataBlock *block = data_lod.map.get_block(block_pos);
@ -548,9 +555,9 @@ inline std::shared_ptr<VoxelBufferInternal> try_get_voxel_buffer_with_lock(
return nullptr;
}
return block->get_voxels_shared();
}
}*/
inline VoxelSingleValue get_voxel_with_lock(VoxelBufferInternal &vb, Vector3i pos, unsigned int channel) {
/*inline VoxelSingleValue get_voxel_with_lock(VoxelBufferInternal &vb, Vector3i pos, unsigned int channel) {
VoxelSingleValue v;
if (channel == VoxelBufferInternal::CHANNEL_SDF) {
RWLockRead rlock(vb.get_lock());
@ -560,9 +567,9 @@ inline VoxelSingleValue get_voxel_with_lock(VoxelBufferInternal &vb, Vector3i po
v.i = vb.get_voxel(pos, channel);
}
return v;
}
}*/
VoxelSingleValue VoxelLodTerrain::get_voxel(Vector3i pos, unsigned int channel, VoxelSingleValue defval) {
/*VoxelSingleValue VoxelLodTerrain::get_voxel(Vector3i pos, unsigned int channel, VoxelSingleValue defval) {
if (!_update_data->settings.bounds_in_voxels.contains(pos)) {
return defval;
}
@ -628,9 +635,9 @@ VoxelSingleValue VoxelLodTerrain::get_voxel(Vector3i pos, unsigned int channel,
}
return defval;
}
}
}*/
bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int channel, uint64_t value) {
/*bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int channel, uint64_t value) {
const Vector3i block_pos_lod0 = pos >> get_data_block_size_pow2();
VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
const Vector3i block_pos = data_lod0.map.voxel_to_block(pos);
@ -661,9 +668,9 @@ bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int ch
voxels->set_voxel(value, data_lod0.map.to_local(pos), channel);
// We don't update mips, this must be done by the caller
return true;
}
}*/
void VoxelLodTerrain::copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask) {
/*void VoxelLodTerrain::copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask) {
ZN_PROFILE_SCOPE();
const VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
VoxelModifierStack &modifiers = _data->modifiers;
@ -690,14 +697,14 @@ void VoxelLodTerrain::copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_bu
// TODO Apply modifiers
data_lod0.map.copy(p_origin_voxels, dst_buffer, channels_mask);
}
}
}*/
void VoxelLodTerrain::paste(
/*void VoxelLodTerrain::paste(
Vector3i p_origin_voxels, const VoxelBufferInternal &src_buffer, unsigned int channels_mask) {
ZN_PROFILE_SCOPE();
VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
data_lod0.map.paste(p_origin_voxels, src_buffer, channels_mask, false, 0, false);
}
}*/
// Marks intersecting blocks in the area as modified, updates LODs and schedules remeshing.
// The provided box must be at LOD0 coordinates.
@ -711,8 +718,12 @@ void VoxelLodTerrain::post_edit_area(Box3i p_box) {
// For now, this is worked around by ignoring cases where blocks are null,
// but it might mip more lods than necessary when editing on borders.
const Box3i box = p_box.padded(1);
const Box3i bbox = box.downscaled(get_data_block_size());
{
MutexLock lock(_update_data->state.blocks_pending_lodding_lod0_mutex);
_data->mark_area_modified(box, &_update_data->state.blocks_pending_lodding_lod0);
}
/*const Box3i bbox = box.downscaled(get_data_block_size());
VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
{
RWLockRead rlock(data_lod0.map_lock);
@ -741,7 +752,7 @@ void VoxelLodTerrain::post_edit_area(Box3i p_box) {
_update_data->state.blocks_pending_lodding_lod0.push_back(block_pos_lod0);
}
});
}
}*/
#ifdef TOOLS_ENABLED
if (debug_is_draw_enabled() && debug_get_draw_flag(DEBUG_DRAW_EDIT_BOXES)) {
@ -755,7 +766,8 @@ void VoxelLodTerrain::post_edit_area(Box3i p_box) {
}
void VoxelLodTerrain::post_edit_modifiers(Box3i p_voxel_box) {
clear_cached_blocks_in_voxel_area(*_data, p_voxel_box);
//clear_cached_blocks_in_voxel_area(*_data, p_voxel_box);
_data->clear_cached_blocks_in_voxel_area(p_voxel_box);
// Not sure if it is worth re-caching these blocks. We may see about that in the future if performance is an issue.
MutexLock lock(_update_data->state.changed_generated_areas_mutex);
@ -820,7 +832,7 @@ void VoxelLodTerrain::start_updater() {
void VoxelLodTerrain::stop_updater() {
// Invalidate pending tasks
MeshingDependency::reset(_meshing_dependency, _mesher, _generator);
MeshingDependency::reset(_meshing_dependency, _mesher, get_generator());
// VoxelEngine::get_singleton().set_volume_mesher(_volume_id, Ref<VoxelMesher>());
// TODO We can still receive a few delayed mesh updates after this. Is it a problem?
@ -845,7 +857,7 @@ void VoxelLodTerrain::start_streamer() {
// VoxelEngine::get_singleton().set_volume_stream(_volume_id, _stream);
// VoxelEngine::get_singleton().set_volume_generator(_volume_id, _generator);
if (_update_data->settings.full_load_mode && _stream.is_valid()) {
if (is_full_load_mode_enabled() && get_stream().is_valid()) {
// TODO May want to defer this to be sure it's not done multiple times.
// This would be a side-effect of setting properties one by one, either by scene loader or by script
@ -913,7 +925,7 @@ void VoxelLodTerrain::_set_lod_count(int p_lod_count) {
_update_data->wait_for_end_of_task();
_update_data->settings.lod_count = p_lod_count;
_data->set_lod_count(p_lod_count);
_update_data->state.force_update_octrees_next_update = true;
LodOctree::NoDestroyAction nda;
@ -940,9 +952,8 @@ void VoxelLodTerrain::reset_maps() {
_update_data->wait_for_end_of_task();
const unsigned int lod_count = _update_data->settings.lod_count;
VoxelLodTerrainUpdateData::State &state = _update_data->state;
_data->reset_maps();
/*
// Make a new one, so if threads still reference the old one it will be a different copy
std::shared_ptr<VoxelDataLodMap> new_data = make_shared_instance<VoxelDataLodMap>();
// Keep modifiers, we only reset voxel data
@ -960,6 +971,7 @@ void VoxelLodTerrain::reset_maps() {
data_lod.map.clear();
}
}
*/
abort_async_edits();
@ -969,7 +981,7 @@ void VoxelLodTerrain::reset_maps() {
void VoxelLodTerrain::reset_mesh_maps() {
_update_data->wait_for_end_of_task();
const unsigned int lod_count = _update_data->settings.lod_count;
const unsigned int lod_count = get_lod_count();
VoxelLodTerrainUpdateData::State &state = _update_data->state;
for (unsigned int lod_index = 0; lod_index < state.lods.size(); ++lod_index) {
@ -1021,7 +1033,7 @@ void VoxelLodTerrain::reset_mesh_maps() {
}
int VoxelLodTerrain::get_lod_count() const {
return _update_data->settings.lod_count;
return _data->get_lod_count();
}
void VoxelLodTerrain::set_generate_collisions(bool enabled) {
@ -1042,7 +1054,7 @@ int VoxelLodTerrain::get_collision_lod_count() const {
}
void VoxelLodTerrain::set_collision_layer(int layer) {
const unsigned int lod_count = _update_data->settings.lod_count;
const unsigned int lod_count = get_lod_count();
_collision_layer = layer;
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
@ -1058,7 +1070,7 @@ int VoxelLodTerrain::get_collision_layer() const {
}
void VoxelLodTerrain::set_collision_mask(int mask) {
const unsigned int lod_count = _update_data->settings.lod_count;
const unsigned int lod_count = get_lod_count();
_collision_mask = mask;
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
@ -1074,7 +1086,7 @@ int VoxelLodTerrain::get_collision_mask() const {
}
void VoxelLodTerrain::set_collision_margin(float margin) {
const unsigned int lod_count = _update_data->settings.lod_count;
const unsigned int lod_count = get_lod_count();
_collision_margin = margin;
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
@ -1100,8 +1112,7 @@ int VoxelLodTerrain::get_mesh_block_region_extent() const {
Vector3i VoxelLodTerrain::voxel_to_data_block_position(Vector3 vpos, int lod_index) const {
ERR_FAIL_COND_V(lod_index < 0, Vector3i());
ERR_FAIL_COND_V(lod_index >= get_lod_count(), Vector3i());
const VoxelDataLodMap::Lod &lod = _data->lods[lod_index];
const Vector3i bpos = lod.map.voxel_to_block(math::floor_to_int(vpos)) >> lod_index;
const Vector3i bpos = _data->voxel_to_block(math::floor_to_int(vpos)) >> lod_index;
return bpos;
}
@ -1336,8 +1347,9 @@ void VoxelLodTerrain::apply_main_thread_update_tasks() {
// and use the camera for them.
const LocalCameraInfo camera = get_local_camera_info();
const Transform3D volume_transform = get_global_transform();
const unsigned int lod_count = get_lod_count();
for (unsigned int lod_index = 0; lod_index < _update_data->settings.lod_count; ++lod_index) {
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
VoxelLodTerrainUpdateData::Lod &lod = _update_data->state.lods[lod_index];
VoxelMeshMap<VoxelMeshBlockVLT> &mesh_map = _mesh_maps_per_lod[lod_index];
std::unordered_set<const VoxelMeshBlockVLT *> activated_blocks;
@ -1522,7 +1534,7 @@ void VoxelLodTerrain::apply_data_block_response(VoxelEngine::BlockDataOutput &ob
return;
}
if (ob.lod >= _update_data->settings.lod_count) {
if (ob.lod >= get_lod_count()) {
// That block was requested at a time where LOD was higher... drop it
++_stats.dropped_block_loads;
return;
@ -1554,7 +1566,12 @@ void VoxelLodTerrain::apply_data_block_response(VoxelEngine::BlockDataOutput &ob
}
if (ob.voxels != nullptr) {
VoxelDataLodMap::Lod &data_lod = _data->lods[ob.lod];
if (!_data->try_set_block_buffer(
ob.position, ob.lod, ob.voxels, ob.type == VoxelEngine::BlockDataOutput::TYPE_LOADED)) {
++_stats.dropped_block_loads;
return;
}
/*VoxelDataLodMap::Lod &data_lod = _data->lods[ob.lod];
if (ob.voxels->get_size() != Vector3iUtil::create(data_lod.map.get_block_size())) {
// Voxel block size is incorrect, drop it
@ -1567,15 +1584,16 @@ void VoxelLodTerrain::apply_data_block_response(VoxelEngine::BlockDataOutput &ob
RWLockWrite wlock(data_lod.map_lock);
VoxelDataBlock *block = data_lod.map.set_block_buffer(ob.position, ob.voxels, false);
CRASH_COND(block == nullptr);
block->set_edited(ob.type == VoxelEngine::BlockDataOutput::TYPE_LOADED);
block->set_edited(ob.type == VoxelEngine::BlockDataOutput::TYPE_LOADED);*/
} else {
// Loading returned an empty block: that means we know the stream does not contain a block here.
// When doing data streaming, we'll generate on the fly if this block is queried.
VoxelDataLodMap::Lod &data_lod = _data->lods[ob.lod];
_data->set_empty_block_buffer(ob.position, ob.lod);
/*VoxelDataLodMap::Lod &data_lod = _data->lods[ob.lod];
RWLockWrite wlock(data_lod.map_lock);
VoxelDataBlock *block = data_lod.map.set_empty_block(ob.position, false);
ZN_ASSERT(block != nullptr);
ZN_ASSERT(block != nullptr);*/
}
{
@ -1602,7 +1620,7 @@ void VoxelLodTerrain::apply_mesh_update(VoxelEngine::BlockMeshOutput &ob) {
CRASH_COND(_update_data == nullptr);
VoxelLodTerrainUpdateData &update_data = *_update_data;
if (ob.lod >= update_data.settings.lod_count) {
if (ob.lod >= get_lod_count()) {
// Sorry, LOD configuration changed, drop that mesh
++_stats.dropped_block_meshs;
return;
@ -1866,7 +1884,7 @@ void VoxelLodTerrain::apply_virtual_texture_update_to_block(
void VoxelLodTerrain::process_deferred_collision_updates(uint32_t timeout_msec) {
ZN_PROFILE_SCOPE();
const unsigned int lod_count = _update_data->settings.lod_count;
const unsigned int lod_count = get_lod_count();
// TODO We may move this into a time-spread task somehow; the timeout does not account for them, so this could take longer
const uint64_t then = get_ticks_msec();
@ -2056,7 +2074,7 @@ void VoxelLodTerrain::set_instancer(VoxelInstancer *instancer) {
Array VoxelLodTerrain::get_mesh_block_surface(Vector3i block_pos, int lod_index) const {
ZN_PROFILE_SCOPE();
const int lod_count = _update_data->settings.lod_count;
const int lod_count = get_lod_count();
ERR_FAIL_COND_V(lod_index < 0 || lod_index >= lod_count, Array());
const VoxelMeshMap<VoxelMeshBlockVLT> &mesh_map = _mesh_maps_per_lod[lod_index];
@ -2077,7 +2095,7 @@ Array VoxelLodTerrain::get_mesh_block_surface(Vector3i block_pos, int lod_index)
}
void VoxelLodTerrain::get_meshed_block_positions_at_lod(int lod_index, std::vector<Vector3i> &out_positions) const {
const int lod_count = _update_data->settings.lod_count;
const int lod_count = get_lod_count();
ERR_FAIL_COND(lod_index < 0 || lod_index >= lod_count);
const VoxelMeshMap<VoxelMeshBlockVLT> &mesh_map = _mesh_maps_per_lod[lod_index];
@ -2096,20 +2114,21 @@ void VoxelLodTerrain::save_all_modified_blocks(bool with_copy) {
// This could be part of the update task if async, but here we want it to be immediate.
_update_data->wait_for_end_of_task();
VoxelLodTerrainUpdateTask::flush_pending_lod_edits(
_update_data->state, *_data, _generator, _update_data->settings.full_load_mode, get_mesh_block_size());
VoxelLodTerrainUpdateTask::flush_pending_lod_edits(_update_data->state, *_data, get_mesh_block_size());
std::vector<VoxelLodTerrainUpdateData::BlockToSave> blocks_to_save;
if (_stream.is_valid()) {
for (unsigned int i = 0; i < _data->lod_count; ++i) {
Ref<VoxelStream> stream = get_stream();
if (stream.is_valid()) {
// That may cause a stutter, so it should be used when the player won't notice
_data->for_each_block(ScheduleSaveAction{ blocks_to_save });
/*for (unsigned int i = 0; i < _data->lod_count; ++i) {
VoxelDataLodMap::Lod &data_lod = _data->lods[i];
RWLockRead rlock(data_lod.map_lock);
// That may cause a stutter, so should be used when the player won't notice
data_lod.map.for_each_block(ScheduleSaveAction{ blocks_to_save });
}
}*/
if (_instancer != nullptr && _stream->supports_instance_blocks()) {
if (_instancer != nullptr && stream->supports_instance_blocks()) {
_instancer->save_all_modified_blocks();
}
}
@ -2181,7 +2200,8 @@ void VoxelLodTerrain::restart_stream() {
void VoxelLodTerrain::remesh_all_blocks() {
// Requests a new mesh for all mesh blocks, without dropping everything first
_update_data->wait_for_end_of_task();
for (unsigned int lod_index = 0; lod_index < _update_data->settings.lod_count; ++lod_index) {
const unsigned int lod_count = get_lod_count();
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
VoxelLodTerrainUpdateData::Lod &lod = _update_data->state.lods[lod_index];
for (auto it = lod.mesh_map_state.map.begin(); it != lod.mesh_map_state.map.end(); ++it) {
VoxelLodTerrainUpdateTask::schedule_mesh_update(it->second, it->first, lod.blocks_pending_update);
@ -2202,7 +2222,7 @@ void VoxelLodTerrain::set_voxel_bounds(Box3i p_box) {
bounds_in_voxels.size[i] = octree_size;
}
}
_update_data->settings.bounds_in_voxels = bounds_in_voxels;
_data->set_bounds(bounds_in_voxels);
_update_data->state.force_update_octrees_next_update = true;
}
@ -2329,20 +2349,21 @@ TypedArray<String> VoxelLodTerrain::get_configuration_warnings() const {
}
// Virtual textures
if (_generator.is_valid()) {
Ref<VoxelGenerator> generator = get_generator();
if (generator.is_valid()) {
if (is_normalmap_enabled()) {
if (!_generator->supports_series_generation()) {
if (!generator->supports_series_generation()) {
warnings.append(TTR(
"Normalmaps are enabled, but it requires the generator to be able to generate series of "
"positions with `generate_series`. The current generator ({0}) does not support it.")
.format(varray(_generator->get_class())));
.format(varray(generator->get_class())));
}
if ((_generator->get_used_channels_mask() & (1 << VoxelBufferInternal::CHANNEL_SDF)) == 0) {
if ((generator->get_used_channels_mask() & (1 << VoxelBufferInternal::CHANNEL_SDF)) == 0) {
warnings.append(TTR("Normalmaps are enabled, but it requires the generator to use the SDF "
"channel. The current generator ({0}) does not support it, or is not "
"configured to do so.")
.format(varray(_generator->get_class())));
.format(varray(generator->get_class())));
}
if (shader_material.is_valid()) {
@ -2396,7 +2417,7 @@ Array VoxelLodTerrain::debug_raycast_mesh_block(Vector3 world_origin, Vector3 wo
const float max_distance = 256;
const float step = 2.f;
float distance = 0.f;
const unsigned int lod_count = _update_data->settings.lod_count;
const unsigned int lod_count = get_lod_count();
const unsigned int mesh_block_size_po2 = _update_data->settings.mesh_block_size_po2;
Array hits;
@ -2427,16 +2448,11 @@ Dictionary VoxelLodTerrain::debug_get_data_block_info(Vector3 fbpos, int lod_ind
const VoxelLodTerrainUpdateData::Lod &lod = _update_data->state.lods[lod_index];
const VoxelDataLodMap::Lod &data_lod = _data->lods[lod_index];
Vector3i bpos = math::floor_to_int(fbpos);
int loading_state = 0;
bool has_block = false;
{
RWLockRead rlock(data_lod.map_lock);
has_block = data_lod.map.has_block(bpos);
}
Vector3i bpos = math::floor_to_int(fbpos);
const bool has_block = _data->has_block(bpos, lod_index);
if (has_block) {
loading_state = 2;
} else {
@ -2698,12 +2714,10 @@ void VoxelLodTerrain::update_gizmos() {
// Edited blocks
if (debug_get_draw_flag(DEBUG_DRAW_EDITED_BLOCKS) && _edited_blocks_gizmos_lod_index < lod_count) {
const VoxelDataLodMap::Lod &data_lod = _data->lods[_edited_blocks_gizmos_lod_index];
const int data_block_size = get_data_block_size() << _edited_blocks_gizmos_lod_index;
const Basis basis(Basis().scaled(Vector3(data_block_size, data_block_size, data_block_size)));
RWLockRead rlock(data_lod.map_lock);
data_lod.map.for_each_block(
_data->for_each_block_at_lod(
[&dr, parent_transform, data_block_size, basis](const Vector3i &bpos, const VoxelDataBlock &block) {
if (block.is_edited()) {
const Transform3D local_transform(basis, bpos * data_block_size);
@ -2711,7 +2725,8 @@ void VoxelLodTerrain::update_gizmos() {
const Color8 c = block.is_modified() ? Color8(255, 255, 0, 255) : Color8(0, 255, 0, 255);
dr.draw_box_mm(t, c);
}
});
},
_edited_blocks_gizmos_lod_index);
}
// Debug updates
@ -2766,8 +2781,10 @@ Array VoxelLodTerrain::_b_debug_print_sdf_top_down(Vector3i center, Vector3i ext
ERR_FAIL_COND_V(!math::is_valid_size(extents), Array());
Array image_array;
const unsigned int lod_count = get_lod_count();
const VoxelData &data = *_data;
for (unsigned int lod_index = 0; lod_index < _data->lod_count; ++lod_index) {
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
const Box3i world_box = Box3i::from_center_extents(center >> lod_index, extents >> lod_index);
if (Vector3iUtil::get_volume(world_box.size) == 0) {
@ -2777,11 +2794,11 @@ Array VoxelLodTerrain::_b_debug_print_sdf_top_down(Vector3i center, Vector3i ext
VoxelBufferInternal buffer;
buffer.create(world_box.size);
world_box.for_each_cell([this, world_box, &buffer](const Vector3i &world_pos) {
world_box.for_each_cell([this, world_box, &buffer, &data](const Vector3i &world_pos) {
const Vector3i rpos = world_pos - world_box.pos;
VoxelSingleValue v;
v.f = 1.f;
v = get_voxel(world_pos, VoxelBufferInternal::CHANNEL_SDF, v);
v = data.get_voxel(world_pos, VoxelBufferInternal::CHANNEL_SDF, v);
buffer.set_voxel_f(v.f, rpos.x, rpos.y, rpos.z, VoxelBufferInternal::CHANNEL_SDF);
});
@ -2803,13 +2820,14 @@ int VoxelLodTerrain::_b_debug_get_mesh_block_count() const {
}
int VoxelLodTerrain::_b_debug_get_data_block_count() const {
int sum = 0;
return _data->get_block_count();
/*int sum = 0;
for (unsigned int lod_index = 0; lod_index < _data->lod_count; ++lod_index) {
const VoxelDataLodMap::Lod &data_lod = _data->lods[lod_index];
RWLockRead rlock(data_lod.map_lock);
sum += data_lod.map.get_block_count();
}
return sum;
return sum;*/
}
Error VoxelLodTerrain::_b_debug_dump_as_scene(String fpath, bool include_instancer) const {

View File

@ -3,7 +3,7 @@
#include "../../engine/mesh_block_task.h"
#include "../../engine/voxel_engine.h"
#include "../../storage/voxel_data_map.h"
#include "../../storage/voxel_data.h"
#include "../../util/godot/shader_material_pool.h"
#include "../voxel_mesh_map.h"
#include "../voxel_node.h"
@ -111,6 +111,7 @@ public:
void set_normalmap_begin_lod_index(int lod_index);
int get_normalmap_begin_lod_index() const;
/*
bool is_area_editable(Box3i p_box) const;
VoxelSingleValue get_voxel(Vector3i pos, unsigned int channel, VoxelSingleValue defval);
bool try_set_voxel_without_update(Vector3i pos, unsigned int channel, uint64_t value);
@ -160,6 +161,7 @@ public:
}
post_edit_area(voxel_box);
}
*/
// These must be called after an edit
void post_edit_area(Box3i p_box);
@ -172,8 +174,8 @@ public:
void set_voxel_bounds(Box3i p_box);
inline Box3i get_voxel_bounds() const {
CRASH_COND(_update_data == nullptr);
return _update_data->settings.bounds_in_voxels;
ZN_ASSERT(_data != nullptr);
return _data->get_bounds();
}
void set_collision_update_delay(int delay_msec);
@ -272,7 +274,12 @@ public:
Array get_mesh_block_surface(Vector3i block_pos, int lod_index) const;
void get_meshed_block_positions_at_lod(int lod_index, std::vector<Vector3i> &out_positions) const;
std::shared_ptr<VoxelDataLodMap> get_storage() const {
inline VoxelData &get_storage() const {
ZN_ASSERT(_data != nullptr);
return *_data;
}
inline std::shared_ptr<VoxelData> get_storage_shared() const {
return _data;
}
@ -398,12 +405,12 @@ private:
VoxelInstancer *_instancer = nullptr;
Ref<VoxelMesher> _mesher;
Ref<VoxelGenerator> _generator;
Ref<VoxelStream> _stream;
// Ref<VoxelGenerator> _generator;
// Ref<VoxelStream> _stream;
// Data stored with a shared pointer so it can be sent to asynchronous tasks
bool _threaded_update_enabled = false;
std::shared_ptr<VoxelDataLodMap> _data;
std::shared_ptr<VoxelData> _data;
std::shared_ptr<VoxelLodTerrainUpdateData> _update_data;
std::shared_ptr<StreamingDependency> _streaming_dependency;
std::shared_ptr<MeshingDependency> _meshing_dependency;

View File

@ -4,7 +4,7 @@
#include "../../constants/voxel_constants.h"
#include "../../engine/distance_normalmaps.h"
#include "../../generators/voxel_generator.h"
#include "../../storage/voxel_data_map.h"
#include "../../storage/voxel_data.h"
#include "../../streams/voxel_stream.h"
#include "../../util/fixed_array.h"
#include "../voxel_mesh_map.h"
@ -47,12 +47,13 @@ struct VoxelLodTerrainUpdateData {
// Area within which voxels can exist.
// Note that these bounds might not be exactly represented. This volume is chunk-based, so the result will be
// approximated to the closest chunk.
Box3i bounds_in_voxels;
unsigned int lod_count = 0;
// Box3i bounds_in_voxels;
// unsigned int lod_count = 0;
// Distance between a viewer and the end of LOD0
float lod_distance = 0.f;
unsigned int view_distance_voxels = 512;
bool full_load_mode = false;
// bool full_load_mode = false;
bool run_stream_in_editor = true;
// If true, try to generate blocks and store them in the data map before posting mesh requests.
// If false, everything will generate non-edited voxels on the fly instead.

View File

@ -14,10 +14,44 @@
namespace zylann::voxel {
void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(VoxelLodTerrainUpdateData::State &state, VoxelDataLodMap &data,
Ref<VoxelGenerator> generator, bool full_load_mode, const int mesh_block_size) {
void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(
VoxelLodTerrainUpdateData::State &state, VoxelData &data, const int mesh_block_size) {
ZN_DSTACK();
ZN_PROFILE_SCOPE();
static thread_local std::vector<Vector3i> tls_modified_lod0_blocks;
static thread_local std::vector<VoxelData::BlockLocation> tls_updated_block_locations;
const int data_block_size = data.get_block_size();
const int data_to_mesh_factor = mesh_block_size / data_block_size;
{
MutexLock lock(state.blocks_pending_lodding_lod0_mutex);
// Not sure if we could just use `=`? What would std::vector do with capacity?
tls_modified_lod0_blocks.resize(state.blocks_pending_lodding_lod0.size());
memcpy(tls_modified_lod0_blocks.data(), state.blocks_pending_lodding_lod0.data(),
state.blocks_pending_lodding_lod0.size() * sizeof(Vector3i));
state.blocks_pending_lodding_lod0.clear();
}
tls_updated_block_locations.clear();
data.update_lods(to_span(tls_modified_lod0_blocks), &tls_updated_block_locations);
// Schedule mesh updates at every affected LOD
for (const VoxelData::BlockLocation loc : tls_updated_block_locations) {
const Vector3i mesh_block_pos = math::floordiv(loc.position, data_to_mesh_factor);
VoxelLodTerrainUpdateData::Lod &dst_lod = state.lods[loc.lod_index];
auto mesh_block_it = dst_lod.mesh_map_state.map.find(mesh_block_pos);
if (mesh_block_it != dst_lod.mesh_map_state.map.end()) {
// If a mesh exists here, it will need an update.
// If there is no mesh, it will probably get created later when we come closer to it
schedule_mesh_update(mesh_block_it->second, mesh_block_pos, dst_lod.blocks_pending_update);
}
}
/*
// Propagates edits performed so far to other LODs.
// These LODs must be currently in memory, otherwise terrain data will miss it.
// This is currently ensured by the fact we load blocks in a "pyramidal" way,
@ -27,7 +61,9 @@ void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(VoxelLodTerrainUpdateDat
const int data_block_size = data.lods[0].map.get_block_size();
const int data_block_size_po2 = data.lods[0].map.get_block_size_pow2();
const int data_to_mesh_factor = mesh_block_size / data_block_size;
const unsigned int lod_count = data.lod_count;
const unsigned int lod_count = data.get_lod_count();
const bool streaming_enabled = data.is_streaming_enabled();
Ref<VoxelGenerator> generator = data.get_generator();
static thread_local FixedArray<std::vector<Vector3i>, constants::MAX_LOD> tls_blocks_to_process_per_lod;
@ -99,7 +135,7 @@ void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(VoxelLodTerrainUpdateDat
src_block->set_needs_lodding(false);
if (dst_block == nullptr) {
if (full_load_mode) {
if (!streaming_enabled) {
// TODO Doing this on the main thread can be very demanding and cause a stall.
// We should find a way to make it asynchronous, not need mips, or not edit outside viewers area.
std::shared_ptr<VoxelBufferInternal> voxels = make_shared_instance<VoxelBufferInternal>();
@ -113,7 +149,7 @@ void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(VoxelLodTerrainUpdateDat
ZN_PROFILE_SCOPE_NAMED("Generate");
generator->generate_block(q);
}
data.modifiers.apply(
data.get_modifiers().apply(
q.voxel_buffer, AABB(q.origin_in_voxels, q.voxel_buffer.get_size() << dst_lod_index));
dst_block = dst_data_lod.map.set_block_buffer(dst_bpos, voxels, true);
@ -170,6 +206,7 @@ void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(VoxelLodTerrainUpdateDat
// if (time_spent > 10) {
// print_line(String("Took {0} us to update lods").format(varray(time_spent)));
// }
*/
}
struct BeforeUnloadDataAction {
@ -195,7 +232,7 @@ struct BeforeUnloadDataAction {
}
};
static void unload_data_block_no_lock(VoxelLodTerrainUpdateData::Lod &lod, VoxelDataLodMap::Lod &data_lod,
/*static void unload_data_block_no_lock(VoxelLodTerrainUpdateData::Lod &lod, VoxelDataLodMap::Lod &data_lod,
Vector3i block_pos, std::vector<VoxelLodTerrainUpdateData::BlockToSave> &blocks_to_save, bool can_save) {
ZN_PROFILE_SCOPE();
@ -212,23 +249,27 @@ static void unload_data_block_no_lock(VoxelLodTerrainUpdateData::Lod &lod, Voxel
// No need to remove things from blocks_pending_load,
// This vector is filled and cleared immediately in the main process.
// It is a member only to re-use its capacity memory over frames.
}
}*/
static void process_unload_data_blocks_sliding_box(VoxelLodTerrainUpdateData::State &state, VoxelDataLodMap &data,
static void process_unload_data_blocks_sliding_box(VoxelLodTerrainUpdateData::State &state, VoxelData &data,
Vector3 p_viewer_pos, std::vector<VoxelLodTerrainUpdateData::BlockToSave> &blocks_to_save, bool can_save,
const VoxelLodTerrainUpdateData::Settings &settings) {
ZN_PROFILE_SCOPE_NAMED("Sliding box data unload");
// TODO Could it actually be enough to have a rolling update on all blocks?
// This should be the same distance relative to each LOD
const int data_block_size = data.lods[0].map.get_block_size();
const int data_block_size_po2 = data.lods[0].map.get_block_size_pow2();
const int data_block_size = data.get_block_size();
const int data_block_size_po2 = data.get_block_size_po2();
const int data_block_region_extent =
VoxelEngine::get_octree_lod_block_region_extent(settings.lod_distance, data_block_size);
const Box3i bounds_in_voxels = data.get_bounds();
const int mesh_block_size = 1 << settings.mesh_block_size_po2;
const int lod_count = data.lod_count;
const int lod_count = data.get_lod_count();
static thread_local std::vector<Box3i> tls_to_remove;
tls_to_remove.clear();
// Ignore largest lod because it can extend a little beyond due to the view distance setting.
// Instead, those blocks are unloaded by the octree forest management.
@ -245,8 +286,8 @@ static void process_unload_data_blocks_sliding_box(VoxelLodTerrainUpdateData::St
VoxelDataMap::voxel_to_block_b(math::floor_to_int(p_viewer_pos), block_size_po2);
const Box3i bounds_in_blocks = Box3i( //
settings.bounds_in_voxels.pos >> block_size_po2, //
settings.bounds_in_voxels.size >> block_size_po2);
bounds_in_voxels.pos >> block_size_po2, //
bounds_in_voxels.size >> block_size_po2);
const Box3i new_box =
Box3i::from_center_extents(viewer_block_pos_within_lod, Vector3iUtil::create(data_block_region_extent));
@ -262,14 +303,25 @@ static void process_unload_data_blocks_sliding_box(VoxelLodTerrainUpdateData::St
if (prev_box != new_box) {
ZN_PROFILE_SCOPE_NAMED("Unload data");
VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
RWLockWrite wlock(data_lod.map_lock);
prev_box.difference(new_box, [&lod, &data_lod, &blocks_to_save, can_save](Box3i out_of_range_box) {
// VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
// RWLockWrite wlock(data_lod.map_lock);
tls_to_remove.clear();
prev_box.difference_to_vec(new_box, tls_to_remove);
for (const Box3i bbox : tls_to_remove) {
data.unload_blocks(bbox, lod_index, //
[&blocks_to_save, can_save](VoxelDataBlock &block, Vector3i bpos) {
BeforeUnloadDataAction{ blocks_to_save, bpos, can_save }(block);
});
}
/*prev_box.difference(new_box, [&lod, &data_lod, &blocks_to_save, can_save](Box3i out_of_range_box) {
out_of_range_box.for_each_cell([&lod, &data_lod, &blocks_to_save, can_save](Vector3i pos) {
//print_line(String("Immerge {0}").format(varray(pos.to_vec3())));
unload_data_block_no_lock(lod, data_lod, pos, blocks_to_save, can_save);
});
});
});*/
}
{
@ -305,7 +357,7 @@ static void process_unload_data_blocks_sliding_box(VoxelLodTerrainUpdateData::St
}
static void process_unload_mesh_blocks_sliding_box(VoxelLodTerrainUpdateData::State &state, Vector3 p_viewer_pos,
const VoxelLodTerrainUpdateData::Settings &settings) {
const VoxelLodTerrainUpdateData::Settings &settings, const VoxelData &data) {
ZN_PROFILE_SCOPE_NAMED("Sliding box mesh unload");
// TODO Could it actually be enough to have a rolling update on all blocks?
@ -314,11 +366,13 @@ static void process_unload_mesh_blocks_sliding_box(VoxelLodTerrainUpdateData::St
const int mesh_block_size = 1 << mesh_block_size_po2;
const int mesh_block_region_extent =
VoxelEngine::get_octree_lod_block_region_extent(settings.lod_distance, mesh_block_size);
const int lod_count = data.get_lod_count();
const Box3i bounds_in_voxels = data.get_bounds();
// Ignore largest lod because it can extend a little beyond due to the view distance setting.
// Instead, those blocks are unloaded by the octree forest management.
// Iterating from big to small LOD so we can exit earlier if bounds don't intersect.
for (int lod_index = settings.lod_count - 2; lod_index >= 0; --lod_index) {
for (int lod_index = lod_count - 2; lod_index >= 0; --lod_index) {
ZN_PROFILE_SCOPE();
VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
@ -326,8 +380,8 @@ static void process_unload_mesh_blocks_sliding_box(VoxelLodTerrainUpdateData::St
const Vector3i viewer_block_pos_within_lod = math::floor_to_int(p_viewer_pos) >> block_size_po2;
const Box3i bounds_in_blocks = Box3i( //
settings.bounds_in_voxels.pos >> block_size_po2, //
settings.bounds_in_voxels.size >> block_size_po2);
bounds_in_voxels.pos >> block_size_po2, //
bounds_in_voxels.size >> block_size_po2);
const Box3i new_box =
Box3i::from_center_extents(viewer_block_pos_within_lod, Vector3iUtil::create(mesh_block_region_extent));
@ -368,19 +422,20 @@ static void process_unload_mesh_blocks_sliding_box(VoxelLodTerrainUpdateData::St
}
void process_octrees_sliding_box(VoxelLodTerrainUpdateData::State &state, Vector3 p_viewer_pos,
const VoxelLodTerrainUpdateData::Settings &settings) {
const VoxelLodTerrainUpdateData::Settings &settings, const VoxelData &data) {
ZN_PROFILE_SCOPE_NAMED("Sliding box octrees");
// TODO Investigate if multi-octree can produce cracks in the terrain (so far I haven't noticed)
const unsigned int lod_count = data.get_lod_count();
const unsigned int mesh_block_size_po2 = settings.mesh_block_size_po2;
const unsigned int octree_size_po2 = LodOctree::get_octree_size_po2(mesh_block_size_po2, settings.lod_count);
const unsigned int octree_size_po2 = LodOctree::get_octree_size_po2(mesh_block_size_po2, lod_count);
const unsigned int octree_size = 1 << octree_size_po2;
const unsigned int octree_region_extent = 1 + settings.view_distance_voxels / (1 << octree_size_po2);
const Vector3i viewer_octree_pos =
(math::floor_to_int(p_viewer_pos) + Vector3iUtil::create(octree_size / 2)) >> octree_size_po2;
const Box3i bounds_in_octrees = settings.bounds_in_voxels.downscaled(octree_size);
const Box3i bounds_in_octrees = data.get_bounds().downscaled(octree_size);
const Box3i new_box = Box3i::from_center_extents(viewer_octree_pos, Vector3iUtil::create(octree_region_extent))
.clipped(bounds_in_octrees);
@ -454,12 +509,12 @@ void process_octrees_sliding_box(VoxelLodTerrainUpdateData::State &state, Vector
}
};
ExitAction exit_action{ state, settings.lod_count };
EnterAction enter_action{ state, settings.lod_count };
ExitAction exit_action{ state, lod_count };
EnterAction enter_action{ state, lod_count };
{
ZN_PROFILE_SCOPE_NAMED("Unload octrees");
const unsigned int last_lod_index = settings.lod_count - 1;
const unsigned int last_lod_index = lod_count - 1;
VoxelLodTerrainUpdateData::Lod &last_lod = state.lods[last_lod_index];
RWLockWrite wlock(last_lod.mesh_map_state.map_lock);
@ -503,7 +558,7 @@ static void add_transition_updates_around(VoxelLodTerrainUpdateData::Lod &lod, V
// or maybe get_transition_mask needs a different approach that also looks at higher lods?
}
void try_schedule_loading_with_neighbors_no_lock(VoxelLodTerrainUpdateData::State &state, VoxelDataLodMap &data,
/*void try_schedule_loading_with_neighbors_no_lock(VoxelLodTerrainUpdateData::State &state, VoxelDataLodMap &data,
const Vector3i &p_data_block_pos, uint8_t lod_index,
std::vector<VoxelLodTerrainUpdateData::BlockLocation> &blocks_to_load, const Box3i &bounds_in_voxels) {
//
@ -545,14 +600,14 @@ void try_schedule_loading_with_neighbors_no_lock(VoxelLodTerrainUpdateData::Stat
}
}
}
}
}*/
inline bool check_block_sizes(int data_block_size, int mesh_block_size) {
return (data_block_size == 16 || data_block_size == 32) && (mesh_block_size == 16 || mesh_block_size == 32) &&
mesh_block_size >= data_block_size;
}
bool check_block_mesh_updated(VoxelLodTerrainUpdateData::State &state, VoxelDataLodMap &data,
bool check_block_mesh_updated(VoxelLodTerrainUpdateData::State &state, const VoxelData &data,
VoxelLodTerrainUpdateData::MeshBlockState &mesh_block, Vector3i mesh_block_pos, uint8_t lod_index,
std::vector<VoxelLodTerrainUpdateData::BlockLocation> &blocks_to_load,
const VoxelLodTerrainUpdateData::Settings &settings) {
@ -565,28 +620,50 @@ bool check_block_mesh_updated(VoxelLodTerrainUpdateData::State &state, VoxelData
switch (mesh_state) {
case VoxelLodTerrainUpdateData::MESH_NEVER_UPDATED:
case VoxelLodTerrainUpdateData::MESH_NEED_UPDATE: {
const int mesh_block_size = 1 << settings.mesh_block_size_po2;
const int data_block_size = data.lods[0].map.get_block_size();
#ifdef DEBUG_ENABLED
ERR_FAIL_COND_V(!check_block_sizes(data_block_size, mesh_block_size), false);
#endif
// Find data block neighbors positions
const int factor = mesh_block_size / data_block_size;
const Vector3i data_block_pos0 = factor * mesh_block_pos;
const Box3i data_box(data_block_pos0 - Vector3i(1, 1, 1), Vector3iUtil::create(factor) + Vector3i(2, 2, 2));
const Box3i bounds = settings.bounds_in_voxels.downscaled(data_block_size);
FixedArray<Vector3i, 56> neighbor_positions;
unsigned int neighbor_positions_count = 0;
data_box.for_inner_outline([bounds, &neighbor_positions, &neighbor_positions_count](Vector3i pos) {
if (bounds.contains(pos)) {
neighbor_positions[neighbor_positions_count] = pos;
++neighbor_positions_count;
}
});
bool surrounded = true;
if (settings.full_load_mode == false) {
const VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
if (data.is_streaming_enabled()) {
const int mesh_block_size = 1 << settings.mesh_block_size_po2;
const int data_block_size = data.get_block_size();
#ifdef DEBUG_ENABLED
ERR_FAIL_COND_V(!check_block_sizes(data_block_size, mesh_block_size), false);
#endif
// TODO Why are we only checking neighbors?
// This is also redundant when called from `check_block_loaded_and_meshed`
// Find data block neighbors positions
const int factor = mesh_block_size / data_block_size;
const Vector3i data_block_pos0 = factor * mesh_block_pos;
const Box3i data_box(
data_block_pos0 - Vector3i(1, 1, 1), Vector3iUtil::create(factor) + Vector3i(2, 2, 2));
const Box3i bounds = data.get_bounds().downscaled(data_block_size);
// 56 is the maximum number of positions that can be gathered this way (mesh block size 32, data block size 16):
// the factor is then 2, and the inner outline of the resulting 4x4x4 box has 4^3 - 2^3 = 56 cells.
FixedArray<Vector3i, 56> neighbor_positions;
unsigned int neighbor_positions_count = 0;
data_box.for_inner_outline([bounds, &neighbor_positions, &neighbor_positions_count](Vector3i pos) {
if (bounds.contains(pos)) {
neighbor_positions[neighbor_positions_count] = pos;
++neighbor_positions_count;
}
});
static thread_local std::vector<Vector3i> tls_missing;
tls_missing.clear();
// Check if neighbors are loaded
data.get_missing_blocks(to_span(neighbor_positions, neighbor_positions_count), lod_index, tls_missing);
surrounded = tls_missing.size() == 0;
// Schedule loading for missing neighbors
MutexLock lock(lod.loading_blocks_mutex);
for (const Vector3i &missing_pos : tls_missing) {
if (!lod.has_loading_block(missing_pos)) {
blocks_to_load.push_back({ missing_pos, lod_index });
lod.loading_blocks.insert(missing_pos);
}
}
/*const VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
// Check if neighbors are loaded
RWLockRead rlock(data_lod.map_lock);
// TODO Optimization: could put in a temp vector and insert in one go after the loop?
@ -602,7 +679,7 @@ bool check_block_mesh_updated(VoxelLodTerrainUpdateData::State &state, VoxelData
lod.loading_blocks.insert(npos);
}
}
}
}*/
}
if (surrounded) {
@ -651,24 +728,41 @@ VoxelLodTerrainUpdateData::MeshBlockState &insert_new(
}
static bool check_block_loaded_and_meshed(VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, VoxelDataLodMap &data, const Vector3i &p_mesh_block_pos,
const VoxelLodTerrainUpdateData::Settings &settings, const VoxelData &data, const Vector3i &p_mesh_block_pos,
uint8_t lod_index, std::vector<VoxelLodTerrainUpdateData::BlockLocation> &blocks_to_load) {
//
VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
const int mesh_block_size = 1 << settings.mesh_block_size_po2;
const int data_block_size = data.lods[0].map.get_block_size();
if (data.is_streaming_enabled()) {
const int mesh_block_size = 1 << settings.mesh_block_size_po2;
const int data_block_size = data.get_block_size();
#ifdef DEBUG_ENABLED
ERR_FAIL_COND_V(!check_block_sizes(data_block_size, mesh_block_size), false);
ERR_FAIL_COND_V(!check_block_sizes(data_block_size, mesh_block_size), false);
#endif
if (settings.full_load_mode == false) {
// We want to know everything about the data intersecting this mesh block.
// This is not known in advance when we stream it; it has to be requested.
// When not streaming, `block == null` is the same as `!block->has_voxels()` so we wouldn't need to enter here.
VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
static thread_local std::vector<Vector3i> tls_missing;
tls_missing.clear();
const int factor = mesh_block_size / data_block_size;
const Box3i data_blocks_box = Box3i(p_mesh_block_pos * factor, Vector3iUtil::create(factor)).padded(1);
data.get_missing_blocks(data_blocks_box, lod_index, tls_missing);
if (tls_missing.size() > 0) {
VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
for (const Vector3i &missing_bpos : tls_missing) {
if (!lod.has_loading_block(missing_bpos)) {
blocks_to_load.push_back({ missing_bpos, lod_index });
lod.loading_blocks.insert(missing_bpos);
}
}
return false;
}
/*VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
if (mesh_block_size > data_block_size) {
const int factor = mesh_block_size / data_block_size;
@ -706,9 +800,11 @@ static bool check_block_loaded_and_meshed(VoxelLodTerrainUpdateData::State &stat
state, data, data_block_pos, lod_index, blocks_to_load, settings.bounds_in_voxels);
return false;
}
}
}*/
}
VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
VoxelLodTerrainUpdateData::MeshBlockState *mesh_block = nullptr;
auto mesh_block_it = lod.mesh_map_state.map.find(p_mesh_block_pos);
if (mesh_block_it == lod.mesh_map_state.map.end()) {
@ -806,13 +902,14 @@ uint8_t VoxelLodTerrainUpdateTask::get_transition_mask(
}
static void process_octrees_fitting(VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, VoxelDataLodMap &data, Vector3 p_viewer_pos,
const VoxelLodTerrainUpdateData::Settings &settings, VoxelData &data, Vector3 p_viewer_pos,
std::vector<VoxelLodTerrainUpdateData::BlockLocation> &data_blocks_to_load) {
//
ZN_PROFILE_SCOPE();
const int mesh_block_size = 1 << settings.mesh_block_size_po2;
const int octree_leaf_node_size = mesh_block_size;
const unsigned int lod_count = data.get_lod_count();
const bool force_update_octrees = state.force_update_octrees_next_update;
state.force_update_octrees_next_update = false;
@ -839,14 +936,14 @@ static void process_octrees_fitting(VoxelLodTerrainUpdateData::State &state,
unsigned int blocked_octree_nodes = 0;
// TODO Maintain a vector to make iteration faster?
// TODO Optimization: Maintain a vector to make iteration faster?
for (auto octree_it = state.lod_octrees.begin(); octree_it != state.lod_octrees.end(); ++octree_it) {
ZN_PROFILE_SCOPE();
struct OctreeActions {
VoxelLodTerrainUpdateData::State &state;
const VoxelLodTerrainUpdateData::Settings &settings;
VoxelDataLodMap &data;
VoxelData &data;
std::vector<VoxelLodTerrainUpdateData::BlockLocation> &data_blocks_to_load;
Vector3i block_offset_lod0;
unsigned int blocked_count = 0;
@ -985,7 +1082,7 @@ static void process_octrees_fitting(VoxelLodTerrainUpdateData::State &state,
};
const Vector3i block_pos_maxlod = octree_it->first;
const Vector3i block_offset_lod0 = block_pos_maxlod << (settings.lod_count - 1);
const Vector3i block_offset_lod0 = block_pos_maxlod << (lod_count - 1);
const Vector3 relative_viewer_pos = p_viewer_pos - Vector3(mesh_block_size * block_offset_lod0);
OctreeActions octree_actions{ //
@ -1029,7 +1126,7 @@ static void process_octrees_fitting(VoxelLodTerrainUpdateData::State &state,
if (mesh_block.active) {
const uint8_t mask =
VoxelLodTerrainUpdateTask::get_transition_mask(state, bpos, lod_index, settings.lod_count);
VoxelLodTerrainUpdateTask::get_transition_mask(state, bpos, lod_index, lod_count);
mesh_block.transition_mask = mask;
lod.mesh_blocks_to_update_transitions.push_back(
VoxelLodTerrainUpdateData::TransitionUpdate{ bpos, mask });
@ -1067,7 +1164,7 @@ static void init_sparse_octree_priority_dependency(PriorityDependency &dep, Vect
// This is only if we want to cache voxel data
static void request_block_generate(uint32_t volume_id, unsigned int data_block_size,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelDataLodMap> &data,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelData> &data,
Vector3i block_pos, int lod, std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data,
const Transform3D &volume_transform, float lod_distance, std::shared_ptr<AsyncDependencyTracker> tracker,
bool allow_drop, BufferedTaskScheduler &task_scheduler) {
@ -1096,7 +1193,7 @@ static void request_block_generate(uint32_t volume_id, unsigned int data_block_s
// Used only when streaming block by block
static void request_block_load(uint32_t volume_id, unsigned int data_block_size,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelDataLodMap> &data,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelData> &data,
Vector3i block_pos, int lod, bool request_instances,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
const VoxelLodTerrainUpdateData::Settings &settings, BufferedTaskScheduler &task_scheduler) {
@ -1126,7 +1223,7 @@ static void request_block_load(uint32_t volume_id, unsigned int data_block_size,
static void send_block_data_requests(uint32_t volume_id,
Span<const VoxelLodTerrainUpdateData::BlockLocation> blocks_to_load,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelDataLodMap> &data,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelData> &data,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, unsigned int data_block_size,
bool request_instances, const Transform3D &volume_transform,
const VoxelLodTerrainUpdateData::Settings &settings, BufferedTaskScheduler &task_scheduler) {
@ -1139,18 +1236,18 @@ static void send_block_data_requests(uint32_t volume_id,
}
static void apply_block_data_requests_as_empty(Span<const VoxelLodTerrainUpdateData::BlockLocation> blocks_to_load,
VoxelDataLodMap &data, VoxelLodTerrainUpdateData::State &state) {
VoxelData &data, VoxelLodTerrainUpdateData::State &state) {
for (unsigned int i = 0; i < blocks_to_load.size(); ++i) {
const VoxelLodTerrainUpdateData::BlockLocation loc = blocks_to_load[i];
VoxelDataLodMap::Lod &data_lod = data.lods[loc.lod];
VoxelLodTerrainUpdateData::Lod &lod = state.lods[loc.lod];
{
MutexLock mlock(lod.loading_blocks_mutex);
lod.loading_blocks.erase(loc.position);
}
{
RWLockWrite wlock(data_lod.map_lock);
data_lod.map.set_empty_block(loc.position, false);
data.set_empty_block_buffer(loc.position, loc.lod);
// RWLockWrite wlock(data_lod.map_lock);
// data_lod.map.set_empty_block(loc.position, false);
}
}
}
@ -1183,21 +1280,22 @@ void VoxelLodTerrainUpdateTask::send_block_save_requests(uint32_t volume_id,
}
static void send_mesh_requests(uint32_t volume_id, VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelDataLodMap> &data_ptr,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelData> &data_ptr,
std::shared_ptr<MeshingDependency> meshing_dependency,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
BufferedTaskScheduler &task_scheduler) {
//
ZN_PROFILE_SCOPE();
CRASH_COND(data_ptr == nullptr);
const VoxelDataLodMap &data = *data_ptr;
ZN_ASSERT(data_ptr != nullptr);
const VoxelData &data = *data_ptr;
const int data_block_size = data.lods[0].map.get_block_size();
const int data_block_size = data.get_block_size();
const int mesh_block_size = 1 << settings.mesh_block_size_po2;
const int render_to_data_factor = mesh_block_size / data_block_size;
const unsigned int lod_count = data.get_lod_count();
for (unsigned int lod_index = 0; lod_index < settings.lod_count; ++lod_index) {
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
ZN_PROFILE_SCOPE();
VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
@ -1241,7 +1339,12 @@ static void send_mesh_requests(uint32_t volume_id, VoxelLodTerrainUpdateData::St
Box3i(render_to_data_factor * mesh_block_pos, Vector3iUtil::create(render_to_data_factor))
.padded(1);
const VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
// Iteration order matters for thread access.
// The array also implicitly encodes block positions due to the convention being used,
// so there is no need to include positions in the request
task->blocks_count = data.get_blocks_with_voxel_data(data_box, lod_index, to_span(task->blocks));
/*const VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
RWLockRead rlock(data_lod.map_lock);
// Iteration order matters for thread access.
@ -1256,7 +1359,7 @@ static void send_mesh_requests(uint32_t volume_id, VoxelLodTerrainUpdateData::St
task->blocks[task->blocks_count] = nblock->get_voxels_shared();
}
++task->blocks_count;
});
});*/
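
A sketch of the calling convention the comment above implies, shown with a standalone vector for clarity. Only get_blocks_with_voxel_data() and to_span() appear in this diff; the element type is inferred from the removed get_voxels_shared() call, and Vector3iUtil::get_volume is an assumed sizing helper:

// Illustrative only: gather the blocks covering a padded mesh area in one bulk query.
std::vector<std::shared_ptr<VoxelBufferInternal>> blocks;
blocks.resize(Vector3iUtil::get_volume(data_box.size)); // one slot per cell of the box (helper assumed)
const unsigned int count = data.get_blocks_with_voxel_data(data_box, lod_index, to_span(blocks));
// Entries follow a fixed traversal order of `data_box`, so an entry's index identifies its
// block position; slots for blocks not present are presumably left empty, matching the
// behavior of the removed per-block loop.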
// TODO There is inconsistency with coordinates sent to this function.
// Sometimes we send data block coordinates, sometimes we send mesh block coordinates. They aren't always
@ -1277,14 +1380,18 @@ static void send_mesh_requests(uint32_t volume_id, VoxelLodTerrainUpdateData::St
// This function schedules one parallel task for every block.
// The returned tracker may be polled to detect when it is complete.
static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelDataLodMap> data_ptr,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelData> data_ptr,
Span<const Box3i> voxel_boxes, Span<IThreadedTask *> next_tasks, uint32_t volume_id,
std::shared_ptr<StreamingDependency> &stream_dependency,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
BufferedTaskScheduler &task_scheduler) {
ZN_PROFILE_SCOPE();
ERR_FAIL_COND_V_MSG(settings.full_load_mode == false, nullptr, "This function can only be used in full load mode");
ZN_ASSERT(data_ptr != nullptr);
VoxelData &data = *data_ptr;
ZN_ASSERT_RETURN_V_MSG(
data.is_streaming_enabled() == false, nullptr, "This function can only be used in full load mode");
struct TaskArguments {
Vector3i block_pos;
@ -1293,11 +1400,10 @@ static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerra
std::vector<TaskArguments> todo;
ZN_ASSERT(data_ptr != nullptr);
VoxelDataLodMap &data = *data_ptr;
const unsigned int data_block_size = data.lods[0].map.get_block_size();
const unsigned int data_block_size = data.get_block_size();
const unsigned int lod_count = data.get_lod_count();
for (unsigned int lod_index = 0; lod_index < settings.lod_count; ++lod_index) {
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
for (unsigned int box_index = 0; box_index < voxel_boxes.size(); ++box_index) {
ZN_PROFILE_SCOPE_NAMED("Box");
@ -1308,7 +1414,18 @@ static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerra
// ZN_PRINT_VERBOSE(String("Preloading box {0} at lod {1}")
// .format(varray(block_box.to_string(), lod_index)));
const VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
static thread_local std::vector<Vector3i> tls_missing;
tls_missing.clear();
data.get_missing_blocks(block_box, lod_index, tls_missing);
for (const Vector3i &missing_bpos : tls_missing) {
if (!lod.has_loading_block(missing_bpos)) {
todo.push_back(TaskArguments{ missing_bpos, lod_index });
lod.loading_blocks.insert(missing_bpos);
}
}
/*const VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
RWLockRead rlock(data_lod.map_lock);
MutexLock lock(lod.loading_blocks_mutex);
@ -1317,7 +1434,7 @@ static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerra
todo.push_back({ block_pos, lod_index });
lod.loading_blocks.insert(block_pos);
}
});
});*/
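
For comparison with the removed code just above, a hedged sketch of what a get_missing_blocks() query could do, assuming VoxelData still wraps the per-LOD map and lock the old code touched directly; the Lod/_lods names, has_block() and the Box3i::for_each_cell visitor are assumptions rather than facts from this diff:

void VoxelData::get_missing_blocks(
		Box3i block_box, unsigned int lod_index, std::vector<Vector3i> &out_missing) const {
	const Lod &data_lod = _lods[lod_index]; // assumed member
	RWLockRead rlock(data_lod.map_lock);
	block_box.for_each_cell([&](Vector3i bpos) {
		if (!data_lod.map.has_block(bpos)) { // has_block() is assumed
			out_missing.push_back(bpos);
		}
	});
}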
}
}
@ -1356,8 +1473,8 @@ static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerra
}
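
A caller-side usage sketch of preload_boxes_async(): only is_streaming_enabled() and the function signature come from this diff, and the is_complete() accessor is hypothetical, standing in for whatever polling API AsyncDependencyTracker actually exposes:

if (!data_ptr->is_streaming_enabled()) { // i.e. full load mode, as the assert in the function requires
	std::shared_ptr<AsyncDependencyTracker> tracker = preload_boxes_async(state, settings, data_ptr, voxel_boxes,
			next_tasks, volume_id, stream_dependency, shared_viewers_data, volume_transform, task_scheduler);
	// Poll on a later update cycle:
	if (tracker != nullptr && tracker->is_complete()) { // hypothetical accessor, for illustration only
		// The requested boxes are generated and cached, and `next_tasks` have been scheduled.
	}
}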
static void process_async_edits(VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelDataLodMap> &data,
uint32_t volume_id, std::shared_ptr<StreamingDependency> &stream_dependency,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelData> &data, uint32_t volume_id,
std::shared_ptr<StreamingDependency> &stream_dependency,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
BufferedTaskScheduler &task_scheduler) {
ZN_PROFILE_SCOPE();
@ -1395,8 +1512,8 @@ static void process_async_edits(VoxelLodTerrainUpdateData::State &state,
}
}
static void process_changed_generated_areas(
VoxelLodTerrainUpdateData::State &state, const VoxelLodTerrainUpdateData::Settings &settings) {
static void process_changed_generated_areas(VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, unsigned int lod_count) {
const unsigned int mesh_block_size = 1 << settings.mesh_block_size_po2;
MutexLock lock(state.changed_generated_areas_mutex);
@ -1404,7 +1521,7 @@ static void process_changed_generated_areas(
return;
}
for (unsigned int lod_index = 0; lod_index < settings.lod_count; ++lod_index) {
for (unsigned int lod_index = 0; lod_index < lod_count; ++lod_index) {
VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
for (auto box_it = state.changed_generated_areas.begin(); box_it != state.changed_generated_areas.end();
@ -1449,7 +1566,7 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
VoxelLodTerrainUpdateData &update_data = *_update_data;
VoxelLodTerrainUpdateData::State &state = update_data.state;
const VoxelLodTerrainUpdateData::Settings &settings = update_data.settings;
VoxelDataLodMap &data = *_data;
VoxelData &data = *_data;
Ref<VoxelGenerator> generator = _streaming_dependency->generator;
Ref<VoxelStream> stream = _streaming_dependency->stream;
ProfilingClock profiling_clock;
@ -1459,7 +1576,7 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
const bool stream_enabled = (stream.is_valid() || generator.is_valid()) &&
(Engine::get_singleton()->is_editor_hint() == false || settings.run_stream_in_editor);
CRASH_COND(data.lod_count != update_data.settings.lod_count);
const unsigned int lod_count = data.get_lod_count();
for (unsigned int lod_index = 0; lod_index < state.lods.size(); ++lod_index) {
const VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
@ -1478,10 +1595,10 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
// These are deferred from edits so we can batch them.
// It has to happen first because blocks can be unloaded afterwards.
// This is also what causes meshes to update after edits.
flush_pending_lod_edits(state, data, generator, settings.full_load_mode, 1 << settings.mesh_block_size_po2);
flush_pending_lod_edits(state, data, 1 << settings.mesh_block_size_po2);
// Other mesh updates
process_changed_generated_areas(state, settings);
process_changed_generated_areas(state, settings, lod_count);
static thread_local std::vector<VoxelLodTerrainUpdateData::BlockToSave> data_blocks_to_save;
static thread_local std::vector<VoxelLodTerrainUpdateData::BlockLocation> data_blocks_to_load;
@ -1490,17 +1607,17 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
profiling_clock.restart();
{
// Unload data blocks falling out of block region extent
if (update_data.settings.full_load_mode == false) {
if (data.is_streaming_enabled()) {
process_unload_data_blocks_sliding_box(
state, data, _viewer_pos, data_blocks_to_save, stream.is_valid(), settings);
}
// Unload mesh blocks falling out of block region extent
process_unload_mesh_blocks_sliding_box(state, _viewer_pos, settings);
process_unload_mesh_blocks_sliding_box(state, _viewer_pos, settings, data);
// Create and remove octrees in a grid around the viewer.
// Mesh blocks drive the loading of voxel data and visuals.
process_octrees_sliding_box(state, _viewer_pos, settings);
process_octrees_sliding_box(state, _viewer_pos, settings, data);
state.stats.blocked_lods = 0;
@ -1521,7 +1638,7 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
ZN_PROFILE_SCOPE_NAMED("IO requests");
// It's possible the user didn't set a stream yet, or it is turned off
if (stream_enabled) {
const unsigned int data_block_size = data.lods[0].map.get_block_size();
const unsigned int data_block_size = data.get_block_size();
if (stream.is_null() && !settings.cache_generated_blocks) {
// TODO Optimization: not ideal because a bit delayed. It requires a second update cycle for meshes to

View File

@ -20,7 +20,7 @@ struct MeshingDependency;
//
class VoxelLodTerrainUpdateTask : public IThreadedTask {
public:
VoxelLodTerrainUpdateTask(std::shared_ptr<VoxelDataLodMap> p_data,
VoxelLodTerrainUpdateTask(std::shared_ptr<VoxelData> p_data,
std::shared_ptr<VoxelLodTerrainUpdateData> p_update_data,
std::shared_ptr<StreamingDependency> p_streaming_dependency,
std::shared_ptr<MeshingDependency> p_meshing_dependency,
@ -41,8 +41,8 @@ public:
// Functions also used outside of this task
static void flush_pending_lod_edits(VoxelLodTerrainUpdateData::State &state, VoxelDataLodMap &data,
Ref<VoxelGenerator> generator, bool full_load_mode, const int mesh_block_size);
static void flush_pending_lod_edits(
VoxelLodTerrainUpdateData::State &state, VoxelData &data, const int mesh_block_size);
static uint8_t get_transition_mask(
const VoxelLodTerrainUpdateData::State &state, Vector3i block_pos, int lod_index, unsigned int lod_count);
@ -68,7 +68,7 @@ public:
BufferedTaskScheduler &task_scheduler);
private:
std::shared_ptr<VoxelDataLodMap> _data;
std::shared_ptr<VoxelData> _data;
std::shared_ptr<VoxelLodTerrainUpdateData> _update_data;
std::shared_ptr<StreamingDependency> _streaming_dependency;
std::shared_ptr<MeshingDependency> _meshing_dependency;

View File

@ -213,7 +213,7 @@ public:
// Subtracts another box from the current box.
// If there is any remaining volume, the boxes composing it are added to the given vector.
inline void difference(const Box3i &b, std::vector<Box3i> &output) {
inline void difference_to_vec(const Box3i &b, std::vector<Box3i> &output) const {
difference(b, [&output](const Box3i &sub_box) { output.push_back(sub_box); });
}
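
A short usage sketch of the renamed helper, based only on the comment and signatures above; a typical sliding-box pattern subtracts the new view box from the previous one to find regions that fell out of range (the box variable names are illustrative):

std::vector<Box3i> regions_to_unload;
prev_box.difference_to_vec(new_box, regions_to_unload);
for (const Box3i &region : regions_to_unload) {
	// Unload or save the blocks covered by `region`
}
// The lambda-based difference() used internally above remains available when no vector is needed:
prev_box.difference(new_box, [](const Box3i &region) { /* handle region directly */ });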