Merge branch 'modifiers'

master
Marc Gilleron 2022-06-20 19:53:23 +01:00
commit c51f3d8005
35 changed files with 1572 additions and 177 deletions

View File

@ -417,3 +417,26 @@ Node name | Description
Curve | Returns the value of a custom `curve` at coordinate `x`, where `x` is in the range `[0..1]`. The `curve` is specified with a `Curve` resource.
Image2D | Returns the value of the red channel of an `image` at coordinates `(x, y)`, where `x` and `y` are in pixels and the return value is in the range `[0..1]` (or more if the image has an HDR format). If coordinates are outside the image, they will be wrapped around. No filtering is performed. The image must have an uncompressed format.
Modifiers
-----------
Modifiers are generators that affect a limited region of the volume. They stack on top of the base generated voxels or other modifiers, and affect the final result. This workflow is mostly useful when your world has a finite size and you want to set up specific landscape shapes non-destructively from the editor.
!!! note
    This feature is currently only implemented with `VoxelLodTerrain`, and only works for sculpting smooth voxels. It is in early stages, so it is quite limited.
Modifiers can be added as child nodes of the terrain. `VoxelModifierSphere` adds or subtracts a sphere, while `VoxelModifierMesh` adds or subtracts a mesh. For the latter, the mesh must first be baked into an SDF volume using the `VoxelMeshSDF` resource.
Because modifiers are part of the procedural generation stack, destructive edits always override them: once a block is edited, modifiers can no longer affect it. The assumption is that such edits come from players at runtime, while modifiers themselves don't change.
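For illustration, here is a minimal C++ sketch of the same setup done from module code rather than the editor. The include paths assume the module is installed as `modules/voxel`, and `terrain`, the radius and the position are placeholders:

```cpp
// Minimal sketch: carving a smooth spherical hole under an existing VoxelLodTerrain.
// `terrain` is assumed to be a VoxelLodTerrain already present in the scene tree.
#include "modules/voxel/storage/modifiers_gd.h"
#include "modules/voxel/terrain/variable_lod/voxel_lod_terrain.h"

using namespace zylann::voxel;

void add_sphere_modifier(VoxelLodTerrain *terrain) {
	gd::VoxelModifierSphere *sphere = memnew(gd::VoxelModifierSphere);
	sphere->set_radius(8.0);
	sphere->set_operation(gd::VoxelModifier::OPERATION_REMOVE); // Subtract instead of add
	sphere->set_smoothness(2.0);
	sphere->set_position(Vector3(0, 20, 0)); // Local to the terrain
	terrain->add_child(sphere); // The modifier registers itself with the terrain when parented
}
```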
Caching
---------
Generators are designed to be deterministic: if the same area is generated twice, the result must be the same. Ultimately, this means only edited voxels (i.e. "destructive" edits) need to be stored, while non-edited regions can be recomputed on the fly. Even accessing a single voxel in a non-edited location will invoke the generator just to obtain that voxel.
However, if a generator is too expensive or not expected to run this way, it may be desirable to store the output in memory so that querying the same area again picks up the cached data.
By default, `VoxelTerrain` caches blocks in memory until they get far from any viewer. `VoxelLodTerrain` does not cache blocks by default. There is no option yet to change that behavior.
It is also possible to tell a `VoxelGenerator` to save its output to the current `VoxelStream`, if one is set up. However, these blocks will then act as edited ones, behaving as if the changes had been made destructively.
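To illustrate the deterministic contract described above, here is a rough C++ sketch of filling a single block straight from a generator. The include paths, buffer size and origin are placeholders:

```cpp
#include "modules/voxel/generators/voxel_generator.h"
#include "modules/voxel/storage/voxel_buffer_internal.h"

using namespace zylann::voxel;

void generate_one_block(Ref<VoxelGenerator> generator) {
	VoxelBufferInternal voxels;
	voxels.create(16, 16, 16);
	// Query the block at voxel origin (0,0,0), LOD 0.
	VoxelGenerator::VoxelQueryData q{ voxels, Vector3i(0, 0, 0), 0 };
	generator->generate_block(q);
	// Running the same query again must produce exactly the same voxels:
	// this is what allows non-edited regions to be recomputed instead of stored.
}
```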

View File

@ -3,9 +3,10 @@
#include "../storage/funcs.h"
#include "../util/fixed_array.h"
#include "../util/math/conv.h"
#include "../util/math/sdf.h"
#include "../util/math/vector3.h"
#include "../util/math/vector3f.h"
#include <core/math/transform_3d.h>
namespace zylann::voxel {
@ -178,6 +179,25 @@ struct SdfSphere {
}
};
struct SdfBufferShape {
Span<const float> buffer;
Vector3i buffer_size;
Transform3D world_to_buffer;
float isolevel;
float sdf_scale;
inline real_t operator()(const Vector3 &wpos) const {
// Transform terrain-space position to buffer-space
const Vector3f lpos = to_vec3f(world_to_buffer.xform(wpos));
if (lpos.x < 0 || lpos.y < 0 || lpos.z < 0 || lpos.x >= buffer_size.x || lpos.y >= buffer_size.y ||
lpos.z >= buffer_size.z) {
// Outside the buffer
return 100;
}
return interpolate_trilinear(buffer, buffer_size, lpos) * sdf_scale - isolevel;
}
};
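// Illustrative usage sketch (see VoxelToolLodTerrain::stamp_sdf and VoxelModifierBuffer::apply for real
// call sites). Fields are filled from a baked VoxelMeshSDF, then the functor is evaluated at world-space
// positions; the variable names below are placeholders:
//
//   SdfBufferShape shape;
//   shape.buffer = baked_sdf;                                 // Span<const float> of the decompressed SDF channel
//   shape.buffer_size = baked_sdf_size;                       // Vector3i
//   shape.world_to_buffer = buffer_to_world.affine_inverse(); // Transform3D
//   shape.isolevel = 0.f;
//   shape.sdf_scale = 1.f;
//   const real_t sd = shape(Vector3(10, 0, 5));               // Returns 100 outside the buffer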
struct TextureParams {
float opacity = 1.f;
float sharpness = 2.f;

View File

@ -12,7 +12,6 @@ class SceneTree;
namespace zylann::voxel {
// Contains the baked signed distance field of a mesh, which can be used to sculpt terrain.
// TODO Make it a resource so we can pre-build, save and load the baked data more easily
class VoxelMeshSDF : public Resource {
GDCLASS(VoxelMeshSDF, Resource)
public:
@ -62,7 +61,14 @@ public:
// It is currently needed to ensure `VoxelServerUpdater` gets created so it can tick the task system...
void bake_async(SceneTree *scene_tree);
// Accesses baked SDF data.
// WARNING: don't modify this buffer. Only read from it.
// There are some usages (like modifiers) that will read it from different threads,
// but there is no thread safety in case of direct modification.
// TODO Introduce a VoxelBufferReadOnly? Since that's likely the only way in an object-oriented script API...
Ref<gd::VoxelBuffer> get_voxel_buffer() const;
// Gets the padded bounding box of the model. This is important to know for signed distances to be coherent.
AABB get_aabb() const;
Array debug_check_sdf(Ref<Mesh> mesh);
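// Illustrative usage sketch (from another node; `MyNode::_on_mesh_sdf_baked` is a placeholder and a source
// mesh is assumed to have been assigned beforehand). Baking is asynchronous: listen to the "baked" signal,
// then read the buffer once it has been emitted.
//
//   Ref<VoxelMeshSDF> mesh_sdf;
//   mesh_sdf.instantiate();
//   mesh_sdf->connect("baked", callable_mp(this, &MyNode::_on_mesh_sdf_baked));
//   mesh_sdf->bake_async(get_tree());
//   // In _on_mesh_sdf_baked(): read-only access, possibly shared with other threads.
//   Ref<gd::VoxelBuffer> sdf_voxels = mesh_sdf->get_voxel_buffer();
//   const AABB aabb = mesh_sdf->get_aabb();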

View File

@ -237,9 +237,7 @@ void VoxelToolLodTerrain::do_sphere(Vector3 center, float radius) {
ERR_FAIL_COND(data == nullptr);
VoxelDataLodMap::Lod &data_lod = data->lods[0];
if (_terrain->is_full_load_mode_enabled()) {
preload_box(*data, box, _terrain->get_generator().ptr());
}
preload_box(*data, box, _terrain->get_generator().ptr(), !_terrain->is_full_load_mode_enabled());
ops::DoSphere op;
op.box = box;
@ -276,6 +274,7 @@ public:
RWLockRead rlock(data_lod.map_lock);
// TODO May want to fail if not all blocks were found
_op.blocks.reference_area(data_lod.map, _op.box);
// TODO Need to apply modifiers
_op();
}
_tracker->post_complete();
@ -767,38 +766,17 @@ void VoxelToolLodTerrain::stamp_sdf(
ERR_FAIL_COND(data == nullptr);
VoxelDataLodMap::Lod &data_lod = data->lods[0];
if (_terrain->is_full_load_mode_enabled()) {
preload_box(*data, voxel_box, _terrain->get_generator().ptr());
}
preload_box(*data, voxel_box, _terrain->get_generator().ptr(), !_terrain->is_full_load_mode_enabled());
// TODO Maybe more efficient to "rasterize" the box? We're going to iterate over voxels the box doesn't intersect
// TODO Maybe we should scale SDF values based on the scale of the transform too
struct SdfBufferShape {
Span<const float> buffer;
Vector3i buffer_size;
Transform3D world_to_buffer;
float isolevel;
float sdf_scale;
inline real_t operator()(const Vector3 &wpos) const {
// Transform terrain-space position to buffer-space
const Vector3f lpos = to_vec3f(world_to_buffer.xform(wpos));
if (lpos.x < 0 || lpos.y < 0 || lpos.z < 0 || lpos.x >= buffer_size.x || lpos.y >= buffer_size.y ||
lpos.z >= buffer_size.z) {
// Outside the buffer
return 100;
}
return interpolate_trilinear(buffer, buffer_size, lpos) * sdf_scale - isolevel;
}
};
const Transform3D buffer_to_box =
Transform3D(Basis().scaled(Vector3(local_aabb.size / buffer.get_size())), local_aabb.position);
const Transform3D buffer_to_world = box_to_world * buffer_to_box;
// TODO Support other depths, format should be accessible from the volume
ops::SdfOperation16bit<ops::SdfUnion, SdfBufferShape> op;
ops::SdfOperation16bit<ops::SdfUnion, ops::SdfBufferShape> op;
op.shape.world_to_buffer = buffer_to_world.affine_inverse();
op.shape.buffer_size = buffer.get_size();
op.shape.isolevel = isolevel;

View File

@ -212,6 +212,8 @@ void VoxelToolTerrain::set_voxel_metadata(Vector3i pos, Variant meta) {
VoxelDataMap &map = _terrain->get_storage();
VoxelDataBlock *block = map.get_block(map.voxel_to_block(pos));
ERR_FAIL_COND_MSG(block == nullptr, "Area not editable");
// TODO In this situation, the generator would need to be invoked to fill in the blank
ERR_FAIL_COND_MSG(!block->has_voxels(), "Area not cached");
RWLockWrite lock(block->get_voxels().get_lock());
VoxelMetadata *meta_storage = block->get_voxels().get_or_create_voxel_metadata(map.to_local(pos));
ERR_FAIL_COND(meta_storage == nullptr);
@ -223,6 +225,8 @@ Variant VoxelToolTerrain::get_voxel_metadata(Vector3i pos) const {
VoxelDataMap &map = _terrain->get_storage();
VoxelDataBlock *block = map.get_block(map.voxel_to_block(pos));
ERR_FAIL_COND_V_MSG(block == nullptr, Variant(), "Area not editable");
// TODO In this situation, the generator would need to be invoked to fill in the blank
ERR_FAIL_COND_V_MSG(!block->has_voxels(), Variant(), "Area not cached");
RWLockRead lock(block->get_voxels().get_lock());
const VoxelMetadata *meta = block->get_voxels_const().get_voxel_metadata(map.to_local(pos));
if (meta == nullptr) {
@ -273,7 +277,8 @@ void VoxelToolTerrain::run_blocky_random_tick_static(VoxelDataMap &map, Box3i vo
const Vector3i block_origin = map.block_to_voxel(block_pos);
VoxelDataBlock *block = map.get_block(block_pos);
if (block != nullptr) {
if (block != nullptr && block->has_voxels()) {
// Doing ONLY reads here.
{
RWLockRead lock(block->get_voxels().get_lock());
@ -395,7 +400,7 @@ void VoxelToolTerrain::for_each_voxel_metadata_in_area(AABB voxel_area, const Ca
data_block_box.for_each_cell([&map, &callback, voxel_box](Vector3i block_pos) {
VoxelDataBlock *block = map.get_block(block_pos);
if (block == nullptr) {
if (block == nullptr || !block->has_voxels()) {
return;
}

View File

@ -1,5 +1,6 @@
#include "voxel_terrain_editor_plugin.h"
#include "../../generators/voxel_generator.h"
#include "../../storage/modifiers_gd.h"
#include "../../terrain/fixed_lod/voxel_terrain.h"
#include "../../terrain/variable_lod/voxel_lod_terrain.h"
#include "../about_window.h"
@ -102,6 +103,10 @@ static bool is_side_handled(Object *p_object) {
if (wrapper != nullptr) {
return true;
}
gd::VoxelModifier *modifier = Object::cast_to<gd::VoxelModifier>(p_object);
if (modifier != nullptr) {
return true;
}
return false;
}

View File

@ -42,7 +42,6 @@ public:
};
virtual Result generate_block(VoxelQueryData &input);
// TODO Single sample
virtual bool supports_single_generation() const {
return false;

View File

@ -145,7 +145,10 @@ struct DeepSampler : transvoxel::IDeepSDFSampler {
RWLockRead rlock(lod.map_lock);
const Vector3i lod_bpos = lod_pos >> lod.map.get_block_size_pow2();
const VoxelDataBlock *block = lod.map.get_block(lod_bpos);
if (block != nullptr) {
// TODO Thread-safety: checking the presence of voxels here is not safe.
// It can change while meshing takes place if a modifier is moved in the same area,
// because it invalidates cached data.
if (block != nullptr && block->has_voxels()) {
voxels = block->get_voxels_shared();
bsm = lod.map.get_block_size_mask();
}

View File

@ -20,6 +20,7 @@
#include "meshers/dmc/voxel_mesher_dmc.h"
#include "meshers/transvoxel/voxel_mesher_transvoxel.h"
#include "server/voxel_server_gd.h"
#include "storage/modifiers_gd.h"
#include "storage/voxel_buffer_gd.h"
#include "storage/voxel_memory_pool.h"
#include "storage/voxel_metadata_variant.h"
@ -160,6 +161,9 @@ void initialize_voxel_module(ModuleInitializationLevel p_level) {
ClassDB::register_class<VoxelInstanceGenerator>();
ClassDB::register_class<VoxelInstancer>();
ClassDB::register_class<VoxelInstanceComponent>();
ClassDB::register_abstract_class<gd::VoxelModifier>();
ClassDB::register_class<gd::VoxelModifierSphere>();
ClassDB::register_class<gd::VoxelModifierMesh>();
// Streams
ClassDB::register_abstract_class<VoxelStream>();

View File

@ -1,5 +1,6 @@
#include "generate_block_task.h"
#include "../storage/voxel_buffer_internal.h"
#include "../storage/voxel_data_map.h"
#include "../util/godot/funcs.h"
#include "../util/log.h"
#include "../util/profiling.h"
@ -43,6 +44,11 @@ void GenerateBlockTask::run(zylann::ThreadedTaskContext ctx) {
const VoxelGenerator::Result result = generator->generate_block(query_data);
max_lod_hint = result.max_lod_hint;
if (data != nullptr) {
data->modifiers.apply(
query_data.voxel_buffer, AABB(query_data.origin_in_voxels, query_data.voxel_buffer.get_size() << lod));
}
if (stream_dependency->valid) {
Ref<VoxelStream> stream = stream_dependency->stream;
@ -87,12 +93,20 @@ void GenerateBlockTask::apply_result() {
// The request response must match the dependency it would have been requested with.
// If it doesn't match, we are no longer interested in the result.
if (stream_dependency->valid) {
Ref<VoxelStream> stream = stream_dependency->stream;
VoxelServer::BlockDataOutput o;
o.voxels = voxels;
o.position = position;
o.lod = lod;
o.dropped = !has_run;
o.type = VoxelServer::BlockDataOutput::TYPE_GENERATED;
if (stream.is_valid() && stream->get_save_generator_output()) {
// We can't consider the block as "generated", because once saved there is no state telling it apart,
// so it has to be considered an edited block
o.type = VoxelServer::BlockDataOutput::TYPE_LOADED;
} else {
o.type = VoxelServer::BlockDataOutput::TYPE_GENERATED;
}
o.max_lod_hint = max_lod_hint;
o.initial_load = false;

View File

@ -8,6 +8,8 @@
namespace zylann::voxel {
struct VoxelDataLodMap;
class GenerateBlockTask : public IThreadedTask {
public:
GenerateBlockTask();
@ -31,6 +33,7 @@ public:
bool drop_beyond_max_distance = true;
PriorityDependency priority_dependency;
std::shared_ptr<StreamingDependency> stream_dependency;
std::shared_ptr<VoxelDataLodMap> data;
std::shared_ptr<AsyncDependencyTracker> tracker;
};

View File

@ -15,13 +15,14 @@ std::atomic_int g_debug_load_block_tasks_count;
LoadBlockDataTask::LoadBlockDataTask(uint32_t p_volume_id, Vector3i p_block_pos, uint8_t p_lod, uint8_t p_block_size,
bool p_request_instances, std::shared_ptr<StreamingDependency> p_stream_dependency,
PriorityDependency p_priority_dependency) :
PriorityDependency p_priority_dependency, bool generate_cache_data) :
_priority_dependency(p_priority_dependency),
_position(p_block_pos),
_volume_id(p_volume_id),
_lod(p_lod),
_block_size(p_block_size),
_request_instances(p_request_instances),
_generate_cache_data(generate_cache_data),
//_request_voxels(true),
_stream_dependency(p_stream_dependency) {
//
@ -62,7 +63,7 @@ void LoadBlockDataTask::run(zylann::ThreadedTaskContext ctx) {
if (voxel_query_data.result == VoxelStream::RESULT_ERROR) {
ERR_PRINT("Error loading voxel block");
} else if (voxel_query_data.result == VoxelStream::RESULT_BLOCK_NOT_FOUND) {
} else if (voxel_query_data.result == VoxelStream::RESULT_BLOCK_NOT_FOUND && _generate_cache_data) {
Ref<VoxelGenerator> generator = _stream_dependency->generator;
if (generator.is_valid()) {
@ -76,7 +77,7 @@ void LoadBlockDataTask::run(zylann::ThreadedTaskContext ctx) {
task->priority_dependency = _priority_dependency;
VoxelServer::get_singleton().push_async_task(task);
_fallback_on_generator = true;
_requested_generator_task = true;
} else {
// If there is no generator... what do we do? What defines the format of that empty block?
@ -123,7 +124,7 @@ void LoadBlockDataTask::apply_result() {
// TODO Comparing pointer may not be guaranteed
// The request response must match the dependency it would have been requested with.
// If it doesn't match, we are no longer interested in the result.
if (_stream_dependency->valid && !_fallback_on_generator) {
if (_stream_dependency->valid && !_requested_generator_task) {
VoxelServer::BlockDataOutput o;
o.voxels = _voxels;
o.instances = std::move(_instances);

View File

@ -12,7 +12,7 @@ class LoadBlockDataTask : public IThreadedTask {
public:
LoadBlockDataTask(uint32_t p_volume_id, Vector3i p_block_pos, uint8_t p_lod, uint8_t p_block_size,
bool p_request_instances, std::shared_ptr<StreamingDependency> p_stream_dependency,
PriorityDependency p_priority_dependency);
PriorityDependency p_priority_dependency, bool generate_cache_data);
~LoadBlockDataTask();
@ -36,7 +36,8 @@ private:
bool _request_instances = false;
//bool _request_voxels = false;
bool _max_lod_hint = false;
bool _fallback_on_generator = false;
bool _generate_cache_data = true;
bool _requested_generator_task = false;
std::shared_ptr<StreamingDependency> _stream_dependency;
};

View File

@ -1,4 +1,5 @@
#include "mesh_block_task.h"
#include "../storage/voxel_data_map.h"
#include "../util/dstack.h"
#include "../util/log.h"
#include "../util/profiling.h"
@ -6,24 +7,21 @@
namespace zylann::voxel {
// Takes a list of blocks and interprets it as a cube of blocks centered around the area we want to create a mesh from.
// Voxels from central blocks are copied, and part of side blocks are also copied so we get a temporary buffer
// which includes enough neighbors for the mesher to avoid doing bound checks.
static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>> blocks, VoxelBufferInternal &dst,
int min_padding, int max_padding, int channels_mask, Ref<VoxelGenerator> generator, int data_block_size,
uint8_t lod_index, Vector3i mesh_block_pos) {
ZN_DSTACK();
ZN_PROFILE_SCOPE();
struct CubicAreaInfo {
int edge_size; // In data blocks
int mesh_block_size_factor;
unsigned int anchor_buffer_index;
// Extract wanted channels in a list
unsigned int channels_count = 0;
FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channels =
VoxelBufferInternal::mask_to_channels_list(channels_mask, channels_count);
inline bool is_valid() const {
return edge_size != 0;
}
};
CubicAreaInfo get_cubic_area_info_from_size(unsigned int size) {
// Determine size of the cube of blocks
int edge_size;
int mesh_block_size_factor;
switch (blocks.size()) {
switch (size) {
case 3 * 3 * 3:
edge_size = 3;
mesh_block_size_factor = 1;
@ -33,19 +31,41 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
mesh_block_size_factor = 2;
break;
default:
ERR_FAIL_MSG("Unsupported block count");
ZN_PRINT_ERROR("Unsupported block count");
return CubicAreaInfo{ 0, 0, 0 };
}
// Pick anchor block, usually within the central part of the cube (that block must be valid)
const unsigned int anchor_buffer_index = edge_size * edge_size + edge_size + 1;
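// For example, with a 3x3x3 cube of data blocks (edge_size = 3) this gives 3*3 + 3 + 1 = 13,
// which is the central block when blocks are indexed in ZXY order.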
std::shared_ptr<VoxelBufferInternal> &central_buffer = blocks[anchor_buffer_index];
return { edge_size, mesh_block_size_factor, anchor_buffer_index };
}
// Takes a list of blocks and interprets it as a cube of blocks centered around the area we want to create a mesh from.
// Voxels from central blocks are copied, and part of side blocks are also copied so we get a temporary buffer
// which includes enough neighbors for the mesher to avoid doing bound checks.
static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>> blocks, VoxelBufferInternal &dst,
int min_padding, int max_padding, int channels_mask, Ref<VoxelGenerator> generator,
const VoxelModifierStack *modifiers, int data_block_size, uint8_t lod_index, Vector3i mesh_block_pos) {
ZN_DSTACK();
ZN_PROFILE_SCOPE();
// Extract wanted channels in a list
unsigned int channels_count = 0;
FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channels =
VoxelBufferInternal::mask_to_channels_list(channels_mask, channels_count);
// Determine size of the cube of blocks
const CubicAreaInfo area_info = get_cubic_area_info_from_size(blocks.size());
ERR_FAIL_COND(!area_info.is_valid());
std::shared_ptr<VoxelBufferInternal> &central_buffer = blocks[area_info.anchor_buffer_index];
ERR_FAIL_COND_MSG(central_buffer == nullptr && generator.is_null(), "Central buffer must be valid");
if (central_buffer != nullptr) {
ERR_FAIL_COND_MSG(
Vector3iUtil::all_members_equal(central_buffer->get_size()) == false, "Central buffer must be cubic");
}
const int mesh_block_size = data_block_size * mesh_block_size_factor;
const int mesh_block_size = data_block_size * area_info.mesh_block_size_factor;
const int padded_mesh_block_size = mesh_block_size + min_padding + max_padding;
dst.create(padded_mesh_block_size, padded_mesh_block_size, padded_mesh_block_size);
@ -59,9 +79,7 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
const std::shared_ptr<VoxelBufferInternal> &buffer = blocks[i];
if (buffer != nullptr) {
// Initialize channel depths from the first non-null block found
for (unsigned int ci = 0; ci < channels.size(); ++ci) {
dst.set_channel_depth(ci, buffer->get_channel_depth(ci));
}
dst.copy_format(*buffer);
break;
}
}
@ -71,15 +89,16 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
std::vector<Box3i> boxes_to_generate;
const Box3i mesh_data_box = Box3i::from_min_max(min_pos, max_pos);
if (generator.is_valid()) {
const bool has_generator = generator.is_valid() || modifiers != nullptr;
if (has_generator) {
boxes_to_generate.push_back(mesh_data_box);
}
// Using ZXY as convention to reconstruct positions with thread locking consistency
unsigned int block_index = 0;
for (int z = -1; z < edge_size - 1; ++z) {
for (int x = -1; x < edge_size - 1; ++x) {
for (int y = -1; y < edge_size - 1; ++y) {
for (int z = -1; z < area_info.edge_size - 1; ++z) {
for (int x = -1; x < area_info.edge_size - 1; ++x) {
for (int y = -1; y < area_info.edge_size - 1; ++y) {
const Vector3i offset = data_block_size * Vector3i(x, y, z);
const std::shared_ptr<VoxelBufferInternal> &src = blocks[block_index];
++block_index;
@ -98,7 +117,7 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
}
}
if (generator.is_valid()) {
if (has_generator) {
// Subtract edited box from the area to generate
// TODO This approach allows batching boxes if necessary,
// but would it be better to just do it anyway for every clipped box?
@ -120,12 +139,13 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
}
}
if (generator.is_valid()) {
if (has_generator) {
// Complete data with generated voxels
ZN_PROFILE_SCOPE_NAMED("Generate");
VoxelBufferInternal generated_voxels;
const Vector3i origin_in_voxels = mesh_block_pos * (mesh_block_size_factor * data_block_size << lod_index);
const Vector3i origin_in_voxels =
mesh_block_pos * (area_info.mesh_block_size_factor * data_block_size << lod_index);
for (unsigned int i = 0; i < boxes_to_generate.size(); ++i) {
const Box3i &box = boxes_to_generate[i];
@ -134,7 +154,13 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
//generated_voxels.set_voxel_f(2.0f, box.size.x / 2, box.size.y / 2, box.size.z / 2,
//VoxelBufferInternal::CHANNEL_SDF);
VoxelGenerator::VoxelQueryData q{ generated_voxels, (box.pos << lod_index) + origin_in_voxels, lod_index };
generator->generate_block(q);
if (generator.is_valid()) {
generator->generate_block(q);
}
if (modifiers != nullptr) {
modifiers->apply(q.voxel_buffer, AABB(q.origin_in_voxels, q.voxel_buffer.get_size() << lod_index));
}
for (unsigned int ci = 0; ci < channels_count; ++ci) {
dst.copy_from(generated_voxels, Vector3i(), generated_voxels.get_size(),
@ -172,15 +198,50 @@ void MeshBlockTask::run(zylann::ThreadedTaskContext ctx) {
const unsigned int min_padding = mesher->get_minimum_padding();
const unsigned int max_padding = mesher->get_maximum_padding();
// TODO Cache?
const VoxelModifierStack *modifiers = data != nullptr ? &data->modifiers : nullptr;
VoxelBufferInternal voxels;
copy_block_and_neighbors(to_span(blocks, blocks_count), voxels, min_padding, max_padding,
mesher->get_used_channels_mask(), meshing_dependency->generator, data_block_size, lod, position);
mesher->get_used_channels_mask(), meshing_dependency->generator, modifiers, data_block_size, lod_index,
position);
const Vector3i origin_in_voxels = position * (int(data_block_size) << lod);
// Could cache generator data from here if it was safe to write into the map
/*if (data != nullptr && cache_generated_blocks) {
const CubicAreaInfo area_info = get_cubic_area_info_from_size(blocks.size());
ERR_FAIL_COND(!area_info.is_valid());
const VoxelMesher::Input input = { voxels, meshing_dependency->generator.ptr(), data.get(), origin_in_voxels, lod,
collision_hint };
VoxelDataLodMap::Lod &lod = data->lods[lod_index];
// Note, this box does not include neighbors!
const Vector3i min_bpos = position * area_info.mesh_block_size_factor;
const Vector3i max_bpos = min_bpos + Vector3iUtil::create(area_info.edge_size - 2);
Vector3i bpos;
for (bpos.z = min_bpos.z; bpos.z < max_bpos.z; ++bpos.z) {
for (bpos.x = min_bpos.x; bpos.x < max_bpos.x; ++bpos.x) {
for (bpos.y = min_bpos.y; bpos.y < max_bpos.y; ++bpos.y) {
// {
// RWLockRead rlock(lod.map_lock);
// VoxelDataBlock *block = lod.map.get_block(bpos);
// if (block != nullptr && (block->is_edited() || block->is_modified())) {
// continue;
// }
// }
std::shared_ptr<VoxelBufferInternal> &cache_buffer = make_shared_instance<VoxelBufferInternal>();
cache_buffer->copy_format(voxels);
const Vector3i min_src_pos =
(bpos - min_bpos) * data_block_size + Vector3iUtil::create(min_padding);
cache_buffer->copy_from(voxels, min_src_pos, min_src_pos + cache_buffer->get_size(), Vector3i());
// TODO Where to put voxels? Can't safely write to data at the moment.
}
}
}
}*/
const Vector3i origin_in_voxels = position * (int(data_block_size) << lod_index);
const VoxelMesher::Input input = { voxels, meshing_dependency->generator.ptr(), data.get(), origin_in_voxels,
lod_index, collision_hint };
mesher->build(_surfaces_output, input);
_has_run = true;
@ -188,7 +249,7 @@ void MeshBlockTask::run(zylann::ThreadedTaskContext ctx) {
int MeshBlockTask::get_priority() {
float closest_viewer_distance_sq;
const int p = priority_dependency.evaluate(lod, &closest_viewer_distance_sq);
const int p = priority_dependency.evaluate(lod_index, &closest_viewer_distance_sq);
_too_far = closest_viewer_distance_sq > priority_dependency.drop_distance_squared;
return p;
}
@ -213,7 +274,7 @@ void MeshBlockTask::apply_result() {
}
o.position = position;
o.lod = lod;
o.lod = lod_index;
o.surfaces = std::move(_surfaces_output);
VoxelServer::VolumeCallbacks callbacks = VoxelServer::get_singleton().get_volume_callbacks(volume_id);

View File

@ -28,7 +28,7 @@ public:
//FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channel_depths;
Vector3i position; // In mesh blocks of the specified lod
uint32_t volume_id;
uint8_t lod = 0;
uint8_t lod_index = 0;
uint8_t blocks_count = 0;
uint8_t data_block_size = 0;
bool collision_hint = false;

View File

@ -36,13 +36,16 @@ public:
};
Type type;
// If voxels are null with TYPE_LOADED, it means no block was found in the stream (if any) and no generator task
// was scheduled. This is the case when we don't want to cache blocks of generated data.
std::shared_ptr<VoxelBufferInternal> voxels;
UniquePtr<InstanceBlockData> instances;
Vector3i position;
uint8_t lod;
bool dropped;
bool max_lod_hint;
// Blocks with this flag set should not be ignored
// Blocks with this flag set should not be ignored.
// This is used when data streaming is off and all blocks are loaded at once.
bool initial_load;
};

storage/modifiers.cpp — new file, 381 lines
View File

@ -0,0 +1,381 @@
#include "modifiers.h"
#include "../edition/funcs.h"
#include "../util/dstack.h"
#include "../util/math/conv.h"
#include "../util/profiling.h"
namespace zylann::voxel {
namespace {
thread_local std::vector<float> tls_sdf;
thread_local std::vector<Vector3> tls_positions;
Span<const Vector3> get_positions_temporary(Vector3i buffer_size, Vector3 origin, Vector3 size) {
tls_positions.resize(Vector3iUtil::get_volume(buffer_size));
Span<Vector3> positions = to_span(tls_positions);
const Vector3 end = origin + size;
const Vector3 bsf = buffer_size;
unsigned int i = 0;
for (int z = 0; z < buffer_size.z; ++z) {
for (int x = 0; x < buffer_size.x; ++x) {
for (int y = 0; y < buffer_size.y; ++y) {
positions[i] = math::lerp(origin, end, Vector3(x / bsf.x, y / bsf.y, z / bsf.z));
++i;
}
}
}
return positions;
}
Span<float> decompress_sdf_to_temporary(VoxelBufferInternal &voxels) {
ZN_DSTACK();
const Vector3i bs = voxels.get_size();
tls_sdf.resize(Vector3iUtil::get_volume(bs));
Span<float> sdf = to_span(tls_sdf);
const VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::CHANNEL_SDF;
voxels.decompress_channel(channel);
const VoxelBufferInternal::Depth depth = voxels.get_channel_depth(channel);
switch (depth) {
case VoxelBufferInternal::DEPTH_8_BIT: {
Span<int8_t> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
for (unsigned int i = 0; i < sdf.size(); ++i) {
sdf[i] = s8_to_snorm(raw[i]);
}
} break;
case VoxelBufferInternal::DEPTH_16_BIT: {
Span<int16_t> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
for (unsigned int i = 0; i < sdf.size(); ++i) {
sdf[i] = s16_to_snorm(raw[i]);
}
} break;
case VoxelBufferInternal::DEPTH_32_BIT: {
Span<float> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
memcpy(sdf.data(), raw.data(), sizeof(float) * sdf.size());
} break;
case VoxelBufferInternal::DEPTH_64_BIT: {
Span<double> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
for (unsigned int i = 0; i < sdf.size(); ++i) {
sdf[i] = raw[i];
}
} break;
default:
ZN_CRASH();
}
const float inv_scale = 1.0f / VoxelBufferInternal::get_sdf_quantization_scale(depth);
for (unsigned int i = 0; i < sdf.size(); ++i) {
sdf[i] *= inv_scale;
}
return sdf;
}
void store_sdf(VoxelBufferInternal &voxels, Span<float> sdf) {
const VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::CHANNEL_SDF;
const VoxelBufferInternal::Depth depth = voxels.get_channel_depth(channel);
const float scale = VoxelBufferInternal::get_sdf_quantization_scale(depth);
for (unsigned int i = 0; i < sdf.size(); ++i) {
sdf[i] *= scale;
}
switch (depth) {
case VoxelBufferInternal::DEPTH_8_BIT: {
Span<int8_t> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
for (unsigned int i = 0; i < sdf.size(); ++i) {
raw[i] = snorm_to_s8(sdf[i]);
}
} break;
case VoxelBufferInternal::DEPTH_16_BIT: {
Span<int16_t> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
for (unsigned int i = 0; i < sdf.size(); ++i) {
raw[i] = snorm_to_s16(sdf[i]);
}
} break;
case VoxelBufferInternal::DEPTH_32_BIT: {
Span<float> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
memcpy(raw.data(), sdf.data(), sizeof(float) * sdf.size());
} break;
case VoxelBufferInternal::DEPTH_64_BIT: {
Span<double> raw;
ZN_ASSERT(voxels.get_channel_data(channel, raw));
for (unsigned int i = 0; i < sdf.size(); ++i) {
raw[i] = sdf[i];
}
} break;
default:
ZN_CRASH();
}
}
} //namespace
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VoxelModifierStack::VoxelModifierStack() {}
VoxelModifierStack::VoxelModifierStack(VoxelModifierStack &&other) {
move_from_noclear(other);
}
VoxelModifierStack &VoxelModifierStack::operator=(VoxelModifierStack &&other) {
clear();
move_from_noclear(other);
_next_id = other._next_id;
return *this;
}
void VoxelModifierStack::move_from_noclear(VoxelModifierStack &other) {
{
RWLockRead rlock(other._stack_lock);
_modifiers = std::move(other._modifiers);
_stack = std::move(other._stack);
}
_next_id = other._next_id;
}
uint32_t VoxelModifierStack::allocate_id() {
return ++_next_id;
}
void VoxelModifierStack::remove_modifier(uint32_t id) {
RWLockWrite lock(_stack_lock);
auto map_it = _modifiers.find(id);
ZN_ASSERT_RETURN(map_it != _modifiers.end());
const VoxelModifier *ptr = map_it->second.get();
for (auto stack_it = _stack.begin(); stack_it != _stack.end(); ++stack_it) {
if (*stack_it == ptr) {
_stack.erase(stack_it);
break;
}
}
_modifiers.erase(map_it);
}
bool VoxelModifierStack::has_modifier(uint32_t id) const {
return _modifiers.find(id) != _modifiers.end();
}
VoxelModifier *VoxelModifierStack::get_modifier(uint32_t id) const {
auto it = _modifiers.find(id);
if (it != _modifiers.end()) {
return it->second.get();
}
return nullptr;
}
void VoxelModifierStack::apply(VoxelBufferInternal &voxels, AABB aabb) const {
ZN_PROFILE_SCOPE();
RWLockRead lock(_stack_lock);
if (_stack.size() == 0) {
return;
}
VoxelModifierContext ctx;
bool any_intersection = false;
for (unsigned int i = 0; i < _stack.size(); ++i) {
const VoxelModifier *modifier = _stack[i];
ZN_ASSERT(modifier != nullptr);
if (modifier->get_aabb().intersects(aabb)) {
if (any_intersection == false) {
any_intersection = true;
ctx.positions = get_positions_temporary(voxels.get_size(), aabb.position, aabb.size);
ctx.sdf = decompress_sdf_to_temporary(voxels);
}
modifier->apply(ctx);
}
}
if (any_intersection) {
store_sdf(voxels, ctx.sdf);
voxels.compress_uniform_channels();
}
}
void VoxelModifierStack::apply(float &sdf, Vector3 position) const {
ZN_PROFILE_SCOPE();
RWLockRead lock(_stack_lock);
if (_stack.size() == 0) {
return;
}
VoxelModifierContext ctx;
ctx.positions = Span<Vector3>(&position, 1);
ctx.sdf = Span<float>(&sdf, 1);
const AABB aabb(position, Vector3(1, 1, 1));
for (unsigned int i = 0; i < _stack.size(); ++i) {
const VoxelModifier *modifier = _stack[i];
ZN_ASSERT(modifier != nullptr);
if (modifier->get_aabb().intersects(aabb)) {
modifier->apply(ctx);
}
}
}
void VoxelModifierStack::clear() {
RWLockWrite lock(_stack_lock);
_stack.clear();
_modifiers.clear();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void VoxelModifierSphere::set_radius(real_t radius) {
RWLockWrite wlock(_rwlock);
if (radius == _radius) {
return;
}
_radius = radius;
update_aabb();
}
real_t VoxelModifierSphere::get_radius() const {
return _radius;
}
void VoxelModifierSphere::update_aabb() {
const float extent = _radius * 1.25f + get_smoothness();
const float extent2 = 2.0 * extent;
_aabb = AABB(get_transform().origin - Vector3(extent, extent, extent), Vector3(extent2, extent2, extent2));
}
void VoxelModifierSphere::apply(VoxelModifierContext ctx) const {
ZN_PROFILE_SCOPE();
RWLockRead rlock(_rwlock);
const float smoothness = get_smoothness();
const Vector3 center = get_transform().origin;
const float sdf_scale = 1.0f;
// TODO Support transform scale
switch (get_operation()) {
case OP_ADD:
for (unsigned int i = 0; i < ctx.sdf.size(); ++i) {
const float sd = sdf_scale * math::sdf_sphere(ctx.positions[i], center, _radius);
ctx.sdf[i] = math::sdf_smooth_union(ctx.sdf[i], sd, smoothness);
}
break;
case OP_SUBTRACT:
for (unsigned int i = 0; i < ctx.sdf.size(); ++i) {
const float sd = sdf_scale * math::sdf_sphere(ctx.positions[i], center, _radius);
ctx.sdf[i] = math::sdf_smooth_subtract(ctx.sdf[i], sd, smoothness);
}
break;
default:
ZN_CRASH();
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void VoxelModifierBuffer::set_buffer(std::shared_ptr<VoxelBufferInternal> buffer, Vector3f min_pos, Vector3f max_pos) {
//ZN_ASSERT_RETURN(buffer != nullptr);
RWLockWrite wlock(_rwlock);
_buffer = buffer;
_min_pos = min_pos;
_max_pos = max_pos;
update_aabb();
}
void VoxelModifierBuffer::set_isolevel(float isolevel) {
RWLockWrite wlock(_rwlock);
if (isolevel == _isolevel) {
return;
}
_isolevel = isolevel;
}
float get_largest_coord(Vector3 v) {
return math::max(math::max(v.x, v.y), v.z);
}
void VoxelModifierBuffer::apply(VoxelModifierContext ctx) const {
ZN_PROFILE_SCOPE();
RWLockRead rlock(_rwlock);
if (_buffer == nullptr) {
return;
}
// TODO VoxelMeshSDF isn't preventing scripts from writing into this buffer from a different thread.
// I can't think of a reason to manually modify the buffer of a VoxelMeshSDF at the moment.
RWLockRead buffer_rlock(_buffer->get_lock());
const Transform3D model_to_world = get_transform();
const Transform3D buffer_to_model =
Transform3D(Basis().scaled(to_vec3(_max_pos - _min_pos) / to_vec3(_buffer->get_size())), to_vec3(_min_pos));
const Transform3D buffer_to_world = model_to_world * buffer_to_model;
Span<const float> buffer_sdf;
ZN_ASSERT_RETURN(_buffer->get_channel_data(VoxelBufferInternal::CHANNEL_SDF, buffer_sdf));
const float smoothness = get_smoothness();
ops::SdfBufferShape shape;
shape.buffer = buffer_sdf;
shape.buffer_size = _buffer->get_size();
shape.isolevel = _isolevel;
shape.sdf_scale = get_largest_coord(model_to_world.get_basis().get_scale());
shape.world_to_buffer = buffer_to_world.affine_inverse();
switch (get_operation()) {
case OP_ADD:
for (unsigned int i = 0; i < ctx.sdf.size(); ++i) {
const float sd = shape(ctx.positions[i]);
ctx.sdf[i] = math::sdf_smooth_union(ctx.sdf[i], sd, smoothness);
}
break;
case OP_SUBTRACT:
for (unsigned int i = 0; i < ctx.sdf.size(); ++i) {
const float sd = shape(ctx.positions[i]);
ctx.sdf[i] = math::sdf_smooth_subtract(ctx.sdf[i], sd, smoothness);
}
break;
default:
ZN_CRASH();
}
}
void VoxelModifierBuffer::update_aabb() {
const Transform3D &model_to_world = get_transform();
_aabb = model_to_world.xform(AABB(to_vec3(_min_pos), to_vec3(_max_pos - _min_pos)));
}
} // namespace zylann::voxel

storage/modifiers.h — new file, 184 lines
View File

@ -0,0 +1,184 @@
#ifndef VOXEL_MODIFIERS_H
#define VOXEL_MODIFIERS_H
#include "../util/math/sdf.h"
#include "../util/math/vector3.h"
#include "../util/math/vector3f.h"
#include "../util/thread/rw_lock.h"
#include "voxel_buffer_internal.h"
#include <core/math/transform_3d.h>
#include <unordered_map>
namespace zylann::voxel {
struct VoxelModifierContext {
Span<float> sdf;
Span<const Vector3> positions;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class VoxelModifier {
public:
enum Type { TYPE_SPHERE, TYPE_BUFFER };
virtual ~VoxelModifier() {}
virtual void apply(VoxelModifierContext ctx) const = 0;
const Transform3D &get_transform() const {
return _transform;
}
void set_transform(Transform3D t) {
if (t == _transform) {
return;
}
_transform = t;
update_aabb();
}
const AABB &get_aabb() const {
return _aabb;
}
virtual Type get_type() const = 0;
virtual bool is_sdf() const = 0;
protected:
virtual void update_aabb() = 0;
AABB _aabb;
private:
Transform3D _transform;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class VoxelModifierSdf : public VoxelModifier {
public:
enum Operation { //
OP_ADD,
OP_SUBTRACT
};
inline Operation get_operation() const {
return _operation;
}
void set_operation(Operation op) {
RWLockWrite wlock(_rwlock);
_operation = op;
}
inline float get_smoothness() const {
return _smoothness;
}
void set_smoothness(float p_smoothness) {
RWLockWrite wlock(_rwlock);
const float smoothness = math::max(p_smoothness, 0.f);
if (smoothness == _smoothness) {
return;
}
_smoothness = smoothness;
update_aabb();
}
bool is_sdf() const override {
return true;
}
protected:
RWLock _rwlock;
private:
Operation _operation = OP_ADD;
float _smoothness = 0.f;
//float _margin = 0.f;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class VoxelModifierSphere : public VoxelModifierSdf {
public:
Type get_type() const override {
return TYPE_SPHERE;
};
void set_radius(real_t radius);
real_t get_radius() const;
void apply(VoxelModifierContext ctx) const override;
protected:
void update_aabb() override;
private:
real_t _radius = 10.f;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class VoxelModifierBuffer : public VoxelModifierSdf {
public:
Type get_type() const override {
return TYPE_BUFFER;
};
void set_buffer(std::shared_ptr<VoxelBufferInternal> buffer, Vector3f min_pos, Vector3f max_pos);
void set_isolevel(float isolevel);
void apply(VoxelModifierContext ctx) const override;
protected:
void update_aabb() override;
private:
std::shared_ptr<VoxelBufferInternal> _buffer;
Vector3f _min_pos;
Vector3f _max_pos;
float _isolevel = 0.f;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class VoxelModifierStack {
public:
uint32_t allocate_id();
VoxelModifierStack();
VoxelModifierStack(VoxelModifierStack &&other);
VoxelModifierStack &operator=(VoxelModifierStack &&other);
template <typename T>
T *add_modifier(uint32_t id) {
ZN_ASSERT(!has_modifier(id));
UniquePtr<VoxelModifier> &uptr = _modifiers[id];
uptr = make_unique_instance<T>();
VoxelModifier *ptr = uptr.get();
RWLockWrite lock(_stack_lock);
_stack.push_back(ptr);
return static_cast<T *>(ptr);
}
void remove_modifier(uint32_t id);
bool has_modifier(uint32_t id) const;
VoxelModifier *get_modifier(uint32_t id) const;
void apply(VoxelBufferInternal &voxels, AABB aabb) const;
void apply(float &sdf, Vector3 position) const;
void clear();
private:
void move_from_noclear(VoxelModifierStack &other);
std::unordered_map<uint32_t, UniquePtr<VoxelModifier>> _modifiers;
uint32_t _next_id = 1;
// TODO Later, replace this with a spatial acceleration structure based on AABBs, like BVH
std::vector<VoxelModifier *> _stack;
RWLock _stack_lock;
};
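// Illustrative usage sketch (placeholder values; see GenerateBlockTask and the modifier nodes for real call sites):
//
//   VoxelModifierStack modifiers;
//   const uint32_t id = modifiers.allocate_id();
//   VoxelModifierSphere *sphere = modifiers.add_modifier<VoxelModifierSphere>(id);
//   sphere->set_radius(8.f);
//   sphere->set_transform(Transform3D(Basis(), Vector3(0, 20, 0)));
//   // Apply the whole stack to a block of voxels covering `aabb` in terrain space:
//   modifiers.apply(voxels, AABB(origin_in_voxels, voxels.get_size() << lod_index));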
} // namespace zylann::voxel
#endif // VOXEL_MODIFIERS_H

storage/modifiers_gd.cpp — new file, 307 lines
View File

@ -0,0 +1,307 @@
#include "modifiers_gd.h"
#include "../terrain/variable_lod/voxel_lod_terrain.h"
#include "../util/errors.h"
#include "../util/math/conv.h"
namespace zylann::voxel::gd {
void post_edit_modifier(VoxelLodTerrain &volume, AABB aabb) {
volume.post_edit_modifiers(Box3i(math::floor_to_int(aabb.position), math::floor_to_int(aabb.size)));
}
// template <typename Modifier_T, typename F>
// void edit_modifier(VoxelLodTerrain &volume, Modifier_T &modifier, F action) {
// const AABB prev_aabb = modifier->get_aabb();
// action(modifier);
// const AABB new_aabb = modifier->get_aabb();
// post_edit_modifier(volume, prev_aabb);
// post_edit_modifier(volume, new_aabb);
// }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VoxelModifier::VoxelModifier() {
set_notify_local_transform(true);
}
zylann::voxel::VoxelModifier *VoxelModifier::create(zylann::voxel::VoxelModifierStack &modifiers, uint32_t id) {
ZN_PRINT_ERROR("Not implemented");
return nullptr;
}
zylann::voxel::VoxelModifierSdf::Operation to_op(VoxelModifier::Operation op) {
return zylann::voxel::VoxelModifierSdf::Operation(op);
}
void VoxelModifier::set_operation(Operation op) {
ZN_ASSERT_RETURN(op >= 0 && op < OPERATION_COUNT);
if (op == _operation) {
return;
}
_operation = op;
if (_volume == nullptr) {
return;
}
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN(modifier != nullptr);
ZN_ASSERT_RETURN(modifier->is_sdf());
zylann::voxel::VoxelModifierSdf *sdf_modifier = static_cast<zylann::voxel::VoxelModifierSdf *>(modifier);
sdf_modifier->set_operation(to_op(_operation));
post_edit_modifier(*_volume, modifier->get_aabb());
}
VoxelModifier::Operation VoxelModifier::get_operation() const {
return _operation;
}
void VoxelModifier::set_smoothness(float s) {
if (s == _smoothness) {
return;
}
_smoothness = math::max(s, 0.f);
if (_volume == nullptr) {
return;
}
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN(modifier != nullptr);
ZN_ASSERT_RETURN(modifier->is_sdf());
zylann::voxel::VoxelModifierSdf *sdf_modifier = static_cast<zylann::voxel::VoxelModifierSdf *>(modifier);
const AABB prev_aabb = modifier->get_aabb();
sdf_modifier->set_smoothness(_smoothness);
const AABB new_aabb = modifier->get_aabb();
post_edit_modifier(*_volume, prev_aabb);
post_edit_modifier(*_volume, new_aabb);
}
float VoxelModifier::get_smoothness() const {
return _smoothness;
}
void VoxelModifier::_notification(int p_what) {
switch (p_what) {
case Node::NOTIFICATION_PARENTED: {
Node *parent = get_parent();
ZN_ASSERT_RETURN(parent != nullptr);
ZN_ASSERT_RETURN(_volume == nullptr);
VoxelLodTerrain *volume = Object::cast_to<VoxelLodTerrain>(parent);
_volume = volume;
if (_volume != nullptr) {
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
const uint32_t id = modifiers.allocate_id();
zylann::voxel::VoxelModifier *modifier = create(modifiers, id);
if (modifier->is_sdf()) {
zylann::voxel::VoxelModifierSdf *sdf_modifier =
static_cast<zylann::voxel::VoxelModifierSdf *>(modifier);
sdf_modifier->set_operation(to_op(_operation));
sdf_modifier->set_smoothness(_smoothness);
}
modifier->set_transform(get_transform());
_modifier_id = id;
// TODO Optimize: on loading of a scene, this could be very bad for performance because there could be
// a lot of modifiers on the map, but there is no distinction possible in Godot at the moment...
post_edit_modifier(*_volume, modifier->get_aabb());
}
} break;
case Node::NOTIFICATION_UNPARENTED: {
if (_volume != nullptr) {
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN_MSG(modifier != nullptr, "The modifier node wasn't linked properly");
post_edit_modifier(*_volume, modifier->get_aabb());
modifiers.remove_modifier(_modifier_id);
_volume = nullptr;
_modifier_id = 0;
}
} break;
case Node3D::NOTIFICATION_LOCAL_TRANSFORM_CHANGED: {
if (_volume != nullptr && is_inside_tree()) {
std::shared_ptr<VoxelDataLodMap> data = _volume->get_storage();
VoxelModifierStack &modifiers = data->modifiers;
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(_modifier_id);
ZN_ASSERT_RETURN(modifier != nullptr);
const AABB prev_aabb = modifier->get_aabb();
modifier->set_transform(get_transform());
const AABB aabb = modifier->get_aabb();
post_edit_modifier(*_volume, prev_aabb);
post_edit_modifier(*_volume, aabb);
// TODO Handle nesting properly, though it's a pain in the ass
// When the terrain is moved, the local transform of modifiers technically changes too.
// However it did not change relative to the terrain. But because we don't have a way to check that,
// all modifiers will trigger updates at the same time...
}
} break;
}
}
void VoxelModifier::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_operation", "op"), &VoxelModifier::set_operation);
ClassDB::bind_method(D_METHOD("get_operation"), &VoxelModifier::get_operation);
ClassDB::bind_method(D_METHOD("set_smoothness", "smoothness"), &VoxelModifier::set_smoothness);
ClassDB::bind_method(D_METHOD("get_smoothness"), &VoxelModifier::get_smoothness);
ADD_PROPERTY(PropertyInfo(Variant::INT, "operation", PROPERTY_HINT_ENUM, "Add,Remove"), "set_operation",
"get_operation");
ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "smoothness", PROPERTY_HINT_RANGE, "0.0, 100.0, 0.1"), "set_smoothness",
"get_smoothness");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
T *get_modifier(VoxelLodTerrain &volume, uint32_t id, zylann::voxel::VoxelModifier::Type type) {
std::shared_ptr<VoxelDataLodMap> data = volume.get_storage();
VoxelModifierStack &modifiers = data->modifiers;
zylann::voxel::VoxelModifier *modifier = modifiers.get_modifier(id);
ZN_ASSERT_RETURN_V(modifier != nullptr, nullptr);
ZN_ASSERT_RETURN_V(modifier->get_type() == type, nullptr);
return static_cast<T *>(modifier);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
zylann::voxel::VoxelModifierSphere *get_sphere(VoxelLodTerrain &volume, uint32_t id) {
return get_modifier<zylann::voxel::VoxelModifierSphere>(volume, id, zylann::voxel::VoxelModifier::TYPE_SPHERE);
}
float VoxelModifierSphere::get_radius() const {
return _radius;
}
void VoxelModifierSphere::set_radius(float r) {
_radius = math::max(r, 0.f);
if (_volume == nullptr) {
return;
}
zylann::voxel::VoxelModifierSphere *sphere = get_sphere(*_volume, _modifier_id);
ZN_ASSERT_RETURN(sphere != nullptr);
const AABB prev_aabb = sphere->get_aabb();
sphere->set_radius(r);
const AABB new_aabb = sphere->get_aabb();
post_edit_modifier(*_volume, prev_aabb);
post_edit_modifier(*_volume, new_aabb);
}
zylann::voxel::VoxelModifier *VoxelModifierSphere::create(zylann::voxel::VoxelModifierStack &modifiers, uint32_t id) {
zylann::voxel::VoxelModifierSphere *sphere = modifiers.add_modifier<zylann::voxel::VoxelModifierSphere>(id);
sphere->set_radius(_radius);
return sphere;
}
void VoxelModifierSphere::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_radius", "r"), &VoxelModifierSphere::set_radius);
ClassDB::bind_method(D_METHOD("get_radius"), &VoxelModifierSphere::get_radius);
ADD_PROPERTY(
PropertyInfo(Variant::FLOAT, "radius", PROPERTY_HINT_RANGE, "0.0, 100.0, 0.1"), "set_radius", "get_radius");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
zylann::voxel::VoxelModifierBuffer *get_buffer_modifier(VoxelLodTerrain &volume, uint32_t id) {
return get_modifier<zylann::voxel::VoxelModifierBuffer>(volume, id, zylann::voxel::VoxelModifier::TYPE_BUFFER);
}
static void set_buffer(zylann::voxel::VoxelModifierBuffer &bmod, Ref<VoxelMeshSDF> mesh_sdf) {
if (mesh_sdf.is_null() || mesh_sdf->get_voxel_buffer() == nullptr) {
bmod.set_buffer(nullptr, Vector3f(), Vector3f());
} else {
const AABB aabb = mesh_sdf->get_aabb();
bmod.set_buffer(mesh_sdf->get_voxel_buffer()->get_buffer_shared(), to_vec3f(aabb.position),
to_vec3f(aabb.position + aabb.size));
}
}
void VoxelModifierMesh::set_mesh_sdf(Ref<VoxelMeshSDF> mesh_sdf) {
if (mesh_sdf == _mesh_sdf) {
return;
}
if (_mesh_sdf.is_valid()) {
_mesh_sdf->disconnect("baked", callable_mp(this, &VoxelModifierMesh::_on_mesh_sdf_baked));
}
_mesh_sdf = mesh_sdf;
if (_mesh_sdf.is_valid()) {
_mesh_sdf->connect("baked", callable_mp(this, &VoxelModifierMesh::_on_mesh_sdf_baked));
}
if (_volume == nullptr) {
return;
}
zylann::voxel::VoxelModifierBuffer *bmod = get_buffer_modifier(*_volume, _modifier_id);
ZN_ASSERT_RETURN(bmod != nullptr);
const AABB prev_aabb = bmod->get_aabb();
set_buffer(*bmod, _mesh_sdf);
const AABB new_aabb = bmod->get_aabb();
post_edit_modifier(*_volume, prev_aabb);
post_edit_modifier(*_volume, new_aabb);
}
Ref<VoxelMeshSDF> VoxelModifierMesh::get_mesh_sdf() const {
return _mesh_sdf;
}
void VoxelModifierMesh::set_isolevel(float isolevel) {
if (isolevel == _isolevel) {
return;
}
_isolevel = isolevel;
if (_volume == nullptr) {
return;
}
zylann::voxel::VoxelModifierBuffer *bmod = get_buffer_modifier(*_volume, _modifier_id);
ZN_ASSERT_RETURN(bmod != nullptr);
bmod->set_isolevel(_isolevel);
post_edit_modifier(*_volume, bmod->get_aabb());
}
float VoxelModifierMesh::get_isolevel() const {
return _isolevel;
}
zylann::voxel::VoxelModifier *VoxelModifierMesh::create(zylann::voxel::VoxelModifierStack &modifiers, uint32_t id) {
zylann::voxel::VoxelModifierBuffer *bmod = modifiers.add_modifier<zylann::voxel::VoxelModifierBuffer>(id);
set_buffer(*bmod, _mesh_sdf);
bmod->set_isolevel(_isolevel);
return bmod;
}
void VoxelModifierMesh::_on_mesh_sdf_baked() {
if (_volume == nullptr) {
return;
}
zylann::voxel::VoxelModifierBuffer *bmod = get_buffer_modifier(*_volume, _modifier_id);
ZN_ASSERT_RETURN(bmod != nullptr);
const AABB prev_aabb = bmod->get_aabb();
set_buffer(*bmod, _mesh_sdf);
const AABB new_aabb = bmod->get_aabb();
post_edit_modifier(*_volume, prev_aabb);
post_edit_modifier(*_volume, new_aabb);
}
void VoxelModifierMesh::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_mesh_sdf", "mesh_sdf"), &VoxelModifierMesh::set_mesh_sdf);
ClassDB::bind_method(D_METHOD("get_mesh_sdf"), &VoxelModifierMesh::get_mesh_sdf);
ClassDB::bind_method(D_METHOD("set_isolevel", "isolevel"), &VoxelModifierMesh::set_isolevel);
ClassDB::bind_method(D_METHOD("get_isolevel"), &VoxelModifierMesh::get_isolevel);
ADD_PROPERTY(
PropertyInfo(Variant::OBJECT, "mesh_sdf", PROPERTY_HINT_RESOURCE_TYPE, VoxelMeshSDF::get_class_static()),
"set_mesh_sdf", "get_mesh_sdf");
ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "isolevel", PROPERTY_HINT_RANGE, "-100.0, 100.0, 0.01"), "set_isolevel",
"get_isolevel");
}
} // namespace zylann::voxel::gd

storage/modifiers_gd.h — new file, 92 lines
View File

@ -0,0 +1,92 @@
#ifndef VOXEL_MODIFIERS_GD_H
#define VOXEL_MODIFIERS_GD_H
#include "../edition/voxel_mesh_sdf_gd.h"
#include <scene/3d/node_3d.h>
namespace zylann::voxel {
class VoxelLodTerrain;
class VoxelModifier;
class VoxelModifierStack;
namespace gd {
class VoxelModifier : public Node3D {
GDCLASS(VoxelModifier, Node3D)
public:
enum Operation { //
OPERATION_ADD,
OPERATION_REMOVE,
OPERATION_COUNT
};
VoxelModifier();
void set_operation(Operation op);
Operation get_operation() const;
void set_smoothness(float s);
float get_smoothness() const;
protected:
virtual zylann::voxel::VoxelModifier *create(zylann::voxel::VoxelModifierStack &modifiers, uint32_t id);
void _notification(int p_what);
VoxelLodTerrain *_volume = nullptr;
uint32_t _modifier_id = 0;
private:
static void _bind_methods();
Operation _operation = OPERATION_ADD;
float _smoothness = 0.f;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class VoxelModifierSphere : public VoxelModifier {
GDCLASS(VoxelModifierSphere, VoxelModifier);
public:
float get_radius() const;
void set_radius(float r);
protected:
zylann::voxel::VoxelModifier *create(zylann::voxel::VoxelModifierStack &modifiers, uint32_t id) override;
private:
static void _bind_methods();
float _radius = 10.f;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class VoxelModifierMesh : public VoxelModifier {
GDCLASS(VoxelModifierMesh, VoxelModifier);
public:
void set_mesh_sdf(Ref<VoxelMeshSDF> mesh_sdf);
Ref<VoxelMeshSDF> get_mesh_sdf() const;
void set_isolevel(float isolevel);
float get_isolevel() const;
protected:
zylann::voxel::VoxelModifier *create(zylann::voxel::VoxelModifierStack &modifiers, uint32_t id) override;
private:
void _on_mesh_sdf_baked();
static void _bind_methods();
Ref<VoxelMeshSDF> _mesh_sdf;
float _isolevel = 0.0f;
};
} // namespace gd
} // namespace zylann::voxel
VARIANT_ENUM_CAST(zylann::voxel::gd::VoxelModifier::Operation);
#endif // VOXEL_MODIFIERS_GD_H

View File

@ -7,13 +7,18 @@
namespace zylann::voxel {
// Stores loaded voxel data for a chunk of the volume. Mesh and colliders are stored separately.
// Stores voxel data for a chunk of the volume. Mesh and colliders are stored separately.
// Voxel data can be present, or not. If not present, it means we know the block contains no edits, and voxels can be
// obtained by querying generators.
// Voxel data can also be present as a cache of generators, for cheaper repeated queries.
class VoxelDataBlock {
public:
RefCount viewers;
VoxelDataBlock() {}
VoxelDataBlock(unsigned int p_lod_index) : _lod_index(p_lod_index) {}
VoxelDataBlock(std::shared_ptr<VoxelBufferInternal> &buffer, unsigned int p_lod_index) :
_voxels(buffer), _lod_index(p_lod_index) {}
@ -39,6 +44,14 @@ public:
return _lod_index;
}
// Tests if voxel data is present.
// If false, it means the block has no edits and does not contain cached generated data,
// so we may fall back on procedural generators on the fly or request a cache.
inline bool has_voxels() const {
return _voxels != nullptr;
}
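// Callers are expected to handle the "no voxels" case, typically by skipping the block or
// falling back on the generator, e.g.:
//   if (block != nullptr && block->has_voxels()) { /* read block->get_voxels_const() */ }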
// Get voxels, expecting them to be present
VoxelBufferInternal &get_voxels() {
#ifdef DEBUG_ENABLED
ZN_ASSERT(_voxels != nullptr);
@ -46,6 +59,7 @@ public:
return *_voxels;
}
// Get voxels, expecting them to be present
const VoxelBufferInternal &get_voxels_const() const {
#ifdef DEBUG_ENABLED
ZN_ASSERT(_voxels != nullptr);
@ -53,6 +67,7 @@ public:
return *_voxels;
}
// Get voxels, expecting them to be present
std::shared_ptr<VoxelBufferInternal> get_voxels_shared() const {
#ifdef DEBUG_ENABLED
ZN_ASSERT(_voxels != nullptr);
@ -65,6 +80,11 @@ public:
_voxels = buffer;
}
void clear_voxels() {
_voxels = nullptr;
_edited = false;
}
void set_modified(bool modified);
inline bool is_modified() const {
@ -80,7 +100,7 @@ public:
}
inline void set_edited(bool edited) {
_edited = true;
_edited = edited;
}
inline bool is_edited() const {
@ -90,16 +110,17 @@ public:
private:
std::shared_ptr<VoxelBufferInternal> _voxels;
// TODO Storing lod index here might not be necessary, it is known since we have to get the map first
uint8_t _lod_index = 0;
// The block was edited, which requires its LOD counterparts to be recomputed
// Indicates mipmaps need to be computed since this block was modified.
bool _needs_lodding = false;
// Indicates if this block is different from the time it was loaded (should be saved)
bool _modified = false;
// Tells if the block has ever been edited.
// If `false`, the same data can be obtained by running the generator.
// If `false`, then the data is a cache of generators and modifiers. It can be re-generated.
// Once it becomes `true`, it usually never comes back to `false` unless reverted.
bool _edited = false;

View File

@ -16,7 +16,9 @@ public:
_offset_in_blocks = blocks_box.pos;
blocks_box.for_each_cell_zxy([&map, this](const Vector3i pos) {
VoxelDataBlock *block = map.get_block(pos);
if (block != nullptr) {
// TODO Might need to invoke the generator at some level for present blocks without voxels,
// or make sure all blocks contain voxel data
if (block != nullptr && block->has_voxels()) {
set_block(pos, block->get_voxels_shared());
} else {
set_block(pos, nullptr);

View File

@ -54,7 +54,7 @@ unsigned int VoxelDataMap::get_lod_index() const {
int VoxelDataMap::get_voxel(Vector3i pos, unsigned int c) const {
Vector3i bpos = voxel_to_block(pos);
const VoxelDataBlock *block = get_block(bpos);
if (block == nullptr) {
if (block == nullptr || !block->has_voxels()) {
return _default_voxel[c];
}
RWLockRead lock(block->get_voxels_const().get_lock());
@ -93,7 +93,8 @@ void VoxelDataMap::set_voxel(int value, Vector3i pos, unsigned int c) {
float VoxelDataMap::get_voxel_f(Vector3i pos, unsigned int c) const {
Vector3i bpos = voxel_to_block(pos);
const VoxelDataBlock *block = get_block(bpos);
if (block == nullptr) {
// TODO The generator needs to be invoked if the block has no voxels
if (block == nullptr || !block->has_voxels()) {
// TODO Not valid for a float return value
return _default_voxel[c];
}
@ -105,6 +106,8 @@ float VoxelDataMap::get_voxel_f(Vector3i pos, unsigned int c) const {
void VoxelDataMap::set_voxel_f(real_t value, Vector3i pos, unsigned int c) {
VoxelDataBlock *block = get_or_create_block_at_voxel_pos(pos);
Vector3i lpos = to_local(pos);
// TODO In this situation, the generator must be invoked to fill the block
ZN_ASSERT_RETURN_MSG(block->has_voxels(), "Block not cached");
VoxelBufferInternal &voxels = block->get_voxels();
RWLockWrite lock(voxels.get_lock());
voxels.set_voxel_f(value, lpos.x, lpos.y, lpos.z, c);
@ -159,6 +162,26 @@ VoxelDataBlock *VoxelDataMap::set_block_buffer(
return block;
}
VoxelDataBlock *VoxelDataMap::set_empty_block(Vector3i bpos, bool overwrite) {
VoxelDataBlock *block = get_block(bpos);
if (block == nullptr) {
VoxelDataBlock &map_block = _blocks_map[bpos];
map_block = std::move(VoxelDataBlock(_lod_index));
block = &map_block;
} else if (overwrite) {
block->clear_voxels();
} else {
ZN_PROFILE_MESSAGE("Redundant data block");
ZN_PRINT_VERBOSE(format(
"Discarded block {} lod {}, there was already data and overwriting is not enabled", bpos, _lod_index));
}
return block;
}
bool VoxelDataMap::has_block(Vector3i pos) const {
return _blocks_map.find(pos) != _blocks_map.end();
}
@ -194,7 +217,7 @@ void VoxelDataMap::copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsig
const VoxelDataBlock *block = get_block(bpos);
const Vector3i src_block_origin = block_to_voxel(bpos);
if (block != nullptr) {
if (block != nullptr && block->has_voxels()) {
const VoxelBufferInternal &src_buffer = block->get_voxels_const();
RWLockRead rlock(src_buffer.get_lock());
@ -260,6 +283,9 @@ void VoxelDataMap::paste(Vector3i min_pos, VoxelBufferInternal &src_buffer, unsi
}
}
// TODO In this situation, the generator has to be invoked to fill the blanks
ZN_ASSERT_CONTINUE_MSG(block->has_voxels(), "Area not cached");
const Vector3i dst_block_origin = block_to_voxel(bpos);
VoxelBufferInternal &dst_buffer = block->get_voxels();
@ -306,7 +332,7 @@ bool VoxelDataMap::is_area_fully_loaded(const Box3i voxels_box) const {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator) {
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator, bool is_streaming) {
ZN_PROFILE_SCOPE();
//ERR_FAIL_COND_MSG(_full_load_mode == false, nullptr, "This function can only be used in full load mode");
@ -333,10 +359,20 @@ void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generat
{
RWLockRead rlock(data_lod.map_lock);
block_box.for_each_cell([&data_lod, lod_index, &todo](Vector3i block_pos) {
block_box.for_each_cell([&data_lod, lod_index, &todo, is_streaming](Vector3i block_pos) {
// We don't check "loading blocks", because this function wants to complete the task right now.
if (!data_lod.map.has_block(block_pos)) {
todo.push_back(Task{ block_pos, lod_index, nullptr });
const VoxelDataBlock *block = data_lod.map.get_block(block_pos);
if (is_streaming) {
// Non-resident blocks must not be touched because we don't know what's in them.
// We can generate caches if resident ones have no voxel data.
if (block != nullptr && !block->has_voxels()) {
todo.push_back(Task{ block_pos, lod_index, nullptr });
}
} else {
// We can generate wherever voxel data is not present in memory
if (block == nullptr || !block->has_voxels()) {
todo.push_back(Task{ block_pos, lod_index, nullptr });
}
}
});
}
@ -356,6 +392,7 @@ void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generat
VoxelGenerator::VoxelQueryData q{ *task.voxels, task.block_pos * (data_block_size << task.lod_index),
task.lod_index };
generator->generate_block(q);
data.modifiers.apply(q.voxel_buffer, AABB(q.origin_in_voxels, q.voxel_buffer.get_size() << q.lod));
}
}
@ -385,4 +422,20 @@ void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generat
}
}
void clear_cached_blocks_in_voxel_area(VoxelDataLodMap &data, Box3i p_voxel_box) {
for (unsigned int lod_index = 0; lod_index < data.lod_count; ++lod_index) {
VoxelDataLodMap::Lod &lod = data.lods[lod_index];
RWLockRead rlock(lod.map_lock);
const Box3i blocks_box = p_voxel_box.downscaled(lod.map.get_block_size() << lod_index);
blocks_box.for_each_cell_zxy([&lod](const Vector3i bpos) {
VoxelDataBlock *block = lod.map.get_block(bpos);
if (block == nullptr || block->is_edited() || block->is_modified()) {
return;
}
block->clear_voxels();
});
}
}
} // namespace zylann::voxel
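
The keep-or-clear rule in `clear_cached_blocks_in_voxel_area()` above is worth isolating: only blocks that are pure generator/modifier output may lose their voxels. A minimal standalone sketch of that predicate, using simplified stand-in types rather than the module's `VoxelDataBlock`:

```cpp
#include <cassert>

struct CachedBlockState {
	bool edited;   // destructive edits are present
	bool modified; // differs from what was loaded, pending save
};

bool can_clear_cached_voxels(const CachedBlockState s) {
	// Edited or modified blocks are authoritative and must be preserved;
	// anything else can be recomputed from generators and modifiers on demand.
	return !s.edited && !s.modified;
}

int main() {
	assert(can_clear_cached_voxels({ false, false }));
	assert(!can_clear_cached_voxels({ true, false }));
	assert(!can_clear_cached_voxels({ false, true }));
	return 0;
}
```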

View File

@ -3,6 +3,7 @@
#include "../util/fixed_array.h"
#include "../util/profiling.h"
#include "modifiers.h"
#include "voxel_data_block.h"
#include <unordered_map>
@ -11,9 +12,17 @@ namespace zylann::voxel {
class VoxelGenerator;
// Infinite voxel storage by means of octants like Gridmap, within a constant LOD.
// Sparse voxel storage by means of cubic chunks, within a constant LOD.
//
// Convenience functions to access VoxelBuffers internally will lock them to protect against multithreaded access.
// However, the map itself is not thread-safe.
//
// When doing data streaming, the volume is *partially* loaded. If a block is not found at some coordinates,
// it means we don't know if it contains edits or not. Knowing this is important to avoid writing or caching voxel data
// in blank areas, which may be completely different once loaded.
// When using "full load" of edits, it doesn't matter. If all edits are loaded, we know up-front that everything else
// isn't edited (which also means we may not find blocks without data in them).
//
class VoxelDataMap {
public:
// Converts voxel coordinates into block coordinates.
@ -75,6 +84,7 @@ public:
// Moves the given buffer into a block of the map. The buffer is referenced, no copy is made.
VoxelDataBlock *set_block_buffer(Vector3i bpos, std::shared_ptr<VoxelBufferInternal> &buffer, bool overwrite);
VoxelDataBlock *set_empty_block(Vector3i bpos, bool overwrite);
struct NoAction {
inline void operator()(VoxelDataBlock &block) {}
@ -192,6 +202,7 @@ private:
// To prevent too much hashing, this reference is checked before.
//mutable VoxelDataBlock *_last_accessed_block = nullptr;
// This is block size in VOXELS. To convert to space units, use `block_size << lod_index`.
unsigned int _block_size;
unsigned int _block_size_pow2;
unsigned int _block_size_mask;
@ -210,13 +221,17 @@ struct VoxelDataLodMap {
// Each LOD works in a coordinate space whose blocks span 2x more voxels for each increment of the LOD index
FixedArray<Lod, constants::MAX_LOD> lods;
unsigned int lod_count = 1;
VoxelModifierStack modifiers;
};
// Generates all non-present blocks in preparation for an edit.
// Every block intersecting with the box at every LOD will be checked.
// This function runs sequentially and should be thread-safe. May be used if blocks are immediately needed.
// It will block if other threads are accessing the same data.
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator);
void preload_box(VoxelDataLodMap &data, Box3i voxel_box, VoxelGenerator *generator, bool is_streaming);
// Clears voxel data from blocks that are pure results of generators and modifiers.
void clear_cached_blocks_in_voxel_area(VoxelDataLodMap &data, Box3i p_voxel_box);
} // namespace zylann::voxel
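
The `is_streaming` parameter added to `preload_box()` changes which blocks may be generated and cached ahead of an edit, following the streaming semantics described in the class comment above. The sketch below models only that selection rule with hypothetical names; the real function works on map blocks, not booleans:

```cpp
#include <cassert>

struct BlockQuery {
	bool resident;   // a data block exists in the map at this position
	bool has_voxels; // that block already holds voxel data
};

// Returns true if the block should be generated and cached before the edit.
bool should_generate_cache(const BlockQuery q, const bool is_streaming) {
	if (is_streaming) {
		// Non-resident blocks are unknown territory: the stream may hold edits for them,
		// so only resident blocks lacking voxel data may be cached.
		return q.resident && !q.has_voxels;
	}
	// With full load, anything without voxel data in memory can be generated.
	return !q.resident || !q.has_voxels;
}

int main() {
	assert(!should_generate_cache({ false, false }, true)); // unknown block while streaming: hands off
	assert(should_generate_cache({ true, false }, true));   // resident but empty: cache it
	assert(should_generate_cache({ false, false }, false)); // full load: generate freely
	assert(!should_generate_cache({ true, true }, false));  // already has data: nothing to do
	return 0;
}
```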

View File

@ -110,6 +110,8 @@ public:
virtual int get_lod_count() const;
// Should generated blocks be saved immediately? If not, they will be saved only when modified.
// If this is enabled, generated blocks will immediately be considered edited and will be saved to the stream.
// Warning: this is incompatible with non-destructive workflows such as modifiers.
void set_save_generator_output(bool enabled);
bool get_save_generator_output() const;

View File

@ -575,12 +575,15 @@ struct ScheduleSaveAction {
if (block.is_modified()) {
//print_line(String("Scheduling save for block {0}").format(varray(block->position.to_vec3())));
VoxelTerrain::BlockToSave b;
if (with_copy) {
RWLockRead lock(block.get_voxels().get_lock());
b.voxels = make_shared_instance<VoxelBufferInternal>();
block.get_voxels_const().duplicate_to(*b.voxels, true);
} else {
b.voxels = block.get_voxels_shared();
// If a modified block has no voxels, it is equivalent to removing the block from the stream
if (block.has_voxels()) {
if (with_copy) {
RWLockRead lock(block.get_voxels().get_lock());
b.voxels = make_shared_instance<VoxelBufferInternal>();
block.get_voxels_const().duplicate_to(*b.voxels, true);
} else {
b.voxels = block.get_voxels_shared();
}
}
b.position = bpos;
blocks_to_save.push_back(b);
@ -936,8 +939,8 @@ static void request_block_load(uint32_t volume_id, std::shared_ptr<StreamingDepe
init_sparse_grid_priority_dependency(
priority_dependency, block_pos, data_block_size, shared_viewers_data, volume_transform);
LoadBlockDataTask *task = ZN_NEW(LoadBlockDataTask(
volume_id, block_pos, 0, data_block_size, request_instances, stream_dependency, priority_dependency));
LoadBlockDataTask *task = ZN_NEW(LoadBlockDataTask(volume_id, block_pos, 0, data_block_size, request_instances,
stream_dependency, priority_dependency, true));
VoxelServer::get_singleton().push_async_io_task(task);
@ -1471,7 +1474,7 @@ void VoxelTerrain::process_meshing() {
MeshBlockTask *task = ZN_NEW(MeshBlockTask);
task->volume_id = _volume_id;
task->position = mesh_block_pos;
task->lod = 0;
task->lod_index = 0;
task->meshing_dependency = _meshing_dependency;
task->data_block_size = get_data_block_size();
task->collision_hint = _generate_collisions;
@ -1480,7 +1483,7 @@ void VoxelTerrain::process_meshing() {
task->blocks_count = 0;
data_box.for_each_cell_zxy([this, task](Vector3i data_block_pos) {
VoxelDataBlock *data_block = _data_map.get_block(data_block_pos);
if (data_block != nullptr) {
if (data_block != nullptr && data_block->has_voxels()) {
task->blocks[task->blocks_count] = data_block->get_voxels_shared();
}
++task->blocks_count;
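
The change to `ScheduleSaveAction` above skips copying voxels for modified blocks that have none, relying on the stated equivalence with removing the block from the stream. A standalone sketch of that scheduling rule, with simplified stand-ins for `VoxelDataBlock` and `BlockToSave`:

```cpp
#include <cassert>
#include <memory>
#include <vector>

using VoxelsPtr = std::shared_ptr<std::vector<float>>;

struct MockBlock {
	VoxelsPtr voxels;
	bool modified = false;
};

struct BlockToSave {
	VoxelsPtr voxels; // null means "erase this block from the stream"
};

std::vector<BlockToSave> schedule_saves(const std::vector<MockBlock> &blocks) {
	std::vector<BlockToSave> to_save;
	for (const MockBlock &block : blocks) {
		if (!block.modified) {
			continue; // unmodified blocks don't need saving
		}
		BlockToSave b;
		if (block.voxels != nullptr) {
			b.voxels = block.voxels; // or a deep copy when `with_copy` is requested
		}
		to_save.push_back(b);
	}
	return to_save;
}

int main() {
	std::vector<MockBlock> blocks(3);
	blocks[0].modified = true; // modified but empty -> saved as a removal
	blocks[1].modified = true;
	blocks[1].voxels = std::make_shared<std::vector<float>>(8, 0.f);
	const std::vector<BlockToSave> out = schedule_saves(blocks);
	assert(out.size() == 2);
	assert(out[0].voxels == nullptr);
	assert(out[1].voxels != nullptr);
	return 0;
}
```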

View File

@ -106,10 +106,13 @@ struct ScheduleSaveAction {
//print_line(String("Scheduling save for block {0}").format(varray(block->position.to_vec3())));
VoxelLodTerrainUpdateData::BlockToSave b;
b.voxels = make_shared_instance<VoxelBufferInternal>();
{
RWLockRead lock(block.get_voxels().get_lock());
block.get_voxels_const().duplicate_to(*b.voxels, true);
// If a modified block has no voxels, it is equivalent to removing the block from the stream
if (block.has_voxels()) {
b.voxels = make_shared_instance<VoxelBufferInternal>();
{
RWLockRead lock(block.get_voxels().get_lock());
block.get_voxels_const().duplicate_to(*b.voxels, true);
}
}
b.position = bpos;
@ -126,6 +129,29 @@ static inline uint64_t get_ticks_msec() {
} // namespace
void VoxelLodTerrain::ApplyMeshUpdateTask::run(TimeSpreadTaskContext &ctx) {
if (!VoxelServer::get_singleton().is_volume_valid(volume_id)) {
// The node may have been destroyed while this task was still pending
ZN_PRINT_VERBOSE("Cancelling ApplyMeshUpdateTask, volume_id is invalid");
return;
}
std::unordered_map<Vector3i, RefCount> &queued_tasks_in_lod = self->_queued_main_thread_mesh_updates[data.lod];
auto it = queued_tasks_in_lod.find(data.position);
if (it != queued_tasks_in_lod.end()) {
RefCount &count = it->second;
count.remove();
if (count.get() > 0) {
// This is not the only main thread task queued for this block.
// Cancel it to avoid buildup.
return;
}
queued_tasks_in_lod.erase(it);
}
self->apply_mesh_update(data);
}
VoxelLodTerrain::VoxelLodTerrain() {
// Note: don't do anything heavy in the constructor.
// Godot may create and destroy dozens of instances of all node types on startup,
@ -148,20 +174,6 @@ VoxelLodTerrain::VoxelLodTerrain() {
_update_data->settings.bounds_in_voxels =
Box3i::from_center_extents(Vector3i(), Vector3iUtil::create(constants::MAX_VOLUME_EXTENT));
struct ApplyMeshUpdateTask : public ITimeSpreadTask {
void run(TimeSpreadTaskContext &ctx) override {
if (!VoxelServer::get_singleton().is_volume_valid(volume_id)) {
// The node can have been destroyed while this task was still pending
ZN_PRINT_VERBOSE("Cancelling ApplyMeshUpdateTask, volume_id is invalid");
return;
}
self->apply_mesh_update(data);
}
uint32_t volume_id = 0;
VoxelLodTerrain *self = nullptr;
VoxelServer::BlockMeshOutput data;
};
// Mesh updates are spread over frames by scheduling them in a task runner of VoxelServer,
// but instead of using a reception buffer we use a callback,
// because this kind of task scheduling would otherwise delay the update by 1 frame
@ -174,6 +186,16 @@ VoxelLodTerrain::VoxelLodTerrain() {
task->self = self;
task->data = ob;
VoxelServer::get_singleton().push_main_thread_time_spread_task(task);
// If two tasks are queued for the same mesh, cancel the old ones.
// This is for cases where applying meshes on the main thread is slower than the rate at which they are produced,
// which can cause a buildup that never seems to stop.
// This is at the expense of holes appearing until all tasks are done.
std::unordered_map<Vector3i, RefCount> &queued_tasks_in_lod = self->_queued_main_thread_mesh_updates[ob.lod];
auto p = queued_tasks_in_lod.insert({ ob.position, RefCount(1) });
if (!p.second) {
p.first->second.add();
}
};
callbacks.data_output_callback = [](void *cb_data, VoxelServer::BlockDataOutput &ob) {
VoxelLodTerrain *self = reinterpret_cast<VoxelLodTerrain *>(cb_data);
@ -482,12 +504,16 @@ bool VoxelLodTerrain::is_area_editable(Box3i p_voxel_box) const {
}
inline std::shared_ptr<VoxelBufferInternal> try_get_voxel_buffer_with_lock(
const VoxelDataLodMap::Lod &data_lod, Vector3i block_pos) {
const VoxelDataLodMap::Lod &data_lod, Vector3i block_pos, bool &out_generate) {
RWLockRead rlock(data_lod.map_lock);
const VoxelDataBlock *block = data_lod.map.get_block(block_pos);
if (block == nullptr) {
return nullptr;
}
if (!block->has_voxels()) {
out_generate = true;
return nullptr;
}
return block->get_voxels_shared();
}
@ -509,13 +535,21 @@ VoxelSingleValue VoxelLodTerrain::get_voxel(Vector3i pos, unsigned int channel,
}
Vector3i block_pos = pos >> get_data_block_size_pow2();
bool generate = false;
if (_update_data->settings.full_load_mode) {
const VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod0, block_pos);
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod0, block_pos, generate);
if (voxels == nullptr) {
// TODO We should be able to get a value if modifiers are used but not a base generator
if (_generator.is_valid()) {
return _generator->generate_single(pos, channel);
VoxelSingleValue value = _generator->generate_single(pos, channel);
if (channel == VoxelBufferInternal::CHANNEL_SDF) {
float sdf = value.f;
_data->modifiers.apply(sdf, to_vec3(pos));
value.f = sdf;
}
return value;
}
} else {
const Vector3i rpos = data_lod0.map.to_local(pos);
@ -534,10 +568,27 @@ VoxelSingleValue VoxelLodTerrain::get_voxel(Vector3i pos, unsigned int channel,
Vector3i voxel_pos = pos;
for (unsigned int lod_index = 0; lod_index < _update_data->settings.lod_count; ++lod_index) {
const VoxelDataLodMap::Lod &data_lod = _data->lods[lod_index];
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod, block_pos);
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod, block_pos, generate);
if (voxels != nullptr) {
return get_voxel_with_lock(*voxels, data_lod.map.to_local(voxel_pos), channel);
} else if (generate) {
// TODO We should be able to get a value if modifiers are used but not a base generator
if (_generator.is_valid()) {
VoxelSingleValue value = _generator->generate_single(pos, channel);
if (channel == VoxelBufferInternal::CHANNEL_SDF) {
float sdf = value.f;
_data->modifiers.apply(sdf, to_vec3(pos));
value.f = sdf;
}
return value;
} else {
return defval;
}
}
// Fall back to a lower LOD
block_pos = block_pos >> 1;
voxel_pos = voxel_pos >> 1;
@ -550,9 +601,12 @@ bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int ch
const Vector3i block_pos_lod0 = pos >> get_data_block_size_pow2();
VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
const Vector3i block_pos = data_lod0.map.voxel_to_block(pos);
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod0, block_pos);
bool can_generate = false;
std::shared_ptr<VoxelBufferInternal> voxels = try_get_voxel_buffer_with_lock(data_lod0, block_pos, can_generate);
if (voxels == nullptr) {
if (!_update_data->settings.full_load_mode) {
if (!_update_data->settings.full_load_mode && !can_generate) {
return false;
}
if (_generator.is_valid()) {
@ -560,6 +614,7 @@ bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int ch
voxels->create(Vector3iUtil::create(get_data_block_size()));
VoxelGenerator::VoxelQueryData q{ *voxels, pos, 0 };
_generator->generate_block(q);
_data->modifiers.apply(q.voxel_buffer, AABB(pos, q.voxel_buffer.get_size()));
RWLockWrite wlock(data_lod0.map_lock);
if (data_lod0.map.has_block(block_pos_lod0)) {
// A block was loaded by another thread, cancel our edit.
@ -571,21 +626,33 @@ bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int ch
// If it turns out to be a problem, use CoW?
RWLockWrite lock(voxels->get_lock());
voxels->set_voxel(value, data_lod0.map.to_local(pos), channel);
// We don't update mips here; that must be done by the caller
return true;
}
void VoxelLodTerrain::copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask) {
const VoxelDataLodMap::Lod &data_lod0 = _data->lods[0];
VoxelModifierStack &modifiers = _data->modifiers;
if (_update_data->settings.full_load_mode && _generator.is_valid()) {
struct GenContext {
VoxelGenerator &generator;
const VoxelModifierStack &modifiers;
};
GenContext gctx{ **_generator, modifiers };
RWLockRead rlock(data_lod0.map_lock);
data_lod0.map.copy(p_origin_voxels, dst_buffer, channels_mask, *_generator,
data_lod0.map.copy(p_origin_voxels, dst_buffer, channels_mask, &gctx,
[](void *callback_data, VoxelBufferInternal &voxels, Vector3i pos) {
VoxelGenerator *generator = reinterpret_cast<VoxelGenerator *>(callback_data);
GenContext *gctx = reinterpret_cast<GenContext *>(callback_data);
VoxelGenerator::VoxelQueryData q{ voxels, pos, 0 };
generator->generate_block(q);
gctx->generator.generate_block(q);
gctx->modifiers.apply(voxels, AABB(pos, voxels.get_size()));
});
} else {
RWLockRead rlock(data_lod0.map_lock);
// TODO Apply modifiers
data_lod0.map.copy(p_origin_voxels, dst_buffer, channels_mask);
}
}
@ -622,6 +689,7 @@ void VoxelLodTerrain::post_edit_area(Box3i p_box) {
// TODO That boolean is also modified by the threaded update task (always set to false)
if (!block->get_needs_lodding()) {
block->set_needs_lodding(true);
// This is what indirectly causes remeshing
_update_data->state.blocks_pending_lodding_lod0.push_back(block_pos_lod0);
}
});
@ -632,6 +700,14 @@ void VoxelLodTerrain::post_edit_area(Box3i p_box) {
}
}
void VoxelLodTerrain::post_edit_modifiers(Box3i p_voxel_box) {
clear_cached_blocks_in_voxel_area(*_data, p_voxel_box);
// Not sure if it is worth re-caching these blocks. We may see about that in the future if performance is an issue.
MutexLock lock(_update_data->state.changed_generated_areas_mutex);
_update_data->state.changed_generated_areas.push_back(p_voxel_box);
}
void VoxelLodTerrain::push_async_edit(IThreadedTask *task, Box3i box, std::shared_ptr<AsyncDependencyTracker> tracker) {
CRASH_COND(task == nullptr);
CRASH_COND(tracker == nullptr);
@ -788,6 +864,10 @@ void VoxelLodTerrain::_set_lod_count(int p_lod_count) {
item.octree.create(p_lod_count, nda);
}
for (unsigned int i = 0; i < _queued_main_thread_mesh_updates.size(); ++i) {
_queued_main_thread_mesh_updates[i].clear();
}
// Not entirely required, but changing LOD count at runtime is rarely needed
reset_maps();
}
@ -804,7 +884,11 @@ void VoxelLodTerrain::reset_maps() {
VoxelLodTerrainUpdateData::State &state = _update_data->state;
// Make a new one, so if threads still reference the old one it will be a different copy
_data = make_shared_instance<VoxelDataLodMap>();
std::shared_ptr<VoxelDataLodMap> new_data = make_shared_instance<VoxelDataLodMap>();
// Keep modifiers, we only reset voxel data
new_data->modifiers = std::move(_data->modifiers);
_data = new_data;
_data->lod_count = lod_count;
for (unsigned int lod_index = 0; lod_index < state.lods.size(); ++lod_index) {
@ -1331,6 +1415,14 @@ void VoxelLodTerrain::apply_data_block_response(VoxelServer::BlockDataOutput &ob
VoxelDataBlock *block = data_lod.map.set_block_buffer(ob.position, ob.voxels, false);
CRASH_COND(block == nullptr);
block->set_edited(ob.type == VoxelServer::BlockDataOutput::TYPE_LOADED);
} else {
// Loading returned an empty block: that means we know the stream does not contain a block here.
// When doing data streaming, we'll generate on the fly if this block is queried.
VoxelDataLodMap::Lod &data_lod = _data->lods[ob.lod];
RWLockWrite wlock(data_lod.map_lock);
VoxelDataBlock *block = data_lod.map.set_empty_block(ob.position, false);
ZN_ASSERT(block != nullptr);
}
{
@ -1471,6 +1563,11 @@ void VoxelLodTerrain::apply_mesh_update(const VoxelServer::BlockMeshOutput &ob)
block->set_mesh(mesh, DirectMeshInstance::GIMode(get_gi_mode()));
{
// TODO Optimize: don't build transition meshes until they need to be shown.
// Profiling has shown Godot takes as much time to build one as the main mesh of a block, so because there are 6
// transition meshes, we spend about 80% of the time on these, which is counter-intuitive because transition
// meshes are tiny in comparison... (collision meshes still take 5x more time to build than ALL rendering
// meshes, but that's a different issue)
ZN_PROFILE_SCOPE_NAMED("Transition meshes");
for (unsigned int dir = 0; dir < mesh_data.transition_surfaces.size(); ++dir) {
Ref<ArrayMesh> transition_mesh = build_mesh(to_span(mesh_data.transition_surfaces[dir]),
@ -2176,16 +2273,12 @@ Array VoxelLodTerrain::_b_debug_print_sdf_top_down(Vector3i center, Vector3i ext
const VoxelDataLodMap::Lod &data_lod = _data->lods[lod_index];
world_box.for_each_cell([&data_lod, &buffer, world_box](const Vector3i &world_pos) {
std::shared_ptr<VoxelBufferInternal> voxels =
try_get_voxel_buffer_with_lock(data_lod, data_lod.map.voxel_to_block(world_pos));
if (voxels == nullptr) {
return;
}
const float v =
get_voxel_with_lock(*voxels, data_lod.map.to_local(world_pos), VoxelBufferInternal::CHANNEL_SDF).f;
world_box.for_each_cell([this, world_box, &buffer](const Vector3i &world_pos) {
const Vector3i rpos = world_pos - world_box.pos;
buffer.set_voxel_f(v, rpos.x, rpos.y, rpos.z, VoxelBufferInternal::CHANNEL_SDF);
VoxelSingleValue v;
v.f = 1.f;
v = get_voxel(world_pos, VoxelBufferInternal::CHANNEL_SDF, v);
buffer.set_voxel_f(v.f, rpos.x, rpos.y, rpos.z, VoxelBufferInternal::CHANNEL_SDF);
});
Ref<Image> image = gd::VoxelBuffer::debug_print_sdf_to_image_top_down(buffer);
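
Several of the hunks above layer the modifier stack on top of the generator's SDF output, both for whole blocks and for single-voxel queries. The following standalone sketch shows the idea for a single SDF sample; the sphere modifier here is only an example, not the module's `VoxelModifierSphere`:

```cpp
#include <algorithm>
#include <cassert>
#include <cmath>
#include <functional>
#include <vector>

struct Vec3 {
	float x, y, z;
};

static float sdf_sphere(const Vec3 p, const Vec3 center, const float radius) {
	const float dx = p.x - center.x, dy = p.y - center.y, dz = p.z - center.z;
	return std::sqrt(dx * dx + dy * dy + dz * dz) - radius;
}

struct ModifierStack {
	// Each modifier transforms the SDF value at a position, in order.
	std::vector<std::function<float(float, Vec3)>> modifiers;

	float apply(float sdf, const Vec3 pos) const {
		for (const auto &m : modifiers) {
			sdf = m(sdf, pos);
		}
		return sdf;
	}
};

int main() {
	// Base "generator": flat ground at y = 0 (SDF = height above ground).
	const auto generate_sdf = [](const Vec3 p) { return p.y; };

	ModifierStack stack;
	// Additive sphere modifier: union with a sphere, i.e. SDF min.
	stack.modifiers.push_back(
			[](float sdf, Vec3 p) { return std::min(sdf, sdf_sphere(p, Vec3{ 0.f, 4.f, 0.f }, 2.f)); });

	const Vec3 pos{ 0.f, 4.f, 0.f };
	float sdf = generate_sdf(pos); // 4: above the ground
	sdf = stack.apply(sdf, pos); // inside the added sphere -> negative
	assert(sdf < 0.f);
	return 0;
}
```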

View File

@ -143,6 +143,7 @@ public:
// These must be called after an edit
void post_edit_area(Box3i p_box);
void post_edit_modifiers(Box3i p_voxel_box);
// TODO This is still not ideal at the moment, because the edit will still run on the main thread
void push_async_edit(IThreadedTask *task, Box3i box, std::shared_ptr<AsyncDependencyTracker> tracker);
@ -339,6 +340,16 @@ private:
std::shared_ptr<StreamingDependency> _streaming_dependency;
std::shared_ptr<MeshingDependency> _meshing_dependency;
struct ApplyMeshUpdateTask : public ITimeSpreadTask {
void run(TimeSpreadTaskContext &ctx) override;
uint32_t volume_id = 0;
VoxelLodTerrain *self = nullptr;
VoxelServer::BlockMeshOutput data;
};
FixedArray<std::unordered_map<Vector3i, RefCount>, constants::MAX_LOD> _queued_main_thread_mesh_updates;
#ifdef TOOLS_ENABLED
bool _show_gizmos_enabled = false;
bool _show_octree_bounds_gizmos = true;
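
`_queued_main_thread_mesh_updates` backs the deduplication added in `voxel_lod_terrain.cpp`: a per-position counter lets stale main-thread mesh tasks cancel themselves when newer ones are queued. A standalone model of that counter, with illustrative names rather than the module's types:

```cpp
#include <cassert>
#include <unordered_map>

struct Vec3iKey {
	int x, y, z;
	bool operator==(const Vec3iKey &o) const { return x == o.x && y == o.y && z == o.z; }
};

struct Vec3iHash {
	size_t operator()(const Vec3iKey &v) const {
		return (static_cast<size_t>(v.x) * 73856093u) ^ (static_cast<size_t>(v.y) * 19349663u) ^
				(static_cast<size_t>(v.z) * 83492791u);
	}
};

using QueuedCounts = std::unordered_map<Vec3iKey, int, Vec3iHash>;

void on_task_queued(QueuedCounts &counts, const Vec3iKey pos) {
	++counts[pos]; // another update was queued for this block
}

// Returns true if this task should actually apply its mesh (i.e. it is the most recent one).
bool on_task_run(QueuedCounts &counts, const Vec3iKey pos) {
	auto it = counts.find(pos);
	if (it == counts.end()) {
		return true; // nothing tracked, just run
	}
	if (--it->second > 0) {
		return false; // newer updates are pending; skip this stale one
	}
	counts.erase(it);
	return true;
}

int main() {
	QueuedCounts counts;
	const Vec3iKey pos{ 1, 2, 3 };
	on_task_queued(counts, pos);
	on_task_queued(counts, pos); // a second update arrives before the first one ran
	assert(!on_task_run(counts, pos)); // first task is stale, dropped
	assert(on_task_run(counts, pos)); // second task applies
	return 0;
}
```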

View File

@ -54,6 +54,10 @@ struct VoxelLodTerrainUpdateData {
bool full_load_mode = false;
bool run_stream_in_editor = true;
unsigned int mesh_block_size_po2 = 4;
// If true, try to generate blocks and store them in the data map before posting mesh requests.
// If false, non-edited voxels will be generated on the fly instead.
// Not really exposed for now; we'll wait until it is really needed. It might never be.
bool cache_generated_blocks = false;
};
enum MeshState {
@ -159,6 +163,10 @@ struct VoxelLodTerrainUpdateData {
BinaryMutex pending_async_edits_mutex;
std::vector<RunningAsyncEdit> running_async_edits;
// Areas where generated output has changed. Similar to an edit, but non-destructive.
std::vector<Box3i> changed_generated_areas;
BinaryMutex changed_generated_areas_mutex;
Stats stats;
};
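
`changed_generated_areas` is a simple producer/consumer exchange: the main thread pushes boxes under the mutex (see `post_edit_modifiers`), and the threaded update drains them in one batch. A minimal standalone sketch of that pattern, with `Box3i` replaced by a simplified struct:

```cpp
#include <cassert>
#include <mutex>
#include <vector>

struct SimpleBox {
	int min[3];
	int size[3];
};

struct ChangedGeneratedAreas {
	std::vector<SimpleBox> boxes;
	std::mutex mutex;

	// Called from the main thread when modifiers change an area.
	void push(const SimpleBox &box) {
		std::lock_guard<std::mutex> lock(mutex);
		boxes.push_back(box);
	}

	// Moves out all pending areas so the update task can schedule remeshing for them.
	std::vector<SimpleBox> drain() {
		std::lock_guard<std::mutex> lock(mutex);
		std::vector<SimpleBox> out = std::move(boxes);
		boxes.clear();
		return out;
	}
};

int main() {
	ChangedGeneratedAreas areas;
	areas.push(SimpleBox{ { 0, 0, 0 }, { 16, 16, 16 } });
	areas.push(SimpleBox{ { 32, 0, 0 }, { 16, 16, 16 } });
	assert(areas.drain().size() == 2);
	assert(areas.drain().empty());
	return 0;
}
```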

View File

@ -103,15 +103,18 @@ void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(VoxelLodTerrainUpdateDat
// We should find a way to make it asynchronous, not need mips, or not edit outside the viewers' area.
std::shared_ptr<VoxelBufferInternal> voxels = make_shared_instance<VoxelBufferInternal>();
voxels->create(Vector3iUtil::create(data_block_size));
VoxelGenerator::VoxelQueryData q{ //
*voxels, //
dst_bpos << (dst_lod_index + data_block_size_po2), //
dst_lod_index
};
if (generator.is_valid()) {
ZN_PROFILE_SCOPE_NAMED("Generate");
VoxelGenerator::VoxelQueryData q{ //
*voxels, //
dst_bpos << (dst_lod_index + data_block_size_po2), //
dst_lod_index
};
generator->generate_block(q);
}
data.modifiers.apply(
q.voxel_buffer, AABB(q.origin_in_voxels, q.voxel_buffer.get_size() << dst_lod_index));
dst_block = dst_data_lod.map.set_block_buffer(dst_bpos, voxels, true);
} else {
@ -121,9 +124,9 @@ void VoxelLodTerrainUpdateTask::flush_pending_lod_edits(VoxelLodTerrainUpdateDat
}
}
// The block and its lower LODs are expected to be available.
// The block and its lower LOD indices are expected to be available.
// Otherwise it means the function was called too late
CRASH_COND(src_block == nullptr);
ZN_ASSERT(src_block != nullptr && src_block->has_voxels());
//CRASH_COND(dst_block == nullptr);
{
@ -177,8 +180,11 @@ struct BeforeUnloadDataAction {
if (save && block.is_modified()) {
//print_line(String("Scheduling save for block {0}").format(varray(block->position.to_vec3())));
VoxelLodTerrainUpdateData::BlockToSave b;
// We don't copy since the block will be unloaded anyways
b.voxels = block.get_voxels_shared();
// We don't copy since the block will be unloaded anyway.
// If a modified block has no voxels, it is equivalent to removing the block from the stream
if (block.has_voxels()) {
b.voxels = block.get_voxels_shared();
}
b.position = bpos;
b.lod = block.get_lod_index();
blocks_to_save.push_back(b);
@ -655,6 +661,10 @@ static bool check_block_loaded_and_meshed(VoxelLodTerrainUpdateData::State &stat
#endif
if (settings.full_load_mode == false) {
// We want to know everything about the data intersecting this mesh block.
// This is not known in advance when streaming; it has to be requested.
// When not streaming, `block == null` is the same as `!block->has_voxels()` so we wouldn't need to enter here.
VoxelDataLodMap::Lod &data_lod = data.lods[lod_index];
if (mesh_block_size > data_block_size) {
@ -672,7 +682,7 @@ static bool check_block_loaded_and_meshed(VoxelLodTerrainUpdateData::State &stat
if (data_block == nullptr) {
loaded = false;
// TODO This is quite lossy in this case, if we ask for 8 blocks in an octant
// TODO Optimization: this iterates over too many blocks if we ask for 8 blocks in an octant.
try_schedule_loading_with_neighbors_no_lock(
state, data, data_block_pos, lod_index, blocks_to_load, settings.bounds_in_voxels);
}
@ -1052,11 +1062,12 @@ static void init_sparse_octree_priority_dependency(PriorityDependency &dep, Vect
VoxelServer::get_octree_lod_block_region_extent(octree_lod_distance, data_block_size));
}
// This is only if we want to cache voxel data
static void request_block_generate(uint32_t volume_id, unsigned int data_block_size,
std::shared_ptr<StreamingDependency> &stream_dependency, Vector3i block_pos, int lod,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
float lod_distance, std::shared_ptr<AsyncDependencyTracker> tracker, bool allow_drop,
BufferedTaskScheduler &task_scheduler) {
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelDataLodMap> &data,
Vector3i block_pos, int lod, std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data,
const Transform3D &volume_transform, float lod_distance, std::shared_ptr<AsyncDependencyTracker> tracker,
bool allow_drop, BufferedTaskScheduler &task_scheduler) {
//
CRASH_COND(data_block_size > 255);
CRASH_COND(stream_dependency == nullptr);
@ -1072,6 +1083,7 @@ static void request_block_generate(uint32_t volume_id, unsigned int data_block_s
task->stream_dependency = stream_dependency;
task->tracker = tracker;
task->drop_beyond_max_distance = allow_drop;
task->data = data;
init_sparse_octree_priority_dependency(task->priority_dependency, block_pos, lod, data_block_size,
shared_viewers_data, volume_transform, lod_distance);
@ -1079,10 +1091,12 @@ static void request_block_generate(uint32_t volume_id, unsigned int data_block_s
task_scheduler.push_main_task(task);
}
// Used only when streaming block by block
static void request_block_load(uint32_t volume_id, unsigned int data_block_size,
std::shared_ptr<StreamingDependency> &stream_dependency, Vector3i block_pos, int lod, bool request_instances,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelDataLodMap> &data,
Vector3i block_pos, int lod, bool request_instances,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
float lod_distance, BufferedTaskScheduler &task_scheduler) {
const VoxelLodTerrainUpdateData::Settings &settings, BufferedTaskScheduler &task_scheduler) {
//
CRASH_COND(data_block_size > 255);
CRASH_COND(stream_dependency == nullptr);
@ -1090,31 +1104,51 @@ static void request_block_load(uint32_t volume_id, unsigned int data_block_size,
if (stream_dependency->stream.is_valid()) {
PriorityDependency priority_dependency;
init_sparse_octree_priority_dependency(priority_dependency, block_pos, lod, data_block_size,
shared_viewers_data, volume_transform, lod_distance);
shared_viewers_data, volume_transform, settings.lod_distance);
LoadBlockDataTask *task = memnew(LoadBlockDataTask(
volume_id, block_pos, lod, data_block_size, request_instances, stream_dependency, priority_dependency));
LoadBlockDataTask *task = memnew(LoadBlockDataTask(volume_id, block_pos, lod, data_block_size,
request_instances, stream_dependency, priority_dependency, settings.cache_generated_blocks));
task_scheduler.push_io_task(task);
} else {
} else if (settings.cache_generated_blocks) {
// Directly generate the block without checking the stream.
request_block_generate(volume_id, data_block_size, stream_dependency, block_pos, lod, shared_viewers_data,
volume_transform, lod_distance, nullptr, true, task_scheduler);
request_block_generate(volume_id, data_block_size, stream_dependency, data, block_pos, lod, shared_viewers_data,
volume_transform, settings.lod_distance, nullptr, true, task_scheduler);
} else {
ZN_PRINT_WARNING("Requesting a block load when it should not have been necessary");
}
}
static void send_block_data_requests(uint32_t volume_id,
Span<const VoxelLodTerrainUpdateData::BlockLocation> blocks_to_load,
std::shared_ptr<StreamingDependency> &stream_dependency,
std::shared_ptr<StreamingDependency> &stream_dependency, const std::shared_ptr<VoxelDataLodMap> &data,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, unsigned int data_block_size,
bool request_instances, const Transform3D &volume_transform, float lod_distance,
BufferedTaskScheduler &task_scheduler) {
bool request_instances, const Transform3D &volume_transform,
const VoxelLodTerrainUpdateData::Settings &settings, BufferedTaskScheduler &task_scheduler) {
//
for (unsigned int i = 0; i < blocks_to_load.size(); ++i) {
const VoxelLodTerrainUpdateData::BlockLocation loc = blocks_to_load[i];
request_block_load(volume_id, data_block_size, stream_dependency, loc.position, loc.lod, request_instances,
shared_viewers_data, volume_transform, lod_distance, task_scheduler);
request_block_load(volume_id, data_block_size, stream_dependency, data, loc.position, loc.lod,
request_instances, shared_viewers_data, volume_transform, settings, task_scheduler);
}
}
static void apply_block_data_requests_as_empty(Span<const VoxelLodTerrainUpdateData::BlockLocation> blocks_to_load,
VoxelDataLodMap &data, VoxelLodTerrainUpdateData::State &state) {
for (unsigned int i = 0; i < blocks_to_load.size(); ++i) {
const VoxelLodTerrainUpdateData::BlockLocation loc = blocks_to_load[i];
VoxelDataLodMap::Lod &data_lod = data.lods[loc.lod];
VoxelLodTerrainUpdateData::Lod &lod = state.lods[loc.lod];
{
MutexLock mlock(lod.loading_blocks_mutex);
lod.loading_blocks.erase(loc.position);
}
{
RWLockWrite wlock(data_lod.map_lock);
data_lod.map.set_empty_block(loc.position, false);
}
}
}
@ -1184,7 +1218,7 @@ static void send_mesh_requests(uint32_t volume_id, VoxelLodTerrainUpdateData::St
MeshBlockTask *task = memnew(MeshBlockTask);
task->volume_id = volume_id;
task->position = mesh_block_pos;
task->lod = lod_index;
task->lod_index = lod_index;
task->meshing_dependency = meshing_dependency;
task->data_block_size = data_block_size;
task->data = data_ptr;
@ -1204,13 +1238,13 @@ static void send_mesh_requests(uint32_t volume_id, VoxelLodTerrainUpdateData::St
const VoxelDataBlock *nblock = data_lod.map.get_block(data_block_pos);
// The block can actually be null on some occasions. Not sure yet if it's that bad
//CRASH_COND(nblock == nullptr);
if (nblock != nullptr) {
if (nblock != nullptr && nblock->has_voxels()) {
task->blocks[task->blocks_count] = nblock->get_voxels_shared();
}
++task->blocks_count;
});
init_sparse_octree_priority_dependency(task->priority_dependency, task->position, task->lod,
init_sparse_octree_priority_dependency(task->priority_dependency, task->position, task->lod_index,
mesh_block_size, shared_viewers_data, volume_transform, settings.lod_distance);
task_scheduler.push_main_task(task);
@ -1226,8 +1260,9 @@ static void send_mesh_requests(uint32_t volume_id, VoxelLodTerrainUpdateData::St
// This function schedules one parallel task for every block.
// The returned tracker may be polled to detect when it is complete.
static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, const VoxelDataLodMap &data, Span<const Box3i> voxel_boxes,
Span<IThreadedTask *> next_tasks, uint32_t volume_id, std::shared_ptr<StreamingDependency> &stream_dependency,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelDataLodMap> data_ptr,
Span<const Box3i> voxel_boxes, Span<IThreadedTask *> next_tasks, uint32_t volume_id,
std::shared_ptr<StreamingDependency> &stream_dependency,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
BufferedTaskScheduler &task_scheduler) {
ZN_PROFILE_SCOPE();
@ -1241,6 +1276,8 @@ static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerra
std::vector<TaskArguments> todo;
ZN_ASSERT(data_ptr != nullptr);
VoxelDataLodMap &data = *data_ptr;
const unsigned int data_block_size = data.lods[0].map.get_block_size();
for (unsigned int lod_index = 0; lod_index < settings.lod_count; ++lod_index) {
@ -1288,8 +1325,9 @@ static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerra
for (unsigned int i = 0; i < todo.size(); ++i) {
const TaskArguments args = todo[i];
request_block_generate(volume_id, data_block_size, stream_dependency, args.block_pos, args.lod_index,
shared_viewers_data, volume_transform, settings.lod_distance, tracker, false, task_scheduler);
request_block_generate(volume_id, data_block_size, stream_dependency, data_ptr, args.block_pos,
args.lod_index, shared_viewers_data, volume_transform, settings.lod_distance, tracker, false,
task_scheduler);
}
} else if (next_tasks.size() > 0) {
@ -1301,8 +1339,8 @@ static std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(VoxelLodTerra
}
static void process_async_edits(VoxelLodTerrainUpdateData::State &state,
const VoxelLodTerrainUpdateData::Settings &settings, const VoxelDataLodMap &data, uint32_t volume_id,
std::shared_ptr<StreamingDependency> &stream_dependency,
const VoxelLodTerrainUpdateData::Settings &settings, const std::shared_ptr<VoxelDataLodMap> &data,
uint32_t volume_id, std::shared_ptr<StreamingDependency> &stream_dependency,
std::shared_ptr<PriorityDependency::ViewersData> &shared_viewers_data, const Transform3D &volume_transform,
BufferedTaskScheduler &task_scheduler) {
ZN_PROFILE_SCOPE();
@ -1340,6 +1378,39 @@ static void process_async_edits(VoxelLodTerrainUpdateData::State &state,
}
}
static void process_changed_generated_areas(
VoxelLodTerrainUpdateData::State &state, const VoxelLodTerrainUpdateData::Settings &settings) {
const unsigned int mesh_block_size = 1 << settings.mesh_block_size_po2;
MutexLock lock(state.changed_generated_areas_mutex);
if (state.changed_generated_areas.size() == 0) {
return;
}
for (unsigned int lod_index = 0; lod_index < settings.lod_count; ++lod_index) {
VoxelLodTerrainUpdateData::Lod &lod = state.lods[lod_index];
for (auto box_it = state.changed_generated_areas.begin(); box_it != state.changed_generated_areas.end();
++box_it) {
const Box3i &voxel_box = *box_it;
const Box3i bbox = voxel_box.padded(1).downscaled(mesh_block_size << lod_index);
// TODO If there are cached generated blocks, they need to be re-cached or removed
RWLockRead rlock(lod.mesh_map_state.map_lock);
bbox.for_each_cell_zxy([&lod](const Vector3i bpos) {
auto block_it = lod.mesh_map_state.map.find(bpos);
if (block_it != lod.mesh_map_state.map.end()) {
VoxelLodTerrainUpdateTask::schedule_mesh_update(block_it->second, bpos, lod.blocks_pending_update);
}
});
}
}
state.changed_generated_areas.clear();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
@ -1367,6 +1438,7 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
ProfilingClock profiling_clock;
ProfilingClock profiling_clock_total;
// TODO This is not a good name, "streaming" has several meanings
const bool stream_enabled = (stream.is_valid() || generator.is_valid()) &&
(Engine::get_singleton()->is_editor_hint() == false || settings.run_stream_in_editor);
@ -1388,8 +1460,12 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
// Update pending LOD data modifications due to edits.
// These are deferred from edits so we can batch them.
// It has to happen first because blocks can be unloaded afterwards.
// This is also what causes meshes to update after edits.
flush_pending_lod_edits(state, data, generator, settings.full_load_mode, 1 << settings.mesh_block_size_po2);
// Other mesh updates
process_changed_generated_areas(state, settings);
static thread_local std::vector<VoxelLodTerrainUpdateData::BlockToSave> data_blocks_to_save;
static thread_local std::vector<VoxelLodTerrainUpdateData::BlockLocation> data_blocks_to_load;
data_blocks_to_load.clear();
@ -1420,7 +1496,7 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
BufferedTaskScheduler &task_scheduler = BufferedTaskScheduler::get_for_current_thread();
process_async_edits(state, settings, data, _volume_id, _streaming_dependency, _shared_viewers_data,
process_async_edits(state, settings, _data, _volume_id, _streaming_dependency, _shared_viewers_data,
_volume_transform, task_scheduler);
profiling_clock.restart();
@ -1429,9 +1505,19 @@ void VoxelLodTerrainUpdateTask::run(ThreadedTaskContext ctx) {
// It's possible the user didn't set a stream yet, or it is turned off
if (stream_enabled) {
const unsigned int data_block_size = data.lods[0].map.get_block_size();
send_block_data_requests(_volume_id, to_span_const(data_blocks_to_load), _streaming_dependency,
_shared_viewers_data, data_block_size, _request_instances, _volume_transform, settings.lod_distance,
task_scheduler);
if (stream.is_null() && !settings.cache_generated_blocks) {
// TODO Optimization: not ideal because it is a bit delayed. It requires a second update cycle for meshes to
// get requested. We could instead set those empty blocks right away instead of putting them in that
// list, but it's simpler code for now.
apply_block_data_requests_as_empty(to_span(data_blocks_to_load), data, state);
} else {
send_block_data_requests(_volume_id, to_span(data_blocks_to_load), _streaming_dependency, _data,
_shared_viewers_data, data_block_size, _request_instances, _volume_transform, settings,
task_scheduler);
}
send_block_save_requests(
_volume_id, to_span(data_blocks_to_save), _streaming_dependency, data_block_size, task_scheduler);
}
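
`process_changed_generated_areas()` above pads each changed voxel box by one voxel and downscales it to mesh block coordinates at each LOD before scheduling remeshing. Below is a standalone sketch of that conversion in one dimension; helper names are illustrative and the real `Box3i::padded`/`downscaled` may differ in edge details:

```cpp
#include <cassert>

struct Range1D {
	int min; // inclusive
	int max; // inclusive
};

// Converts a 1D voxel range into mesh block indices for a given LOD.
Range1D voxel_range_to_mesh_blocks(int voxel_min, int voxel_size, int mesh_block_size, int lod_index) {
	const int padded_min = voxel_min - 1; // pad by 1 voxel to catch meshes sampling neighbors
	const int padded_max = voxel_min + voxel_size; // inclusive max after +1 padding
	const int block_span = mesh_block_size << lod_index; // blocks cover more voxels at higher LODs
	// Floor division that also works for negative coordinates.
	const auto floordiv = [](int a, int b) { return (a >= 0) ? (a / b) : -((-a + b - 1) / b); };
	return Range1D{ floordiv(padded_min, block_span), floordiv(padded_max, block_span) };
}

int main() {
	// A 16-voxel-wide change starting at voxel 30, with 16-voxel mesh blocks, at LOD 0:
	const Range1D r0 = voxel_range_to_mesh_blocks(30, 16, 16, 0);
	assert(r0.min == 1 && r0.max == 2); // padding reaches voxels 29..46, touching blocks 1 and 2
	// Same area at LOD 1, where blocks span 32 voxels:
	const Range1D r1 = voxel_range_to_mesh_blocks(30, 16, 16, 1);
	assert(r1.min == 0 && r1.max == 1);
	return 0;
}
```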

View File

@ -10,6 +10,7 @@ int VoxelDataBlockEnterInfo::_b_get_network_peer_id() const {
Ref<gd::VoxelBuffer> VoxelDataBlockEnterInfo::_b_get_voxels() const {
ERR_FAIL_COND_V(voxel_block == nullptr, Ref<gd::VoxelBuffer>());
ERR_FAIL_COND_V(!voxel_block->has_voxels(), Ref<gd::VoxelBuffer>());
std::shared_ptr<VoxelBufferInternal> vbi = voxel_block->get_voxels_shared();
Ref<gd::VoxelBuffer> vb = gd::VoxelBuffer::create_shared(vbi);
return vb;

View File

@ -9,6 +9,7 @@
#endif
#ifdef ZN_DSTACK_ENABLED
// Put this macro at the top of each function you want to track in debug stack traces.
#define ZN_DSTACK() zylann::dstack::Scope dstack_scope_##__LINE__(__FILE__, __LINE__, __FUNCTION__)
#else
#define ZN_DSTACK()
@ -37,6 +38,7 @@ struct Frame {
struct Info {
public:
// Constructs a copy of the current stack gathered so far from ZN_DSTACK() calls
Info();
void to_string(FwdMutableStdString s) const;
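
The `ZN_DSTACK()` macro documented above follows a scoped-frame pattern: an RAII object pushes a frame onto a thread-local stack and pops it when the scope exits, so the current stack can be copied at any point. A simplified standalone model of that idea (not the module's `dstack` implementation):

```cpp
#include <cassert>
#include <vector>

struct Frame {
	const char *file;
	int line;
	const char *function;
};

static thread_local std::vector<Frame> g_stack;

struct ScopedFrame {
	ScopedFrame(const char *file, int line, const char *function) {
		g_stack.push_back(Frame{ file, line, function });
	}
	~ScopedFrame() {
		g_stack.pop_back();
	}
};

// Mirrors the shape of ZN_DSTACK(): declare a scope object that records the current location.
#define TRACK_SCOPE() ScopedFrame tracked_scope_##__LINE__(__FILE__, __LINE__, __func__)

void do_edit() {
	TRACK_SCOPE(); // would be ZN_DSTACK() in the module
	assert(g_stack.size() == 1); // this frame is visible while the function runs
}

int main() {
	do_edit();
	assert(g_stack.empty()); // popped once the scope ended
	return 0;
}
```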

View File

@ -22,6 +22,10 @@ inline bool is_normalized(const Vector3 &v) {
return v.is_normalized();
}
inline Vector3 lerp(const Vector3 a, const Vector3 b, const Vector3 alpha) {
return Vector3(Math::lerp(a.x, b.x, alpha.x), Math::lerp(a.y, b.y, alpha.y), Math::lerp(a.z, b.z, alpha.z));
}
} // namespace zylann::math
#endif // ZN_VECTOR3_H
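
The component-wise `lerp` added above interpolates each axis with its own alpha, which is convenient for trilinear-style blends. A standalone illustration with minimal stand-in types:

```cpp
#include <cassert>

struct V3 {
	float x, y, z;
};

inline float lerp1(float a, float b, float t) {
	return a + (b - a) * t;
}

// Component-wise lerp: each axis uses its own interpolation factor.
inline V3 lerp3(const V3 a, const V3 b, const V3 alpha) {
	return V3{ lerp1(a.x, b.x, alpha.x), lerp1(a.y, b.y, alpha.y), lerp1(a.z, b.z, alpha.z) };
}

int main() {
	const V3 r = lerp3(V3{ 0.f, 0.f, 0.f }, V3{ 2.f, 4.f, 8.f }, V3{ 0.5f, 0.25f, 1.f });
	assert(r.x == 1.f && r.y == 1.f && r.z == 8.f);
	return 0;
}
```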

View File

@ -177,6 +177,7 @@ public:
private:
// Non-static method for scripts because Godot4 does not support binding static methods (it's only
// implemented for primitive types)
// TODO Make it static, it is supported now
String _b_get_simd_level_name(SIMDLevel level);
static void _bind_methods();

View File

@ -9,6 +9,9 @@ namespace zylann {
// This one is not thread-safe.
class RefCount {
public:
RefCount() {}
RefCount(unsigned int initial_count): _count(initial_count) {}
inline void add() {
++_count;
}