Added optional threaded execution of VoxelLodTerrain's process function

parent 4c68e7b298
commit 5ddcbc2065

@@ -28,6 +28,7 @@ Godot 4 is required from this version.
- SDF data is now encoded with `inorm8` and `inorm16`, instead of an arbitrary version of `unorm8` and `unorm16`. Migration code is in place to load old save files, but *do a backup before running your project with the new version*.
- `VoxelLodTerrain`: added *experimental* `full_load_mode`, in which all edited data is loaded at once, allowing any area to be edited anytime. Useful for some fixed-size volumes.
- `VoxelLodTerrain`: Editor: added option to show octree nodes in editor
- `VoxelLodTerrain`: added option to run a major part of the process logic in another thread
- `VoxelToolLodTerrain`: added *experimental* `do_sphere_async`, an alternative version of `do_sphere` which defers the task to threads to reduce stutter if the affected area is big.
- `VoxelInstancer`: allow dumping a `VoxelInstancer` as a scene for debug inspection
- `VoxelInstancer`: Editor: instance chunks are shown when the node is selected

@@ -1,4 +1,5 @@
#include "generate_block_task.h"
#include "../storage/voxel_buffer_internal.h"
#include "../util/godot/funcs.h"
#include "../util/macros.h"
#include "../util/profiling.h"

@@ -1,4 +1,5 @@
#include "load_block_data_task.h"
#include "../storage/voxel_buffer_internal.h"
#include "../util/godot/funcs.h"
#include "../util/macros.h"
#include "../util/profiling.h"

@@ -2,10 +2,9 @@
#define VOXEL_MESH_BLOCK_TASK_H

#include "../constants/voxel_constants.h"
#include "../generators/voxel_generator.h"
#include "../meshers/voxel_mesher.h"
#include "../storage/voxel_buffer_internal.h"
#include "../util/tasks/threaded_task.h"
#include "meshing_dependency.h"
#include "priority_dependency.h"

namespace zylann::voxel {
@@ -13,12 +12,6 @@ namespace zylann::voxel {
// Asynchronous task generating a mesh from voxel blocks and their neighbors, in a particular volume
class MeshBlockTask : public IThreadedTask {
public:
	struct MeshingDependency {
		Ref<VoxelMesher> mesher;
		Ref<VoxelGenerator> generator;
		bool valid = true;
	};

	MeshBlockTask();
	~MeshBlockTask();

server/meshing_dependency.h (new file, 21 lines)
@@ -0,0 +1,21 @@
#ifndef VOXEL_MESHING_DEPENDENCY_H
#define VOXEL_MESHING_DEPENDENCY_H

#include "../generators/voxel_generator.h"
#include "../meshers/voxel_mesher.h"

namespace zylann::voxel {

// Shared dependency needed by some asynchronous tasks.
// It may be passed with a shared_ptr.
// Pointers inside should not change. If they do, a new instance will be made and old ones will be marked invalid,
// rather than risking a bad pointer read or having to use (many) mutexes.
struct MeshingDependency {
	Ref<VoxelMesher> mesher;
	Ref<VoxelGenerator> generator;
	bool valid = true;
};

} // namespace zylann::voxel

#endif // VOXEL_MESHING_DEPENDENCY_H

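The invalidation scheme described in the header comment works by replacing the whole shared instance rather than mutating it in place. Below is a minimal, self-contained C++ sketch of that pattern using plain std types and hypothetical names (Mesher, Generator, SharedDependency, replace_dependency); it only illustrates the idea and is not the module's code. Note the sketch uses std::atomic_bool for `valid`, whereas the header above declares a plain bool.

#include <atomic>
#include <memory>

// Hypothetical stand-ins for Ref<VoxelMesher> / Ref<VoxelGenerator>.
struct Mesher {};
struct Generator {};

struct SharedDependency {
	std::shared_ptr<Mesher> mesher;
	std::shared_ptr<Generator> generator;
	std::atomic_bool valid{ true };
};

// Server side: never mutate the pointers of an instance other threads may hold.
// Mark the old instance invalid and publish a fresh one instead.
std::shared_ptr<SharedDependency> replace_dependency(const std::shared_ptr<SharedDependency> &old_dep,
		std::shared_ptr<Mesher> mesher, std::shared_ptr<Generator> generator) {
	if (old_dep != nullptr) {
		old_dep->valid = false; // Tasks still holding the old instance will cancel themselves
	}
	auto new_dep = std::make_shared<SharedDependency>();
	new_dep->mesher = mesher;
	new_dep->generator = generator;
	return new_dep;
}

// Task side: bail out early if the dependency was invalidated after scheduling.
void run_meshing_task(const std::shared_ptr<SharedDependency> &dep) {
	if (!dep->valid) {
		return; // Outdated request, drop it
	}
	// ... build the mesh using dep->mesher and dep->generator ...
}

int main() {
	auto dep = replace_dependency(nullptr, std::make_shared<Mesher>(), std::make_shared<Generator>());
	auto dep2 = replace_dependency(dep, std::make_shared<Mesher>(), std::make_shared<Generator>());
	run_meshing_task(dep); // Does nothing: this dependency was invalidated by the swap
	run_meshing_task(dep2); // Proceeds
	return 0;
}
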
@@ -1,4 +1,5 @@
#include "save_block_data_task.h"
#include "../storage/voxel_buffer_internal.h"
#include "../util/godot/funcs.h"
#include "../util/macros.h"
#include "../util/profiling.h"

@@ -8,6 +8,7 @@
#include "generate_block_task.h"
#include "load_all_blocks_data_task.h"
#include "load_block_data_task.h"
#include "mesh_block_task.h"
#include "save_block_data_task.h"

#include <core/config/project_settings.h>
@@ -144,7 +145,7 @@ uint32_t VoxelServer::add_volume(VolumeCallbacks callbacks, VolumeType type) {
	Volume volume;
	volume.type = type;
	volume.callbacks = callbacks;
	volume.meshing_dependency = gd_make_shared<MeshBlockTask::MeshingDependency>();
	volume.meshing_dependency = gd_make_shared<MeshingDependency>();
	return _world.volumes.create(volume);
}

@@ -194,7 +195,7 @@ void VoxelServer::set_volume_generator(uint32_t volume_id, Ref<VoxelGenerator> g
		volume.meshing_dependency->valid = false;
	}

	volume.meshing_dependency = gd_make_shared<MeshBlockTask::MeshingDependency>();
	volume.meshing_dependency = gd_make_shared<MeshingDependency>();
	volume.meshing_dependency->mesher = volume.mesher;
	volume.meshing_dependency->generator = volume.generator;
}
@@ -207,7 +208,7 @@ void VoxelServer::set_volume_mesher(uint32_t volume_id, Ref<VoxelMesher> mesher)
		volume.meshing_dependency->valid = false;
	}

	volume.meshing_dependency = gd_make_shared<MeshBlockTask::MeshingDependency>();
	volume.meshing_dependency = gd_make_shared<MeshingDependency>();
	volume.meshing_dependency->mesher = volume.mesher;
	volume.meshing_dependency->generator = volume.generator;
}
@@ -220,7 +221,7 @@ void VoxelServer::set_volume_octree_lod_distance(uint32_t volume_id, float lod_d
void VoxelServer::invalidate_volume_mesh_requests(uint32_t volume_id) {
	Volume &volume = _world.volumes.get(volume_id);
	volume.meshing_dependency->valid = false;
	volume.meshing_dependency = gd_make_shared<MeshBlockTask::MeshingDependency>();
	volume.meshing_dependency = gd_make_shared<MeshingDependency>();
	volume.meshing_dependency->mesher = volume.mesher;
	volume.meshing_dependency->generator = volume.generator;
}
@@ -472,11 +473,11 @@ bool VoxelServer::viewer_exists(uint32_t viewer_id) const {
	return _world.viewers.is_valid(viewer_id);
}

void VoxelServer::push_time_spread_task(zylann::ITimeSpreadTask *task) {
void VoxelServer::push_main_thread_time_spread_task(zylann::ITimeSpreadTask *task) {
	_time_spread_task_runner.push(task);
}

void VoxelServer::push_progressive_task(zylann::IProgressiveTask *task) {
void VoxelServer::push_main_thread_progressive_task(zylann::IProgressiveTask *task) {
	_progressive_task_runner.push(task);
}

@@ -492,6 +493,14 @@ void VoxelServer::push_async_tasks(Span<zylann::IThreadedTask *> tasks) {
	_general_thread_pool.enqueue(tasks);
}

void VoxelServer::push_async_io_task(zylann::IThreadedTask *task) {
	_streaming_thread_pool.enqueue(task);
}

void VoxelServer::push_async_io_tasks(Span<zylann::IThreadedTask *> tasks) {
	_streaming_thread_pool.enqueue(tasks);
}

void VoxelServer::process() {
	VOXEL_PROFILE_SCOPE();
	VOXEL_PROFILE_PLOT("Static memory usage", int64_t(OS::get_singleton()->get_static_memory_usage()));

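In this file, `push_async_task` feeds `_general_thread_pool` while the new `push_async_io_task` feeds `_streaming_thread_pool`, keeping blocking file I/O away from generation and meshing work. As a rough illustration of why two pool instances are worth keeping, here is a minimal generic worker pool in plain C++ (std::thread and std::function); it is not the module's ThreadedTaskRunner, which schedules IThreadedTask objects with priorities, and the pool sizes in the usage example are arbitrary.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Minimal generic worker pool. The module keeps two separate pool instances
// (a general one and a streaming/IO one) so slow disk work cannot starve
// generation and meshing tasks.
class SimplePool {
public:
	explicit SimplePool(unsigned int thread_count) {
		for (unsigned int i = 0; i < thread_count; ++i) {
			_threads.emplace_back([this]() { worker(); });
		}
	}

	~SimplePool() {
		{
			std::lock_guard<std::mutex> lock(_mutex);
			_stop = true;
		}
		_cv.notify_all();
		for (std::thread &t : _threads) {
			t.join();
		}
	}

	void enqueue(std::function<void()> task) {
		{
			std::lock_guard<std::mutex> lock(_mutex);
			_tasks.push(std::move(task));
		}
		_cv.notify_one();
	}

private:
	void worker() {
		for (;;) {
			std::function<void()> task;
			{
				std::unique_lock<std::mutex> lock(_mutex);
				_cv.wait(lock, [this]() { return _stop || !_tasks.empty(); });
				if (_stop && _tasks.empty()) {
					return;
				}
				task = std::move(_tasks.front());
				_tasks.pop();
			}
			task(); // Run outside the lock
		}
	}

	std::vector<std::thread> _threads;
	std::queue<std::function<void()>> _tasks;
	std::mutex _mutex;
	std::condition_variable _cv;
	bool _stop = false;
};

int main() {
	SimplePool general_pool(4); // e.g. generation and meshing tasks
	SimplePool io_pool(1); // e.g. streaming/saving tasks, mirroring the IO split above
	io_pool.enqueue([]() { /* load a block from disk */ });
	general_pool.enqueue([]() { /* generate or mesh a block */ });
	return 0; // Destructors drain the queues and join the workers
}
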
@@ -1,6 +1,7 @@
#ifndef VOXEL_SERVER_H
#define VOXEL_SERVER_H

#include "../constants/voxel_constants.h"
#include "../generators/voxel_generator.h"
#include "../meshers/blocky/voxel_mesher_blocky.h"
#include "../streams/voxel_stream.h"
@@ -9,7 +10,8 @@
#include "../util/tasks/progressive_task_runner.h"
#include "../util/tasks/threaded_task_runner.h"
#include "../util/tasks/time_spread_task_runner.h"
#include "mesh_block_task.h"
#include "meshing_dependency.h"
#include "priority_dependency.h"
#include "streaming_dependency.h"

#include <memory>
@@ -102,7 +104,6 @@ public:
	VoxelServer();
	~VoxelServer();

	// TODO Rename functions to C convention
	uint32_t add_volume(VolumeCallbacks callbacks, VolumeType type);
	void set_volume_transform(uint32_t volume_id, Transform3D t);
	void set_volume_render_block_size(uint32_t volume_id, uint32_t block_size);
@@ -126,7 +127,10 @@ public:
	void remove_volume(uint32_t volume_id);
	bool is_volume_valid(uint32_t volume_id) const;

	// TODO Rename functions to C convention
	std::shared_ptr<PriorityDependency::ViewersData> get_shared_viewers_data_from_default_world() const {
		return _world.shared_priority_dependency;
	}

	uint32_t add_viewer();
	void remove_viewer(uint32_t viewer_id);
	void set_viewer_position(uint32_t viewer_id, Vector3 position);
@@ -147,15 +151,19 @@ public:
		_world.viewers.for_each_with_id(f);
	}

	void push_time_spread_task(ITimeSpreadTask *task);
	void push_main_thread_time_spread_task(ITimeSpreadTask *task);
	int get_main_thread_time_budget_usec() const;

	void push_progressive_task(IProgressiveTask *task);
	void push_main_thread_progressive_task(IProgressiveTask *task);

	// Thread-safe.
	void push_async_task(IThreadedTask *task);
	// Thread-safe.
	void push_async_tasks(Span<IThreadedTask *> tasks);
	// Thread-safe.
	void push_async_io_task(IThreadedTask *task);
	// Thread-safe.
	void push_async_io_tasks(Span<IThreadedTask *> tasks);

	// Gets by how much voxels must be padded with neighbors in order to be polygonized properly
	// void get_min_max_block_padding(
@@ -227,7 +235,7 @@ private:
		uint32_t data_block_size = 16;
		float octree_lod_distance = 0;
		std::shared_ptr<StreamingDependency> stream_dependency;
		std::shared_ptr<MeshBlockTask::MeshingDependency> meshing_dependency;
		std::shared_ptr<MeshingDependency> meshing_dependency;
	};

	struct World {

@@ -189,6 +189,7 @@ VoxelDataBlock *VoxelDataMap::set_block_buffer(
	} else if (overwrite) {
		block->set_voxels(buffer);
	} else {
		VOXEL_PROFILE_MESSAGE("Redundant data block");
		PRINT_VERBOSE(String("Discarded block {0} lod {1}, there was already data and overwriting is not enabled")
				.format(varray(bpos, _lod_index)));
	}

@@ -193,6 +193,10 @@ public:
		return po;
	}

	static inline unsigned int get_octree_size_po2(unsigned int block_size_po2, unsigned int lod_count) {
		return block_size_po2 + lod_count - 1;
	}

private:
	// This pool treats nodes as packs of 8 so they can be addressed by only knowing the first child
	class NodePool {

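The new helper returns, as a power of two, the size of the volume covered by one octree: the octree spans one block at the highest LOD, and each additional LOD doubles the edge length. A small standalone restatement with a worked example follows; the chosen numbers (16-voxel blocks, 5 LODs) are just an illustration.

#include <cassert>

// Same formula as LodOctree::get_octree_size_po2 shown above.
static inline unsigned int get_octree_size_po2(unsigned int block_size_po2, unsigned int lod_count) {
	return block_size_po2 + lod_count - 1;
}

int main() {
	// 16-voxel blocks (2^4) and 5 LODs: each octree spans 2^(4 + 5 - 1) = 2^8 = 256 voxels.
	assert(get_octree_size_po2(4, 5) == 8);
	return 0;
}
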
File diff suppressed because it is too large
@@ -1,9 +1,11 @@
#ifndef VOXEL_LOD_TERRAIN_HPP
#define VOXEL_LOD_TERRAIN_HPP

#include "../server/mesh_block_task.h"
#include "../server/voxel_server.h"
#include "../storage/voxel_data_map.h"
#include "lod_octree.h"
#include "voxel_lod_terrain_update_data.h"
#include "voxel_mesh_map.h"
#include "voxel_node.h"

@@ -85,6 +87,9 @@ public:
	void set_full_load_mode_enabled(bool enabled);
	bool is_full_load_mode_enabled() const;

	void set_threaded_update_enabled(bool enabled);
	bool is_threaded_update_enabled() const;

	bool is_area_editable(Box3i p_box) const;
	VoxelSingleValue get_voxel(Vector3i pos, unsigned int channel, VoxelSingleValue defval);
	bool try_set_voxel_without_update(Vector3i pos, unsigned int channel, uint64_t value);
@@ -139,12 +144,13 @@ public:

	// TODO This still sucks atm cuz the edit will still run on the main thread
	void push_async_edit(IThreadedTask *task, Box3i box, std::shared_ptr<AsyncDependencyTracker> tracker);
	void process_async_edits();
	void abort_async_edits();

	void set_voxel_bounds(Box3i p_box);

	inline Box3i get_voxel_bounds() const {
		return _bounds_in_voxels;
		CRASH_COND(_update_data == nullptr);
		return _update_data->settings.bounds_in_voxels;
	}

	void set_collision_update_delay(int delay_msec);
@@ -241,30 +247,12 @@ protected:
	void _on_gi_mode_changed() override;

private:
	struct BlockLocation {
		Vector3i position;
		uint8_t lod;
	};

	void _process(float delta);
	void process_unload_data_blocks_sliding_box(Vector3 p_viewer_pos, std::vector<BlockToSave> &blocks_to_save);
	void process_unload_mesh_blocks_sliding_box(Vector3 p_viewer_pos);
	void process_octrees_sliding_box(Vector3 p_viewer_pos);
	void process_octrees_fitting(Vector3 p_viewer_pos, std::vector<BlockLocation> &data_blocks_to_load);
	//void process_block_loading_responses();
	void send_mesh_requests();
	void apply_deferred_update_tasks();

	void apply_mesh_update(const VoxelServer::BlockMeshOutput &ob);
	void apply_data_block_response(VoxelServer::BlockDataOutput &ob);

	void unload_data_block_no_lock(Vector3i block_pos, uint8_t lod_index, std::vector<BlockToSave> &blocks_to_save);
	void unload_mesh_block(Vector3i block_pos, uint8_t lod_index);

	static inline bool check_block_sizes(int data_block_size, int mesh_block_size) {
		return (data_block_size == 16 || data_block_size == 32) && (mesh_block_size == 16 || mesh_block_size == 32) &&
				mesh_block_size >= data_block_size;
	}

	void start_updater();
	void stop_updater();
	void start_streamer();
@@ -272,34 +260,19 @@ private:
	void reset_maps();

	Vector3 get_local_viewer_pos() const;
	void try_schedule_loading_with_neighbors_no_lock(
			const Vector3i &p_data_block_pos, uint8_t lod_index, std::vector<BlockLocation> &blocks_to_load);
	bool is_block_surrounded(const Vector3i &p_bpos, int lod_index, const VoxelDataMap &map) const;
	bool check_block_loaded_and_meshed(
			const Vector3i &p_mesh_block_pos, uint8_t lod_index, std::vector<BlockLocation> &blocks_to_load);
	bool check_block_mesh_updated(VoxelMeshBlock *block, std::vector<BlockLocation> &blocks_to_load);
	void _set_lod_count(int p_lod_count);
	void set_mesh_block_active(VoxelMeshBlock &block, bool active);

	std::shared_ptr<AsyncDependencyTracker> preload_boxes_async(
			Span<const Box3i> voxel_boxes, Span<IThreadedTask *> next_tasks);

	void _on_stream_params_changed();

	void flush_pending_lod_edits();
	void save_all_modified_blocks(bool with_copy);
	void send_block_data_requests(Span<const BlockLocation> blocks_to_load);

	// TODO Put in common with VoxelLodTerrainUpdateTask
	void send_block_save_requests(Span<BlockToSave> blocks_to_save);

	void process_deferred_collision_updates(uint32_t timeout_msec);
	void process_fading_blocks(float delta);

	static void add_transition_update(
			VoxelMeshBlock *block, std::vector<VoxelMeshBlock *> &blocks_pending_transition_update);
	void add_transition_updates_around(
			Vector3i block_pos, int lod_index, std::vector<VoxelMeshBlock *> &blocks_pending_transition_update);
	void process_transition_updates(const std::vector<VoxelMeshBlock *> &blocks_pending_transition_update);
	uint8_t get_transition_mask(Vector3i block_pos, int lod_index) const;

	void _b_save_modified_blocks();
	void _b_set_voxel_bounds(AABB aabb);
	AABB _b_get_voxel_bounds() const;
@@ -309,10 +282,6 @@ private:
	Error _b_debug_dump_as_scene(String fpath, bool include_instancer) const;
	Dictionary _b_get_statistics() const;

	struct OctreeItem {
		LodOctree octree;
	};

#ifdef TOOLS_ENABLED
	void update_gizmos();
#endif
@@ -320,23 +289,6 @@ private:
	static void _bind_methods();

private:
	// This terrain type is a sparse grid of octrees.
	// Indexed by a grid coordinate whose step is the size of the highest-LOD block.
	// Not using a pointer because Map storage is stable.
	// TODO Optimization: could be replaced with a grid data structure
	Map<Vector3i, OctreeItem> _lod_octrees;
	Box3i _last_octree_region_box;

	// Area within which voxels can exist.
	// Note, these bounds might not be exactly represented. This volume is chunk-based, so the result will be
	// approximated to the closest chunk.
	Box3i _bounds_in_voxels;
	//Box3i _prev_bounds_in_voxels;

	Ref<VoxelStream> _stream;
	Ref<VoxelGenerator> _generator;
	Ref<VoxelMesher> _mesher;

	uint32_t _volume_id = 0;
	ProcessCallback _process_callback = PROCESS_CALLBACK_IDLE;

@@ -349,57 +301,27 @@ private:
	unsigned int _collision_mask = 1;
	float _collision_margin = constants::DEFAULT_COLLISION_MARGIN;
	int _collision_update_delay = 0;
	FixedArray<std::vector<Vector3i>, constants::MAX_LOD> _deferred_collision_updates_per_lod;

	float _lod_fade_duration = 0.f;
	// Note, direct pointers to mesh blocks should be safe because these blocks are always destroyed from the same
	// thread that updates fading blocks. If a mesh block is destroyed, these maps should be updated at the same time.
	// TODO Optimization: use FlatMap? Need to check how many blocks get in there, probably not many
	FixedArray<Map<Vector3i, VoxelMeshBlock *>, constants::MAX_LOD> _fading_blocks_per_lod;

	VoxelInstancer *_instancer = nullptr;

	struct AsyncEdit {
		IThreadedTask *task;
		Box3i box;
		std::shared_ptr<AsyncDependencyTracker> task_tracker;
	};

	std::vector<AsyncEdit> _pending_async_edits;

	struct RunningAsyncEdit {
		std::shared_ptr<AsyncDependencyTracker> tracker;
		Box3i box;
	};
	std::vector<RunningAsyncEdit> _running_async_edits;
	Ref<VoxelMesher> _mesher;
	Ref<VoxelGenerator> _generator;
	Ref<VoxelStream> _stream;

	// Data stored with a shared pointer so it can be sent to asynchronous tasks
	bool _threaded_update_enabled = false;
	std::shared_ptr<VoxelDataLodMap> _data;
	std::shared_ptr<VoxelLodTerrainUpdateData> _update_data;
	std::shared_ptr<StreamingDependency> _streaming_dependency;
	std::shared_ptr<MeshingDependency> _meshing_dependency;

	// Each LOD works in a set of coordinates spanning 2x more voxels the higher their index is
	struct Lod {
		// Keeping track of asynchronously loading blocks so we don't try to redundantly load them
		std::unordered_set<Vector3i> loading_blocks;
		// Blocks that were edited and need their LOD counterparts to be updated
		std::vector<Vector3i> blocks_pending_lodding;
		// These are relative to this LOD, in block coordinates
		Vector3i last_viewer_data_block_pos;
		int last_view_distance_data_blocks = 0;

		VoxelMeshMap mesh_map;
		std::vector<Vector3i> blocks_pending_update;
		std::vector<Vector3i> deferred_collision_updates;
		Map<Vector3i, VoxelMeshBlock *> fading_blocks;
		Vector3i last_viewer_mesh_block_pos;
		int last_view_distance_mesh_blocks = 0;

		inline bool has_loading_block(const Vector3i &pos) const {
			return loading_blocks.find(pos) != loading_blocks.end();
		}
	};

	FixedArray<Lod, constants::MAX_LOD> _lods;
	unsigned int _lod_count = 0;
	// Distance between a viewer and the end of LOD0
	float _lod_distance = 0.f;
	float _lod_fade_duration = 0.f;
	unsigned int _view_distance_voxels = 512;
	bool _full_load_mode = false;

	bool _run_stream_in_editor = true;
#ifdef TOOLS_ENABLED
	bool _show_gizmos_enabled = false;
	bool _show_octree_bounds_gizmos = true;

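The header above shows the core of the commit: most per-terrain state moves into shared_ptr-held structures so it can be handed to an update task, guarded by the new `_threaded_update_enabled` flag. The sketch below illustrates that handoff with hypothetical names (UpdateData, TerrainSketch and its methods are not the module's API); the real scheduling lives in VoxelLodTerrain's process logic and VoxelLodTerrainUpdateTask.

#include <atomic>
#include <memory>

// Minimal sketch of the per-frame handoff: either run the update synchronously,
// or schedule a single background task and only touch the shared data again
// once that task reported completion.
struct UpdateData {
	std::atomic_bool task_is_complete{ true };
	// ... settings and state shared with the update task ...
};

struct TerrainSketch {
	bool threaded_update_enabled = false;
	std::shared_ptr<UpdateData> update_data = std::make_shared<UpdateData>();

	void process() {
		if (!threaded_update_enabled) {
			run_update(); // Same logic, executed synchronously on the main thread
			return;
		}
		if (!update_data->task_is_complete) {
			return; // Previous task still running; only one task per terrain at a time
		}
		apply_deferred_outputs(); // Activate/deactivate meshes produced by the last run
		schedule_update_task(); // Enqueue the next run on the thread pool
	}

	void run_update() { /* octree fitting, streaming and meshing requests */ }
	void apply_deferred_outputs() { /* main-thread-only work, e.g. server calls */ }
	void schedule_update_task() {
		update_data->task_is_complete = false;
		// A real implementation would push a task holding shared_ptr copies of the data here.
	}
};

int main() {
	TerrainSketch terrain;
	terrain.threaded_update_enabled = true;
	terrain.process(); // First call applies nothing and schedules a (hypothetical) task
	return 0;
}
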
terrain/voxel_lod_terrain_update_data.h (new file, 143 lines)
@@ -0,0 +1,143 @@
#ifndef VOXEL_LOD_TERRAIN_UPDATE_DATA_H
#define VOXEL_LOD_TERRAIN_UPDATE_DATA_H

#include "../constants/voxel_constants.h"
#include "../generators/voxel_generator.h"
#include "../storage/voxel_data_map.h"
#include "../streams/voxel_stream.h"
#include "../util/fixed_array.h"
#include "lod_octree.h"
#include "voxel_mesh_map.h"

#include <unordered_set>

namespace zylann {

class AsyncDependencyTracker;

namespace voxel {

// struct VoxelMeshLodMap {
// 	struct Lod {
// 		VoxelMeshMap mesh_map;
// 		RWLock mesh_map_lock;
// 	};
// 	FixedArray<Lod, constants::MAX_LOD> lods;
// 	unsigned int lod_count;
// };

// Settings and states needed for the multi-threaded part of the update loop of VoxelLodTerrain.
// See `VoxelLodTerrainUpdateTask` for more info.
struct VoxelLodTerrainUpdateData {
	struct OctreeItem {
		LodOctree octree;
	};

	struct TransitionUpdate {
		Vector3i block_position;
		uint8_t transition_mask;
	};

	// These values don't change during the update task.
	struct Settings {
		// Area within which voxels can exist.
		// Note, these bounds might not be exactly represented. This volume is chunk-based, so the result will be
		// approximated to the closest chunk.
		Box3i bounds_in_voxels;
		unsigned int lod_count = 0;
		// Distance between a viewer and the end of LOD0
		float lod_distance = 0.f;
		unsigned int view_distance_voxels = 512;
		bool full_load_mode = false;
		bool run_stream_in_editor = true;
	};

	// Each LOD works in a set of coordinates spanning 2x more voxels the higher their index is
	struct Lod {
		// Keeping track of asynchronously loading blocks so we don't try to redundantly load them
		std::unordered_set<Vector3i> loading_blocks;
		BinaryMutex loading_blocks_mutex;

		// These are relative to this LOD, in block coordinates
		Vector3i last_viewer_data_block_pos;
		int last_view_distance_data_blocks = 0;

		VoxelMeshMap mesh_map;
		// Locked for writing when blocks get inserted or removed from the map.
		// If you need to lock more than one Lod, always do so in increasing order, to avoid deadlocks.
		// IMPORTANT:
		// - The update task only adds blocks to the map, and doesn't remove them
		// - Threads outside the update task must never add or remove blocks to the map (even with locking),
		//   unless the task has finished running
		RWLock mesh_map_lock;

		std::vector<Vector3i> blocks_pending_update;
		Vector3i last_viewer_mesh_block_pos;
		int last_view_distance_mesh_blocks = 0;

		// Deferred outputs to main thread
		std::vector<Vector3i> mesh_blocks_to_unload;
		std::vector<TransitionUpdate> mesh_blocks_to_update_transitions;

		inline bool has_loading_block(const Vector3i &pos) const {
			return loading_blocks.find(pos) != loading_blocks.end();
		}
	};

	struct AsyncEdit {
		IThreadedTask *task;
		Box3i box;
		std::shared_ptr<AsyncDependencyTracker> task_tracker;
	};

	struct RunningAsyncEdit {
		std::shared_ptr<AsyncDependencyTracker> tracker;
		Box3i box;
	};

	// Data modified by the update task
	struct State {
		// This terrain type is a sparse grid of octrees.
		// Indexed by a grid coordinate whose step is the size of the highest-LOD block.
		// Not using a pointer because Map storage is stable.
		// TODO Optimization: could be replaced with a grid data structure
		Map<Vector3i, OctreeItem> lod_octrees;
		Box3i last_octree_region_box;

		FixedArray<Lod, constants::MAX_LOD> lods;

		// This is the entry point for notifying data changes, which will cause mesh updates.
		// Contains blocks that were edited and need their LOD counterparts to be updated.
		// Scheduling is only done at LOD0 because it is the only editable LOD.
		std::vector<Vector3i> blocks_pending_lodding_lod0;
		BinaryMutex blocks_pending_lodding_lod0_mutex;

		std::vector<AsyncEdit> pending_async_edits;
		BinaryMutex pending_async_edits_mutex;
		std::vector<RunningAsyncEdit> running_async_edits;

		// Deferred outputs to main thread
		std::vector<VoxelMeshBlock *> mesh_blocks_to_activate;
		std::vector<VoxelMeshBlock *> mesh_blocks_to_deactivate;
	};

	// Set to true when the update task is finished
	std::atomic_bool task_is_complete;
	// Will be locked as long as the update task is running.
	BinaryMutex completion_mutex;

	Settings settings;
	State state;

	// After this call, no locking is necessary, as no other thread should be using the data.
	// However it can stall for longer, so prefer using it when doing structural changes, such as changing LOD count,
	// LOD distances, or the way the update logic runs.
	void wait_for_end_of_task() {
		MutexLock lock(completion_mutex);
	}
};

} // namespace voxel
} // namespace zylann

#endif // VOXEL_LOD_TERRAIN_UPDATE_DATA_H

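The completion handshake in this header combines a non-blocking flag (`task_is_complete`) with a mutex held for the whole duration of the task (`completion_mutex`), so `wait_for_end_of_task` only returns once the task has released it; per-LOD locks follow the rule stated in the comments (lock LODs in increasing index order to avoid deadlocks). Below is a sketch of just the handshake in plain C++ (std::mutex instead of Godot's BinaryMutex/MutexLock, and an illustrative task body); it assumes, as the comments imply, that the flag is cleared when the task is scheduled.

#include <atomic>
#include <mutex>

// Sketch of the completion handshake: the main thread can poll the flag
// cheaply every frame, or lock the mutex to block until the task is done.
struct CompletionSketch {
	std::atomic_bool task_is_complete{ true };
	std::mutex completion_mutex;

	void wait_for_end_of_task() {
		// Blocks while the task still holds the mutex; cheap if it already finished.
		std::lock_guard<std::mutex> lock(completion_mutex);
	}
};

// Body of a hypothetical update task.
void run_update_task(CompletionSketch &c) {
	std::lock_guard<std::mutex> lock(c.completion_mutex); // Held for the entire task
	// ... read settings, mutate state ...
	c.task_is_complete = true; // Checked by the main thread without blocking
}
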
terrain/voxel_lod_terrain_update_task.cpp (new file, 1391 lines)
File diff suppressed because it is too large

terrain/voxel_lod_terrain_update_task.h (new file, 65 lines)
@@ -0,0 +1,65 @@
#ifndef VOXEL_LOD_TERRAIN_UPDATE_TASK_H
#define VOXEL_LOD_TERRAIN_UPDATE_TASK_H

#include "../server/priority_dependency.h"
#include "../util/tasks/threaded_task.h"
#include "voxel_lod_terrain_update_data.h"

namespace zylann::voxel {

struct StreamingDependency;
struct MeshingDependency;

// Runs a part of the update loop of a VoxelLodTerrain.
// This part can run on another thread, so multiple terrains can update in parallel.
// There must be only one running at once per terrain.
// Note, this task does not include meshing and voxel generation. These are done with different tasks.
//
// IMPORTANT: The work done by this task must not involve any call to Godot's servers, directly or indirectly.
// These are deferred to the main thread.
//
class VoxelLodTerrainUpdateTask : public IThreadedTask {
public:
	VoxelLodTerrainUpdateTask(std::shared_ptr<VoxelDataLodMap> p_data,
			std::shared_ptr<VoxelLodTerrainUpdateData> p_update_data,
			std::shared_ptr<StreamingDependency> p_streaming_dependency,
			std::shared_ptr<MeshingDependency> p_meshing_dependency,
			std::shared_ptr<PriorityDependency::ViewersData> p_shared_viewers_data, Vector3 p_viewer_pos,
			bool p_request_instances, uint32_t p_volume_id, Transform3D p_volume_transform) :
			//
			_data(p_data),
			_update_data(p_update_data),
			_streaming_dependency(p_streaming_dependency),
			_meshing_dependency(p_meshing_dependency),
			_shared_viewers_data(p_shared_viewers_data),
			_viewer_pos(p_viewer_pos),
			_request_instances(p_request_instances),
			_volume_id(p_volume_id),
			_volume_transform(p_volume_transform) {}

	void run(ThreadedTaskContext ctx) override;
	void apply_result() override {}

	// Functions also used outside of this task

	static void flush_pending_lod_edits(VoxelLodTerrainUpdateData::State &state, VoxelDataLodMap &data,
			Ref<VoxelGenerator> generator, bool full_load_mode);

	static uint8_t get_transition_mask(
			const VoxelLodTerrainUpdateData::State &state, Vector3i block_pos, int lod_index, unsigned int lod_count);

private:
	std::shared_ptr<VoxelDataLodMap> _data;
	std::shared_ptr<VoxelLodTerrainUpdateData> _update_data;
	std::shared_ptr<StreamingDependency> _streaming_dependency;
	std::shared_ptr<MeshingDependency> _meshing_dependency;
	std::shared_ptr<PriorityDependency::ViewersData> _shared_viewers_data;
	Vector3 _viewer_pos;
	bool _request_instances;
	uint32_t _volume_id;
	Transform3D _volume_transform;
};

} // namespace zylann::voxel

#endif // VOXEL_LOD_TERRAIN_UPDATE_TASK_H

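The "no Godot server calls from this task" rule in the header above is what the deferred-output vectors in VoxelLodTerrainUpdateData are for: the task only records what should happen, and the main thread performs the actual server work afterwards. A small sketch of that split follows, with illustrative names (MeshBlockStub, defer_activation); no lock is shown because, as in the module, the main thread only reads these lists after the task reports completion.

#include <vector>

// Stand-in for a mesh block handle.
struct MeshBlockStub {};

struct DeferredOutputs {
	std::vector<MeshBlockStub *> mesh_blocks_to_activate;
	std::vector<MeshBlockStub *> mesh_blocks_to_deactivate;
};

// Task side: record only, no rendering or physics calls.
void defer_activation(DeferredOutputs &out, MeshBlockStub *block) {
	out.mesh_blocks_to_activate.push_back(block);
}

// Main thread, after the task is complete: perform the actual server calls.
void apply_deferred_outputs(DeferredOutputs &out) {
	for (MeshBlockStub *block : out.mesh_blocks_to_activate) {
		(void)block; // e.g. show the mesh instance here, on the main thread
	}
	out.mesh_blocks_to_activate.clear();
	for (MeshBlockStub *block : out.mesh_blocks_to_deactivate) {
		(void)block; // e.g. hide the mesh instance here
	}
	out.mesh_blocks_to_deactivate.clear();
}
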
@@ -54,7 +54,7 @@ VoxelMeshBlock::~VoxelMeshBlock() {
		CRASH_COND(mesh.is_null());
		FreeMeshTask *task = memnew(FreeMeshTask());
		task->mesh = mesh;
		VoxelServer::get_singleton()->push_progressive_task(task);
		VoxelServer::get_singleton()->push_main_thread_progressive_task(task);
	}

	void run() override {
@@ -181,6 +181,10 @@ void VoxelMeshBlock::set_mesh_state(MeshState ms) {
	_mesh_state = ms;
}

void VoxelMeshBlock::set_mesh_state_if_equal(MeshState previous_state, MeshState new_state) {
	_mesh_state.compare_exchange_strong(previous_state, new_state);
}

VoxelMeshBlock::MeshState VoxelMeshBlock::get_mesh_state() const {
	return _mesh_state;
}

@@ -2,11 +2,13 @@
#define VOXEL_MESH_BLOCK_H

#include "../constants/cube_tables.h"
#include "../meshers/voxel_mesher.h"
#include "../util/fixed_array.h"
#include "../util/godot/direct_mesh_instance.h"
#include "../util/godot/direct_static_body.h"
#include "../util/ref_count.h"
#include "../util/span.h"
#include <atomic>

class Node3D;

@@ -86,6 +88,7 @@ public:
	// State

	void set_mesh_state(MeshState ms);
	void set_mesh_state_if_equal(MeshState previous_state, MeshState new_state);
	MeshState get_mesh_state() const;

	void set_visible(bool visible);
@@ -149,7 +152,7 @@ private:
	bool _visible = false;

	bool _parent_visible = true;
	MeshState _mesh_state = MESH_NEVER_UPDATED;
	std::atomic<MeshState> _mesh_state = MESH_NEVER_UPDATED;
	uint8_t _transition_mask = 0;
};

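The mesh state becomes a std::atomic and gains `set_mesh_state_if_equal`, which uses compare_exchange_strong so the threaded update can transition the state only when it still holds the expected value, without a lock. A standalone sketch of that mechanism follows; only MESH_NEVER_UPDATED appears in the diff above, the other enumerators here are illustrative placeholders.

#include <atomic>
#include <cassert>

enum MeshState { MESH_NEVER_UPDATED, MESH_PENDING_EXAMPLE, MESH_READY_EXAMPLE };

std::atomic<MeshState> mesh_state{ MESH_NEVER_UPDATED };

void set_mesh_state_if_equal(MeshState previous_state, MeshState new_state) {
	// Swaps only if the current value equals `previous_state`; otherwise leaves it unchanged.
	mesh_state.compare_exchange_strong(previous_state, new_state);
}

int main() {
	set_mesh_state_if_equal(MESH_NEVER_UPDATED, MESH_PENDING_EXAMPLE); // Succeeds
	set_mesh_state_if_equal(MESH_NEVER_UPDATED, MESH_READY_EXAMPLE); // No effect: state already changed
	assert(mesh_state == MESH_PENDING_EXAMPLE);
	return 0;
}
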
@@ -135,7 +135,7 @@ void VoxelMeshMap::queue_free_mesh_block(VoxelMeshBlock *block) {
// We spread this out because of physics
// TODO Could it be enough to do both render and physic deallocation with the task in ~VoxelMeshBlock()?
struct FreeMeshBlockTask : public zylann::ITimeSpreadTask {
	void run() override {
	void run(TimeSpreadTaskContext &ctx) override {
		memdelete(block);
	}
	VoxelMeshBlock *block = nullptr;
@@ -143,7 +143,7 @@ void VoxelMeshMap::queue_free_mesh_block(VoxelMeshBlock *block) {
	ERR_FAIL_COND(block == nullptr);
	FreeMeshBlockTask *task = memnew(FreeMeshBlockTask);
	task->block = block;
	VoxelServer::get_singleton()->push_time_spread_task(task);
	VoxelServer::get_singleton()->push_main_thread_time_spread_task(task);
}

bool VoxelMeshMap::has_block(Vector3i pos) const {

@@ -29,7 +29,7 @@ VoxelTerrain::VoxelTerrain() {
	_bounds_in_voxels = Box3i::from_center_extents(Vector3i(), Vector3iUtil::create(constants::MAX_VOLUME_EXTENT));

	struct ApplyMeshUpdateTask : public ITimeSpreadTask {
		void run() override {
		void run(TimeSpreadTaskContext &ctx) override {
			if (!VoxelServer::get_singleton()->is_volume_valid(volume_id)) {
				// The node can have been destroyed while this task was still pending
				PRINT_VERBOSE("Cancelling ApplyMeshUpdateTask, volume_id is invalid");
@@ -53,7 +53,7 @@ VoxelTerrain::VoxelTerrain() {
		task->volume_id = self->_volume_id;
		task->self = self;
		task->data = ob;
		VoxelServer::get_singleton()->push_time_spread_task(task);
		VoxelServer::get_singleton()->push_main_thread_time_spread_task(task);
	};
	callbacks.data_output_callback = [](void *cb_data, VoxelServer::BlockDataOutput &ob) {
		VoxelTerrain *self = reinterpret_cast<VoxelTerrain *>(cb_data);

@@ -125,8 +125,11 @@ Ref<ConcavePolygonShape3D> create_concave_polygon_shape(Span<const Array> surfac
	}

	Ref<ConcavePolygonShape3D> shape;
	shape.instantiate();
	shape->set_faces(face_points);
	{
		VOXEL_PROFILE_SCOPE_NAMED("Godot shape");
		shape.instantiate();
		shape->set_faces(face_points);
	}
	return shape;
}

@@ -70,7 +70,7 @@ inline std::shared_ptr<T> gd_make_shared(Arg0_T arg0, Arg1_T arg1, Arg2_T arg2)
// For use with smart pointers such as std::unique_ptr
template <typename T>
struct GodotObjectDeleter {
	void operator()(T *obj) {
	inline void operator()(T *obj) {
		memdelete(obj);
	}
};

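GodotObjectDeleter lets std::unique_ptr destroy Godot objects through memdelete instead of operator delete. A small self-contained sketch of the same custom-deleter pattern in plain C++ follows; the Resource type and the printing destructor are stand-ins, and plain delete replaces memdelete here.

#include <cstdio>
#include <memory>

// A stand-in resource type; its destructor just reports that it ran.
struct Resource {
	~Resource() {
		std::printf("freed\n");
	}
};

// Same shape as GodotObjectDeleter, but calling plain delete instead of memdelete.
template <typename T>
struct CustomDeleter {
	inline void operator()(T *obj) {
		delete obj;
	}
};

template <typename T>
using UniquePtrWithDeleter = std::unique_ptr<T, CustomDeleter<T>>;

int main() {
	UniquePtrWithDeleter<Resource> res(new Resource());
	return 0; // "freed" is printed when res goes out of scope
}
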
@@ -13,6 +13,7 @@
#define VOXEL_PROFILE_MARK_FRAME() FrameMark
#define VOXEL_PROFILE_SET_THREAD_NAME(name) tracy::SetThreadName(name)
#define VOXEL_PROFILE_PLOT(name, number) TracyPlot(name, number)
#define VOXEL_PROFILE_MESSAGE(message) TracyMessageL(message)

#else

@@ -21,6 +22,7 @@
#define VOXEL_PROFILE_SCOPE_NAMED(name)
#define VOXEL_PROFILE_MARK_FRAME()
#define VOXEL_PROFILE_PLOT(name, number)
#define VOXEL_PROFILE_MESSAGE(message)
// Name must be const char*. An internal copy will be made so it can be temporary.
#define VOXEL_PROFILE_SET_THREAD_NAME(name)

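When Tracy is enabled these macros forward to TracyPlot, TracyMessageL and friends, and otherwise compile to nothing. As a rough illustration of what a scoped-profiling macro such as VOXEL_PROFILE_SCOPE_NAMED conceptually expands to, here is a standalone RAII timer in plain C++; it is not Tracy's API, and the macro name is deliberately different to avoid suggesting it is the module's definition.

#include <chrono>
#include <cstdio>

// RAII timer covering the enclosing block, printed on destruction.
struct ScopeTimer {
	const char *name;
	std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
	explicit ScopeTimer(const char *p_name) : name(p_name) {}
	~ScopeTimer() {
		const auto elapsed = std::chrono::steady_clock::now() - start;
		std::printf("%s: %lld us\n", name,
				static_cast<long long>(
						std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count()));
	}
};

#define EXAMPLE_PROFILE_SCOPE_NAMED(name) ScopeTimer profile_scope_timer(name)

int main() {
	{
		EXAMPLE_PROFILE_SCOPE_NAMED("Godot shape");
		// ... build the collision shape ...
	}
	return 0;
}
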
@@ -11,38 +11,67 @@ TimeSpreadTaskRunner::~TimeSpreadTaskRunner() {
}

void TimeSpreadTaskRunner::push(ITimeSpreadTask *task) {
	MutexLock lock(_tasks_mutex);
	_tasks.push(task);
}

void TimeSpreadTaskRunner::push(Span<ITimeSpreadTask *> tasks) {
	MutexLock lock(_tasks_mutex);
	for (unsigned int i = 0; i < tasks.size(); ++i) {
		_tasks.push(tasks[i]);
	}
}

void TimeSpreadTaskRunner::process(uint64_t time_budget_usec) {
	VOXEL_PROFILE_SCOPE();
	const Time &time = *Time::get_singleton();

	if (_tasks.size() > 0) {
		const uint64_t time_before = time.get_ticks_usec();
	static thread_local std::vector<ITimeSpreadTask *> tls_postponed_tasks;
	CRASH_COND(tls_postponed_tasks.size() > 0);

		// Do at least one task
		do {
			ITimeSpreadTask *task = _tasks.front();
	const uint64_t time_before = time.get_ticks_usec();

	// Do at least one task
	do {
		ITimeSpreadTask *task;
		{
			MutexLock lock(_tasks_mutex);
			if (_tasks.size() == 0) {
				break;
			}
			task = _tasks.front();
			_tasks.pop();
			task->run();
		}

		TimeSpreadTaskContext ctx;
		task->run(ctx);

		if (ctx.postpone) {
			tls_postponed_tasks.push_back(task);
		} else {
			// TODO Call recycling function instead?
			memdelete(task);
		}

		} while (_tasks.size() > 0 && time.get_ticks_usec() - time_before < time_budget_usec);
	} while (time.get_ticks_usec() - time_before < time_budget_usec);

	if (tls_postponed_tasks.size() > 0) {
		push(to_span(tls_postponed_tasks));
		tls_postponed_tasks.clear();
	}
}

void TimeSpreadTaskRunner::flush() {
	while (!_tasks.empty()) {
		ITimeSpreadTask *task = _tasks.front();
		_tasks.pop();
		task->run();
		memdelete(task);
	// Note, it is assumed no other threads can push tasks anymore.
	// It is up to the caller to stop them before flushing.
	while (get_pending_count() != 0) {
		process(100);
		// Sleep?
	}
}

unsigned int TimeSpreadTaskRunner::get_pending_count() const {
	MutexLock lock(_tasks_mutex);
	return _tasks.size();
}

@@ -1,29 +1,43 @@
#ifndef ZYLANN_TIME_SPREAD_TASK_RUNNER_H
#define ZYLANN_TIME_SPREAD_TASK_RUNNER_H

#include "../span.h"
#include <core/os/mutex.h>
#include <cstdint>
#include <queue>

namespace zylann {

struct TimeSpreadTaskContext {
	// If this is set to `true` by a task,
	// it will be re-scheduled to run again, the next time the runner is processed.
	// Otherwise, the task will be destroyed after it runs.
	bool postpone = false;
};

class ITimeSpreadTask {
public:
	virtual ~ITimeSpreadTask() {}
	virtual void run() = 0;
	virtual void run(TimeSpreadTaskContext &ctx) = 0;
};

// Runs tasks in the caller thread, within a time budget per call.
// Runs tasks in the caller thread, within a time budget per call. Kind of like coroutines.
class TimeSpreadTaskRunner {
public:
	~TimeSpreadTaskRunner();

	// Pushing is thread-safe.
	void push(ITimeSpreadTask *task);
	void push(Span<ITimeSpreadTask *> tasks);

	void process(uint64_t time_budget_usec);
	void flush();
	unsigned int get_pending_count() const;

private:
	// TODO Optimization: naive thread safety. Should be enough for now.
	std::queue<ITimeSpreadTask *> _tasks;
	BinaryMutex _tasks_mutex;
};

} // namespace zylann
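Tasks can now ask to be re-run instead of being destroyed by setting `ctx.postpone`, and the runner re-queues them on the next `process()` call. The sketch below is written against the interface shown in this header, so it is not standalone (it assumes this header is included); the class name and the readiness check are illustrative only.

#include <cstdio>

// Example of a postponing task: it keeps deferring itself until some condition
// becomes true, then finishes and lets the runner delete it.
class WaitForThingTask : public zylann::ITimeSpreadTask {
public:
	void run(zylann::TimeSpreadTaskContext &ctx) override {
		if (!_thing_ready) {
			// Not ready yet: ask the runner to try again on a later process() call.
			_thing_ready = check_thing();
			ctx.postpone = !_thing_ready;
			return;
		}
		std::printf("thing is ready, finishing task\n");
		// Returning with ctx.postpone == false lets the runner memdelete this task.
	}

private:
	bool check_thing() {
		return ++_attempts >= 3; // Stand-in for a real readiness check
	}
	bool _thing_ready = false;
	int _attempts = 0;
};

Such a task would be scheduled like the FreeMeshBlockTask seen earlier, for example with VoxelServer::get_singleton()->push_main_thread_time_spread_task(memnew(WaitForThingTask)), and would be retried on following frames until it stops postponing.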