Added full load mode to VoxelLodTerrain

With this mode enabled, editing anywhere becomes possible.
All edited blocks are loaded at once when the stream is assigned.
The generator is used on the fly to fill the gaps, instead of being
pre-emptively cached only near the viewer to allow editing.
Editing is still on the main thread, so this can cause stalls if
the generator is complex. There is also no controlled limit on how
much data gets loaded, so if edited data is larger than RAM, it will
not be playable. The feature is already quite usable, but more work
may be done for wider viability.
master
Marc Gilleron 2021-10-03 01:48:07 +01:00
parent e6f1be6ac3
commit e04091f3cb
20 changed files with 741 additions and 174 deletions

View File

@ -92,14 +92,14 @@ Ref<VoxelRaycastResult> VoxelToolLodTerrain::raycast(
// TODO Implement reverse raycast? (going from inside ground to air, could be useful for undigging)
struct RaycastPredicate {
const VoxelLodTerrain *terrain;
VoxelLodTerrain *terrain;
bool operator()(Vector3i pos) {
// This is not particularly optimized, but runs fast enough for player raycasts
const uint64_t raw_value = terrain->get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, 0);
// TODO Format should be accessible from terrain
const float sdf = u16_to_norm(raw_value);
return sdf < 0;
VoxelSingleValue defval;
defval.f = 1.f;
const VoxelSingleValue v = terrain->get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, defval);
return v.f < 0;
}
};
@ -135,13 +135,13 @@ Ref<VoxelRaycastResult> VoxelToolLodTerrain::raycast(
if (_raycast_binary_search_iterations > 0) {
// This is not particularly optimized, but runs fast enough for player raycasts
struct VolumeSampler {
const VoxelLodTerrain *terrain;
VoxelLodTerrain *terrain;
inline float operator()(const Vector3i &pos) const {
const uint64_t raw_value = terrain->get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, 0);
// TODO Format should be accessible from terrain
const float sdf = u16_to_norm(raw_value);
return sdf;
VoxelSingleValue defval;
defval.f = 1.f;
const VoxelSingleValue value = terrain->get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, defval);
return value.f;
}
};
@ -220,27 +220,29 @@ void VoxelToolLodTerrain::copy(Vector3i pos, Ref<VoxelBuffer> dst, uint8_t chann
float VoxelToolLodTerrain::get_voxel_f_interpolated(Vector3 position) const {
ERR_FAIL_COND_V(_terrain == nullptr, 0);
const int channel = get_channel();
const VoxelLodTerrain *terrain = _terrain;
VoxelLodTerrain *terrain = _terrain;
// TODO Optimization: is it worth a making a fast-path for this?
return get_sdf_interpolated([terrain, channel](Vector3i ipos) {
const uint64_t raw_value = terrain->get_voxel(ipos, VoxelBufferInternal::CHANNEL_SDF, 0);
// TODO Format should be accessible from terrain
const float sdf = u16_to_norm(raw_value);
return sdf;
VoxelSingleValue defval;
defval.f = 1.f;
VoxelSingleValue value = terrain->get_voxel(ipos, VoxelBufferInternal::CHANNEL_SDF, defval);
return value.f;
},
position);
}
uint64_t VoxelToolLodTerrain::_get_voxel(Vector3i pos) const {
ERR_FAIL_COND_V(_terrain == nullptr, 0);
return _terrain->get_voxel(pos, _channel, 0);
VoxelSingleValue defval;
defval.i = 0;
return _terrain->get_voxel(pos, _channel, defval).i;
}
float VoxelToolLodTerrain::_get_voxel_f(Vector3i pos) const {
ERR_FAIL_COND_V(_terrain == nullptr, 0);
const uint64_t raw_value = _terrain->get_voxel(pos, _channel, 0);
// TODO Format should be accessible from terrain
return u16_to_norm(raw_value);
VoxelSingleValue defval;
defval.f = 1.f;
return _terrain->get_voxel(pos, _channel, defval).f;
}
void VoxelToolLodTerrain::_set_voxel(Vector3i pos, uint64_t v) {

View File

@ -418,16 +418,21 @@ VoxelGenerator::Result VoxelGeneratorGraph::generate_block(VoxelBlockRequest &in
// Clip threshold must be higher for higher lod indexes because distances for one sampled voxel are also larger
const float clip_threshold = sdf_scale * _sdf_clip_threshold * stride;
// TODO Allow non-cubic block size when not using subdivision
const int section_size = _use_subdivision ? _subdivision_size : min(min(bs.x, bs.y), bs.z);
// Block size must be a multiple of section size
ERR_FAIL_COND_V(bs.x % section_size != 0, result);
ERR_FAIL_COND_V(bs.y % section_size != 0, result);
ERR_FAIL_COND_V(bs.z % section_size != 0, result);
// Block size must be a multiple of section size, as all sections must have the same size
const bool can_use_subdivision =
(bs.x % _subdivision_size == 0) &&
(bs.y % _subdivision_size == 0) &&
(bs.z % _subdivision_size == 0);
const Vector3i section_size = _use_subdivision && can_use_subdivision ? Vector3i(_subdivision_size) : bs;
// ERR_FAIL_COND_V(bs.x % section_size != 0, result);
// ERR_FAIL_COND_V(bs.y % section_size != 0, result);
// ERR_FAIL_COND_V(bs.z % section_size != 0, result);
Cache &cache = _cache;
const unsigned int slice_buffer_size = section_size * section_size;
// Slice is on the Y axis
const unsigned int slice_buffer_size = section_size.x * section_size.z;
VoxelGraphRuntime &runtime = runtime_ptr->runtime;
runtime.prepare_state(cache.state, slice_buffer_size);
@ -448,9 +453,9 @@ VoxelGenerator::Result VoxelGeneratorGraph::generate_block(VoxelBlockRequest &in
bool all_sdf_is_uniform = true;
// For each subdivision of the block
for (int sz = 0; sz < bs.z; sz += section_size) {
for (int sy = 0; sy < bs.y; sy += section_size) {
for (int sx = 0; sx < bs.x; sx += section_size) {
for (int sz = 0; sz < bs.z; sz += section_size.z) {
for (int sy = 0; sy < bs.y; sy += section_size.y) {
for (int sx = 0; sx < bs.x; sx += section_size.x) {
VOXEL_PROFILE_SCOPE_NAMED("Section");
const Vector3i rmin(sx, sy, sz);
@ -1044,22 +1049,28 @@ void VoxelGeneratorGraph::bake_sphere_normalmap(Ref<Image> im, float ref_radius,
for_chunks_2d(im->get_width(), im->get_height(), 32, pc);
}
// TODO This function isn't used yet, but whatever uses it should probably put locking and cache outside
float VoxelGeneratorGraph::generate_single(const Vector3i &position) {
VoxelSingleValue VoxelGeneratorGraph::generate_single(Vector3i position, unsigned int channel) {
// TODO Support other channels
VoxelSingleValue v;
v.i = 0;
if (channel != VoxelBufferInternal::CHANNEL_SDF) {
return v;
}
std::shared_ptr<const Runtime> runtime_ptr;
{
RWLockRead rlock(_runtime_lock);
runtime_ptr = _runtime;
}
ERR_FAIL_COND_V(runtime_ptr == nullptr, 0.f);
ERR_FAIL_COND_V(runtime_ptr == nullptr, v);
Cache &cache = _cache;
const VoxelGraphRuntime &runtime = runtime_ptr->runtime;
runtime.prepare_state(cache.state, 1);
runtime.generate_single(cache.state, position.to_vec3(), nullptr);
const VoxelGraphRuntime::Buffer &buffer = cache.state.get_buffer(runtime_ptr->sdf_output_buffer_index);
ERR_FAIL_COND_V(buffer.size == 0, 0.f);
ERR_FAIL_COND_V(buffer.data == nullptr, 0.f);
return buffer.data[0];
ERR_FAIL_COND_V(buffer.size == 0, v);
ERR_FAIL_COND_V(buffer.data == nullptr, v);
v.f = buffer.data[0];
return v;
}
// Note, this wrapper may not be used for main generation tasks.
@ -1442,7 +1453,7 @@ void VoxelGeneratorGraph::_b_set_node_param_null(int node_id, int param_index) {
}
float VoxelGeneratorGraph::_b_generate_single(Vector3 pos) {
return generate_single(Vector3i(pos));
return generate_single(Vector3i(pos), VoxelBufferInternal::CHANNEL_SDF).f;
}
Vector2 VoxelGeneratorGraph::_b_debug_analyze_range(Vector3 min_pos, Vector3 max_pos) const {
@ -1522,7 +1533,7 @@ void VoxelGeneratorGraph::_bind_methods() {
ClassDB::bind_method(D_METHOD("get_node_type_count"), &VoxelGeneratorGraph::_b_get_node_type_count);
ClassDB::bind_method(D_METHOD("get_node_type_info", "type_id"), &VoxelGeneratorGraph::_b_get_node_type_info);
ClassDB::bind_method(D_METHOD("generate_single"), &VoxelGeneratorGraph::_b_generate_single);
//ClassDB::bind_method(D_METHOD("generate_single"), &VoxelGeneratorGraph::_b_generate_single);
ClassDB::bind_method(D_METHOD("debug_analyze_range", "min_pos", "max_pos"),
&VoxelGeneratorGraph::_b_debug_analyze_range);

View File

@ -126,7 +126,9 @@ public:
int get_used_channels_mask() const override;
Result generate_block(VoxelBlockRequest &input) override;
float generate_single(const Vector3i &position);
//float generate_single(const Vector3i &position);
bool supports_single_generation() const override { return true; }
VoxelSingleValue generate_single(Vector3i position, unsigned int channel) override;
Ref<Resource> duplicate(bool p_subresources) const override;

View File

@ -12,6 +12,21 @@ int VoxelGenerator::get_used_channels_mask() const {
return 0;
}
// Generates the value of a single voxel at `pos` on the given `channel`.
// Default slow fallback: generates a whole 1x1x1 block and reads its only voxel.
// Generators that can do better should override this (see supports_single_generation()).
VoxelSingleValue VoxelGenerator::generate_single(Vector3i pos, unsigned int channel) {
// Default slow implementation
VoxelBufferInternal buffer;
buffer.create(1, 1, 1);
// Third field is the LOD index; always 0 here — assumes single-voxel queries are LOD0-only. TODO confirm
VoxelBlockRequest r{ buffer, pos, 0 };
generate_block(r);
VoxelSingleValue v;
if (channel == VoxelBufferInternal::CHANNEL_SDF) {
// The SDF channel is interpreted as a float
v.f = buffer.get_voxel_f(0, 0, 0, channel);
} else {
// Every other channel is a raw integer value
v.i = buffer.get_voxel(0, 0, 0, channel);
}
return v;
}
void VoxelGenerator::_b_generate_block(Ref<VoxelBuffer> out_buffer, Vector3 origin_in_voxels, int lod) {
ERR_FAIL_COND(lod < 0);
ERR_FAIL_COND(out_buffer.is_null());

View File

@ -4,6 +4,11 @@
#include "../streams/voxel_block_request.h"
#include <core/resource.h>
// A single voxel value. Interpreted either as a raw integer (`i`) or as a
// float (`f`, used for the SDF channel) depending on the queried channel.
union VoxelSingleValue {
uint64_t i;
float f;
};
// Provides access to read-only generated voxels.
// Must be implemented in a multi-thread-safe way.
class VoxelGenerator : public Resource {
@ -22,6 +27,16 @@ public:
virtual Result generate_block(VoxelBlockRequest &input);
// TODO Single sample
virtual bool supports_single_generation() const { return false; }
// TODO Not sure if it's a good API regarding performance
virtual VoxelSingleValue generate_single(Vector3i pos, unsigned int channel);
// virtual void generate_series(
// Span<const Vector3> positions,
// Span<const uint8_t> channels,
// Span<Span<VoxelSingleValue>> out_values);
// Declares the channels this generator will use
virtual int get_used_channels_mask() const;

View File

@ -24,7 +24,7 @@ static const unsigned int MAX_TEXTURE_BLENDS = 4;
enum TexturingMode {
TEXTURES_NONE,
// Blends the 4 most-represented textures in the given block, ignoring the others.
// Texture indices and blend factors have 4-bit precision (maximum 16 textures),
// Texture indices and blend factors have 4-bit precision (maximum 16 textures and 16 transition gradients),
// and are respectively encoded in UV and UV2.
TEXTURES_BLEND_4_OVER_16
};

View File

@ -265,13 +265,27 @@ void VoxelServer::set_volume_generator(uint32_t volume_id, Ref<VoxelGenerator> g
volume.stream_dependency = gd_make_shared<StreamingDependency>();
volume.stream_dependency->generator = volume.generator;
volume.stream_dependency->stream = volume.stream;
if (volume.meshing_dependency != nullptr) {
volume.meshing_dependency->valid = false;
}
volume.meshing_dependency = gd_make_shared<MeshingDependency>();
volume.meshing_dependency->mesher = volume.mesher;
volume.meshing_dependency->generator = volume.generator;
}
// Assigns the mesher used by a volume and rebuilds its meshing dependency.
void VoxelServer::set_volume_mesher(uint32_t volume_id, Ref<VoxelMesher> mesher) {
Volume &volume = _world.volumes.get(volume_id);
volume.mesher = mesher;
// Invalidate the old dependency so in-flight meshing tasks referencing it get cancelled
if (volume.meshing_dependency != nullptr) {
volume.meshing_dependency->valid = false;
}
// New tasks will capture this fresh dependency, carrying both mesher and generator
volume.meshing_dependency = gd_make_shared<MeshingDependency>();
volume.meshing_dependency->mesher = volume.mesher;
volume.meshing_dependency->generator = volume.generator;
}
void VoxelServer::set_volume_octree_lod_distance(uint32_t volume_id, float lod_distance) {
@ -284,6 +298,7 @@ void VoxelServer::invalidate_volume_mesh_requests(uint32_t volume_id) {
volume.meshing_dependency->valid = false;
volume.meshing_dependency = gd_make_shared<MeshingDependency>();
volume.meshing_dependency->mesher = volume.mesher;
volume.meshing_dependency->generator = volume.generator;
}
static inline Vector3i get_block_center(Vector3i pos, int bs, int lod) {
@ -325,6 +340,7 @@ void VoxelServer::init_priority_dependency(
void VoxelServer::request_block_mesh(uint32_t volume_id, const BlockMeshInput &input) {
const Volume &volume = _world.volumes.get(volume_id);
ERR_FAIL_COND(volume.meshing_dependency == nullptr);
ERR_FAIL_COND(volume.data_block_size > 255);
BlockMeshRequest *r = memnew(BlockMeshRequest);
r->volume_id = volume_id;
@ -333,6 +349,7 @@ void VoxelServer::request_block_mesh(uint32_t volume_id, const BlockMeshInput &i
r->position = input.render_block_position;
r->lod = input.lod;
r->meshing_dependency = volume.meshing_dependency;
r->data_block_size = volume.data_block_size;
init_priority_dependency(
r->priority_dependency, input.render_block_position, input.lod, volume, volume.render_block_size);
@ -376,6 +393,19 @@ void VoxelServer::request_block_load(uint32_t volume_id, Vector3i block_pos, int
}
}
// Schedules a task that loads every saved block of the volume's stream at once
// (used by full load mode). Requires the volume to have a stream assigned.
void VoxelServer::request_all_stream_blocks(uint32_t volume_id) {
PRINT_VERBOSE(String("Request all blocks for volume {0}").format(varray(volume_id)));
const Volume &volume = _world.volumes.get(volume_id);
ERR_FAIL_COND(volume.stream.is_null());
CRASH_COND(volume.stream_dependency == nullptr);
AllBlocksDataRequest *r = memnew(AllBlocksDataRequest);
r->volume_id = volume_id;
r->stream_dependency = volume.stream_dependency;
// Goes to the general pool: a one-off bulk task, not a per-block streaming request
_general_thread_pool.enqueue(r);
}
void VoxelServer::request_voxel_block_save(uint32_t volume_id, std::shared_ptr<VoxelBufferInternal> voxels,
Vector3i block_pos, int lod) {
//
@ -620,7 +650,6 @@ Dictionary VoxelServer::Stats::to_dict() {
Dictionary mem;
mem["voxel_total"] = VoxelMemoryPool::get_singleton()->debug_get_total_memory();
mem["voxel_used"] = VoxelMemoryPool::get_singleton()->debug_get_used_memory();
mem["block_count"] = VoxelMemoryPool::get_singleton()->debug_get_used_blocks();
Dictionary d;
d["thread_pools"] = pools;
@ -808,6 +837,67 @@ void VoxelServer::BlockDataRequest::apply_result() {
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Trivial lifecycle: this task owns no resources needing explicit setup or teardown.
VoxelServer::AllBlocksDataRequest::AllBlocksDataRequest() = default;

VoxelServer::AllBlocksDataRequest::~AllBlocksDataRequest() = default;
// Worker-thread entry point: loads every block from the stream into `result`.
void VoxelServer::AllBlocksDataRequest::run(VoxelTaskContext ctx) {
VOXEL_PROFILE_SCOPE();
CRASH_COND(stream_dependency == nullptr);
Ref<VoxelStream> stream = stream_dependency->stream;
CRASH_COND(stream.is_null());
// Blocking bulk load; result is applied later on the main thread in apply_result()
stream->load_all_blocks(result);
PRINT_VERBOSE(String("Loaded {0} blocks for volume {1}").format(varray(result.blocks.size(), volume_id)));
}
// Constant priority: this task is not distance-sorted like per-block requests.
// NOTE(review): assumes the pool's ordering convention for 0 — confirm against IVoxelTask users.
int VoxelServer::AllBlocksDataRequest::get_priority() {
return 0;
}
// The task is dropped if its stream dependency was invalidated (stream/generator changed).
bool VoxelServer::AllBlocksDataRequest::is_cancelled() {
return !stream_dependency->valid;
}
// Main-thread completion: appends all loaded blocks to the volume's reception
// buffers, where the terrain node will pick them up.
void VoxelServer::AllBlocksDataRequest::apply_result() {
Volume *volume = VoxelServer::get_singleton()->_world.volumes.try_get(volume_id);
if (volume != nullptr) {
// TODO Comparing pointer may not be guaranteed
// The request response must match the dependency it would have been requested with.
// If it doesn't match, we are no longer interested in the result.
if (stream_dependency == volume->stream_dependency) {
std::vector<BlockDataOutput> &data_output = volume->reception_buffers->data_output;
// Append after any output already queued
size_t dst_i = data_output.size();
data_output.resize(data_output.size() + result.blocks.size());
for (auto it = result.blocks.begin(); it != result.blocks.end(); ++it) {
VoxelStream::FullLoadingResult::Block &rb = *it;
BlockDataOutput &o = data_output[dst_i];
o.voxels = rb.voxels;
// Ownership of instance data is transferred to the output
o.instances = std::move(rb.instances_data);
o.position = rb.position;
o.lod = rb.lod;
o.dropped = false;
o.max_lod_hint = false;
++dst_i;
}
}
} else {
// This can happen if the user removes the volume while requests are still about to return
PRINT_VERBOSE("Stream data request response came back but volume wasn't found");
}
}
//----------------------------------------------------------------------------------------------------------------------
VoxelServer::BlockGenerateRequest::BlockGenerateRequest() {
@ -887,18 +977,14 @@ void VoxelServer::BlockGenerateRequest::apply_result() {
// Voxels from central blocks are copied, and part of side blocks are also copied so we get a temporary buffer
// which includes enough neighbors for the mesher to avoid doing bound checks.
static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>> blocks, VoxelBufferInternal &dst,
int min_padding, int max_padding, int channels_mask) {
int min_padding, int max_padding, int channels_mask, Ref<VoxelGenerator> generator, int data_block_size,
int lod_index, Vector3i mesh_block_pos) {
VOXEL_PROFILE_SCOPE();
// Extract wanted channels in a list
FixedArray<uint8_t, VoxelBuffer::MAX_CHANNELS> channels;
unsigned int channels_count = 0;
for (unsigned int i = 0; i < VoxelBuffer::MAX_CHANNELS; ++i) {
if ((channels_mask & (1 << i)) != 0) {
channels[channels_count] = i;
++channels_count;
}
}
FixedArray<uint8_t, VoxelBuffer::MAX_CHANNELS> channels =
VoxelBufferInternal::mask_to_channels_list(channels_mask, channels_count);
// Determine size of the cube of blocks
int edge_size;
@ -920,29 +1006,37 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
const unsigned int anchor_buffer_index = edge_size * edge_size + edge_size + 1;
std::shared_ptr<VoxelBufferInternal> &central_buffer = blocks[anchor_buffer_index];
ERR_FAIL_COND_MSG(central_buffer == nullptr, "Central buffer must be valid");
ERR_FAIL_COND_MSG(central_buffer->get_size().all_members_equal() == false, "Central buffer must be cubic");
const int data_block_size = central_buffer->get_size().x;
ERR_FAIL_COND_MSG(central_buffer == nullptr && generator.is_null(), "Central buffer must be valid");
if (central_buffer != nullptr) {
ERR_FAIL_COND_MSG(central_buffer->get_size().all_members_equal() == false, "Central buffer must be cubic");
}
const int mesh_block_size = data_block_size * mesh_block_size_factor;
const int padded_mesh_block_size = mesh_block_size + min_padding + max_padding;
dst.create(padded_mesh_block_size, padded_mesh_block_size, padded_mesh_block_size);
for (unsigned int ci = 0; ci < channels.size(); ++ci) {
dst.set_channel_depth(ci, central_buffer->get_channel_depth(ci));
}
// TODO Need to provide format
// for (unsigned int ci = 0; ci < channels.size(); ++ci) {
// dst.set_channel_depth(ci, central_buffer->get_channel_depth(ci));
// }
const Vector3i min_pos = -Vector3i(min_padding);
const Vector3i max_pos = Vector3i(mesh_block_size + max_padding);
std::vector<Box3i> boxes_to_generate;
const Box3i mesh_data_box = Box3i::from_min_max(min_pos, max_pos);
if (generator.is_valid()) {
boxes_to_generate.push_back(mesh_data_box);
}
// Using ZXY as convention to reconstruct positions with thread locking consistency
unsigned int i = 0;
unsigned int block_index = 0;
for (int z = -1; z < edge_size - 1; ++z) {
for (int x = -1; x < edge_size - 1; ++x) {
for (int y = -1; y < edge_size - 1; ++y) {
const Vector3i offset = data_block_size * Vector3i(x, y, z);
const std::shared_ptr<VoxelBufferInternal> &src = blocks[i];
++i;
const std::shared_ptr<VoxelBufferInternal> &src = blocks[block_index];
++block_index;
if (src == nullptr) {
continue;
@ -957,6 +1051,47 @@ static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>>
dst.copy_from(*src, src_min, src_max, Vector3(), channels[ci]);
}
}
if (generator.is_valid()) {
// Subtract edited box from the area to generate
// TODO This approach allows to batch boxes if necessary,
// but is it just better to do it anyways for every clipped box?
VOXEL_PROFILE_SCOPE_NAMED("Box subtract");
unsigned int count = boxes_to_generate.size();
Box3i block_box = Box3i(offset, Vector3i(data_block_size)).clipped(mesh_data_box);
for (unsigned int box_index = 0; box_index < count; ++box_index) {
Box3i box = boxes_to_generate[box_index];
box.difference(block_box, boxes_to_generate);
#ifdef DEBUG_ENABLED
CRASH_COND(box_index >= boxes_to_generate.size());
#endif
boxes_to_generate[box_index] = boxes_to_generate.back();
boxes_to_generate.pop_back();
}
}
}
}
}
if (generator.is_valid()) {
// Complete data with generated voxels
VOXEL_PROFILE_SCOPE_NAMED("Generate");
VoxelBufferInternal generated_voxels;
const Vector3i origin_in_voxels = mesh_block_pos * (mesh_block_size_factor * data_block_size << lod_index);
for (unsigned int i = 0; i < boxes_to_generate.size(); ++i) {
const Box3i &box = boxes_to_generate[i];
//print_line(String("size={0}").format(varray(box.size.to_vec3())));
generated_voxels.create(box.size);
//generated_voxels.set_voxel_f(2.0f, box.size.x / 2, box.size.y / 2, box.size.z / 2, VoxelBufferInternal::CHANNEL_SDF);
VoxelBlockRequest r{ generated_voxels, (box.pos << lod_index) + origin_in_voxels, lod_index };
generator->generate_block(r);
for (unsigned int ci = 0; ci < channels_count; ++ci) {
dst.copy_from(generated_voxels, Vector3i(), generated_voxels.get_size(),
box.pos + Vector3i(min_padding), channels[ci]);
}
}
}
@ -982,7 +1117,8 @@ void VoxelServer::BlockMeshRequest::run(VoxelTaskContext ctx) {
// TODO Cache?
VoxelBufferInternal voxels;
copy_block_and_neighbors(to_span(blocks, blocks_count),
voxels, min_padding, max_padding, mesher->get_used_channels_mask());
voxels, min_padding, max_padding, mesher->get_used_channels_mask(),
meshing_dependency->generator, data_block_size, lod, position);
const VoxelMesher::Input input = { voxels, lod };
mesher->build(surfaces_output, input);

View File

@ -117,7 +117,9 @@ public:
void set_volume_octree_lod_distance(uint32_t volume_id, float lod_distance);
void invalidate_volume_mesh_requests(uint32_t volume_id);
void request_block_mesh(uint32_t volume_id, const BlockMeshInput &input);
// TODO Add parameter to skip stream loading
void request_block_load(uint32_t volume_id, Vector3i block_pos, int lod, bool request_instances);
void request_all_stream_blocks(uint32_t volume_id);
void request_voxel_block_save(uint32_t volume_id, std::shared_ptr<VoxelBufferInternal> voxels, Vector3i block_pos,
int lod);
void request_instance_block_save(uint32_t volume_id, std::unique_ptr<VoxelInstanceBlockData> instances,
@ -222,6 +224,7 @@ private:
struct MeshingDependency {
Ref<VoxelMesher> mesher;
Ref<VoxelGenerator> generator;
bool valid = true;
};
@ -300,6 +303,21 @@ private:
// TODO Find a way to separate save, it doesnt need sorting
};
// Task loading every saved block of a volume's stream in one go (full load mode).
class AllBlocksDataRequest : public IVoxelTask {
public:
AllBlocksDataRequest();
~AllBlocksDataRequest();
void run(VoxelTaskContext ctx) override;
int get_priority() override;
bool is_cancelled() override;
void apply_result() override;
// Filled on the worker thread by run(), consumed on the main thread by apply_result()
VoxelStream::FullLoadingResult result;
uint32_t volume_id;
// Shared with the volume; also acts as the cancellation token (see is_cancelled)
std::shared_ptr<StreamingDependency> stream_dependency;
};
class BlockGenerateRequest : public IVoxelTask {
public:
BlockGenerateRequest();
@ -333,10 +351,13 @@ private:
void apply_result() override;
FixedArray<std::shared_ptr<VoxelBufferInternal>, VoxelConstants::MAX_BLOCK_COUNT_PER_REQUEST> blocks;
// TODO Need to provide format
//FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channel_depths;
Vector3i position; // In mesh blocks of the specified lod
uint32_t volume_id;
uint8_t lod;
uint8_t blocks_count;
uint8_t data_block_size;
bool has_run = false;
bool too_far = false;
PriorityDependency priority_dependency;

View File

@ -185,8 +185,12 @@ void VoxelBufferInternal::clear() {
void VoxelBufferInternal::clear_channel(unsigned int channel_index, uint64_t clear_value) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
clear_channel(channel, clear_value);
}
void VoxelBufferInternal::clear_channel(Channel &channel, uint64_t clear_value) {
if (channel.data != nullptr) {
delete_channel(channel_index);
delete_channel(channel);
}
channel.defval = clamp_value_for_depth(clear_value, channel.depth);
}
@ -423,25 +427,26 @@ inline bool is_uniform_b(const uint8_t *data, size_t item_count) {
bool VoxelBufferInternal::is_uniform(unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, true);
const Channel &channel = _channels[channel_index];
return is_uniform(channel);
}
bool VoxelBufferInternal::is_uniform(const Channel &channel) {
if (channel.data == nullptr) {
// Channel has been optimized
return true;
}
const size_t volume = get_volume();
// Channel isn't optimized, so must look at each voxel
switch (channel.depth) {
case DEPTH_8_BIT:
return ::is_uniform_b<uint8_t>(channel.data, volume);
return ::is_uniform_b<uint8_t>(channel.data, channel.size_in_bytes);
case DEPTH_16_BIT:
return ::is_uniform_b<uint16_t>(channel.data, volume);
return ::is_uniform_b<uint16_t>(channel.data, channel.size_in_bytes / 2);
case DEPTH_32_BIT:
return ::is_uniform_b<uint32_t>(channel.data, volume);
return ::is_uniform_b<uint32_t>(channel.data, channel.size_in_bytes / 4);
case DEPTH_64_BIT:
return ::is_uniform_b<uint64_t>(channel.data, volume);
return ::is_uniform_b<uint64_t>(channel.data, channel.size_in_bytes / 8);
default:
CRASH_NOW();
break;
@ -450,13 +455,40 @@ bool VoxelBufferInternal::is_uniform(unsigned int channel_index) const {
return true;
}
// Reads the first voxel of a non-compressed channel, reinterpreting the raw
// byte buffer according to the channel's bit depth. The channel must have
// allocated data (crashes otherwise).
uint64_t get_first_voxel(const VoxelBufferInternal::Channel &channel) {
CRASH_COND(channel.data == nullptr);
switch (channel.depth) {
case VoxelBufferInternal::DEPTH_8_BIT:
return channel.data[0];
case VoxelBufferInternal::DEPTH_16_BIT:
return reinterpret_cast<uint16_t *>(channel.data)[0];
case VoxelBufferInternal::DEPTH_32_BIT:
return reinterpret_cast<uint32_t *>(channel.data)[0];
case VoxelBufferInternal::DEPTH_64_BIT:
return reinterpret_cast<uint64_t *>(channel.data)[0];
default:
CRASH_NOW();
return 0;
}
}
void VoxelBufferInternal::compress_uniform_channels() {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
if (_channels[i].data != nullptr && is_uniform(i)) {
// TODO More direct way
const uint64_t v = get_voxel(0, 0, 0, i);
clear_channel(i, v);
}
Channel &channel = _channels[i];
compress_if_uniform(channel);
}
}
// If every voxel of the channel holds the same value, frees the channel's
// buffer and keeps only that value as the channel default.
void VoxelBufferInternal::compress_if_uniform(Channel &channel) {
VOXEL_PROFILE_SCOPE();
if (channel.data != nullptr && is_uniform(channel)) {
const uint64_t v = get_first_voxel(channel);
// clear_channel deallocates the buffer and stores `v` as the default value
clear_channel(channel, v);
}
}
@ -629,6 +661,10 @@ bool VoxelBufferInternal::create_channel_noinit(int i, Vector3i size) {
void VoxelBufferInternal::delete_channel(int i) {
Channel &channel = _channels[i];
delete_channel(channel);
}
void VoxelBufferInternal::delete_channel(Channel &channel) {
ERR_FAIL_COND(channel.data == nullptr);
// Don't use `_size` to obtain `data` byte count, since we could have changed `_size` up-front during a create().
// `size_in_bytes` reflects what is currently allocated inside `data`, regardless of anything else.

View File

@ -261,6 +261,7 @@ public:
for_each_index_and_pos(box, [&data, action_func, offset](size_t i, Vector3i pos) {
data.set(i, action_func(pos + offset, data[i]));
});
compress_if_uniform(channel);
}
// void action_func(Vector3i pos, Data0_T &inout_v0, Data1_T &inout_v1)
@ -284,6 +285,8 @@ public:
// TODO The caller must still specify exactly the correct type, maybe some conversion could be used
action_func(pos + offset, data0[i], data1[i]);
});
compress_if_uniform(channel0);
compress_if_uniform(channel1);
}
template <typename F>
@ -435,6 +438,10 @@ private:
bool create_channel_noinit(int i, Vector3i size);
bool create_channel(int i, uint64_t defval);
void delete_channel(int i);
void compress_if_uniform(Channel &channel);
static void delete_channel(Channel &channel);
static void clear_channel(Channel &channel, uint64_t clear_value);
static bool is_uniform(const Channel &channel);
private:
// Each channel can store arbitary data.

View File

@ -214,7 +214,8 @@ bool VoxelDataMap::is_block_surrounded(Vector3i pos) const {
return true;
}
void VoxelDataMap::copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask) const {
void VoxelDataMap::copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask,
void *callback_data, void (*gen_func)(void *, VoxelBufferInternal &, Vector3i)) const {
const Vector3i max_pos = min_pos + dst_buffer.get_size();
const Vector3i min_block_pos = voxel_to_block(min_pos);
@ -222,32 +223,49 @@ void VoxelDataMap::copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsig
const Vector3i block_size_v(_block_size, _block_size, _block_size);
unsigned int channels_count;
FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channels =
VoxelBufferInternal::mask_to_channels_list(channels_mask, channels_count);
Vector3i bpos;
for (bpos.z = min_block_pos.z; bpos.z < max_block_pos.z; ++bpos.z) {
for (bpos.x = min_block_pos.x; bpos.x < max_block_pos.x; ++bpos.x) {
for (bpos.y = min_block_pos.y; bpos.y < max_block_pos.y; ++bpos.y) {
for (unsigned int channel = 0; channel < VoxelBufferInternal::MAX_CHANNELS; ++channel) {
if (((1 << channel) & channels_mask) == 0) {
continue;
}
const VoxelDataBlock *block = get_block(bpos);
const Vector3i src_block_origin = block_to_voxel(bpos);
const VoxelDataBlock *block = get_block(bpos);
const Vector3i src_block_origin = block_to_voxel(bpos);
if (block != nullptr) {
const VoxelBufferInternal &src_buffer = block->get_voxels_const();
if (block != nullptr) {
const VoxelBufferInternal &src_buffer = block->get_voxels_const();
RWLockRead lock(src_buffer.get_lock());
for (unsigned int ci = 0; ci < channels_count; ++ci) {
const uint8_t channel = channels[ci];
dst_buffer.set_channel_depth(channel, src_buffer.get_channel_depth(channel));
RWLockRead lock(src_buffer.get_lock());
// Note: copy_from takes care of clamping the area if it's on an edge
dst_buffer.copy_from(src_buffer,
min_pos - src_block_origin,
src_buffer.get_size(),
Vector3i(),
channel);
}
} else {
} else if (gen_func != nullptr) {
const Box3i box = Box3i(bpos << _block_size_pow2, Vector3i(_block_size))
.clipped(Box3i(min_pos, dst_buffer.get_size()));
// TODO Format?
VoxelBufferInternal temp;
temp.create(box.size);
gen_func(callback_data, temp, box.pos);
for (unsigned int ci = 0; ci < channels_count; ++ci) {
dst_buffer.copy_from(temp, Vector3i(), temp.get_size(), box.pos - min_pos, channels[ci]);
}
} else {
for (unsigned int ci = 0; ci < channels_count; ++ci) {
const uint8_t channel = channels[ci];
// For now, inexistent blocks default to hardcoded defaults, corresponding to "empty space".
// If we want to change this, we may have to add an API for that.
dst_buffer.fill_area(

View File

@ -56,8 +56,13 @@ public:
void set_default_voxel(int value, unsigned int channel = 0);
int get_default_voxel(unsigned int channel = 0);
inline void copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask) const {
copy(min_pos, dst_buffer, channels_mask, nullptr, nullptr);
}
// Gets a copy of all voxels in the area starting at min_pos having the same size as dst_buffer.
void copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask) const;
void copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask, void *,
void (*gen_func)(void *, VoxelBufferInternal &, Vector3i)) const;
void paste(Vector3i min_pos, VoxelBufferInternal &src_buffer, unsigned int channels_mask, bool use_mask,
uint64_t mask_value, bool create_new_blocks);
@ -116,36 +121,53 @@ public:
bool is_area_fully_loaded(const Box3i voxels_box) const;
// D action(Vector3i pos, D value)
template <typename F>
void write_box(const Box3i &voxel_box, unsigned int channel, F action) {
inline void write_box(const Box3i &voxel_box, unsigned int channel, F action) {
write_box(voxel_box, channel, action, [](const VoxelBufferInternal &, const Vector3i &) {});
}
// D action(Vector3i pos, D value)
template <typename F, typename G>
void write_box(const Box3i &voxel_box, unsigned int channel, F action, G gen_func) {
const Box3i block_box = voxel_box.downscaled(get_block_size());
const Vector3i block_size(get_block_size());
block_box.for_each_cell_zxy([this, action, voxel_box, channel, block_size](Vector3i block_pos) {
block_box.for_each_cell_zxy([this, action, voxel_box, channel, block_size, gen_func](Vector3i block_pos) {
VoxelDataBlock *block = get_block(block_pos);
if (block != nullptr) {
const Vector3i block_origin = block_to_voxel(block_pos);
Box3i local_box(voxel_box.pos - block_origin, voxel_box.size);
local_box.clip(Box3i(Vector3i(), block_size));
block->get_voxels().write_box(local_box, channel, action, block_origin);
if (block == nullptr) {
VOXEL_PROFILE_SCOPE("Generate");
block = create_default_block(block_pos);
gen_func(block->get_voxels(), block_pos << get_block_size_pow2());
}
const Vector3i block_origin = block_to_voxel(block_pos);
Box3i local_box(voxel_box.pos - block_origin, voxel_box.size);
local_box.clip(Box3i(Vector3i(), block_size));
RWLockWrite wlock(block->get_voxels().get_lock());
block->get_voxels().write_box(local_box, channel, action, block_origin);
});
}
// action(Vector3i pos, D0 &value, D1 &value)
template <typename F>
void write_box_2(const Box3i &voxel_box, unsigned int channel0, unsigned int channel1, F action) {
inline void write_box_2(const Box3i &voxel_box, unsigned int channel0, unsigned int channel1, F action) {
write_box_2(voxel_box, channel0, channel1, action, [](const VoxelBufferInternal &, const Vector3i &) {});
}
// action(Vector3i pos, D0 &value, D1 &value)
template <typename F, typename G>
void write_box_2(const Box3i &voxel_box, unsigned int channel0, unsigned int channel1, F action, G gen_func) {
const Box3i block_box = voxel_box.downscaled(get_block_size());
const Vector3i block_size(get_block_size());
block_box.for_each_cell_zxy([this, action, voxel_box, channel0, channel1, block_size](Vector3i block_pos) {
block_box.for_each_cell_zxy([this, action, voxel_box, channel0, channel1, block_size, gen_func](Vector3i block_pos) {
VoxelDataBlock *block = get_block(block_pos);
if (block != nullptr) {
const Vector3i block_origin = block_to_voxel(block_pos);
Box3i local_box(voxel_box.pos - block_origin, voxel_box.size);
local_box.clip(Box3i(Vector3i(), block_size));
block->get_voxels().write_box_2_template<F, uint16_t, uint16_t>(
local_box, channel0, channel1, action, block_origin);
if (block == nullptr) {
block = create_default_block(block_pos);
gen_func(block->get_voxels(), block_pos << get_block_size_pow2());
}
const Vector3i block_origin = block_to_voxel(block_pos);
Box3i local_box(voxel_box.pos - block_origin, voxel_box.size);
local_box.clip(Box3i(Vector3i(), block_size));
RWLockWrite wlock(block->get_voxels().get_lock());
block->get_voxels().write_box_2_template<F, uint16_t, uint16_t>(
local_box, channel0, channel1, action, block_origin);
});
}

View File

@ -1,8 +1,10 @@
#include "voxel_stream_sqlite.h"
#include "../../thirdparty/sqlite/sqlite3.h"
#include "../../util/godot/funcs.h"
#include "../../util/macros.h"
#include "../../util/profiling.h"
#include "../compressed_data.h"
#include <limits>
#include <string>
@ -87,6 +89,13 @@ public:
bool save_block(BlockLocation loc, const std::vector<uint8_t> &block_data, BlockType type);
VoxelStream::Result load_block(BlockLocation loc, std::vector<uint8_t> &out_block_data, BlockType type);
bool load_all_blocks(void *callback_data,
void (*process_block_func)(
void *callback_data,
BlockLocation location,
Span<const uint8_t> voxel_data,
Span<const uint8_t> instances_data));
Meta load_meta();
void save_meta(Meta meta);
@ -130,6 +139,7 @@ private:
sqlite3_stmt *_save_meta_statement = nullptr;
sqlite3_stmt *_load_channels_statement = nullptr;
sqlite3_stmt *_save_channel_statement = nullptr;
sqlite3_stmt *_load_all_blocks_statement = nullptr;
};
VoxelStreamSQLiteInternal::VoxelStreamSQLiteInternal() {
@ -206,6 +216,9 @@ bool VoxelStreamSQLiteInternal::open(const char *fpath) {
"ON CONFLICT(idx) DO UPDATE SET depth=excluded.depth")) {
return false;
}
if (!prepare(db, &_load_all_blocks_statement, "SELECT * FROM blocks")) {
return false;
}
// Is the database setup?
Meta meta = load_meta();
@ -240,6 +253,7 @@ void VoxelStreamSQLiteInternal::close() {
finalize(_save_meta_statement);
finalize(_load_channels_statement);
finalize(_save_channel_statement);
finalize(_load_all_blocks_statement);
sqlite3_close(_db);
_db = nullptr;
_opened_path.clear();
@ -389,6 +403,59 @@ VoxelStream::Result VoxelStreamSQLiteInternal::load_block(
return result;
}
bool VoxelStreamSQLiteInternal::load_all_blocks(void *callback_data,
void (*process_block_func)(
void *callback_data,
BlockLocation location,
Span<const uint8_t> voxel_data,
Span<const uint8_t> instances_data)) {
VOXEL_PROFILE_SCOPE();
CRASH_COND(process_block_func == nullptr);
sqlite3 *db = _db;
sqlite3_stmt *load_all_blocks_statement = _load_all_blocks_statement;
int rc;
rc = sqlite3_reset(load_all_blocks_statement);
if (rc != SQLITE_OK) {
ERR_PRINT(sqlite3_errmsg(db));
return false;
}
while (true) {
rc = sqlite3_step(load_all_blocks_statement);
if (rc == SQLITE_ROW) {
VOXEL_PROFILE_SCOPE_NAMED("Row");
const uint64_t eloc = sqlite3_column_int64(load_all_blocks_statement, 0);
const BlockLocation loc = BlockLocation::decode(eloc);
const void *voxels_blob = sqlite3_column_blob(load_all_blocks_statement, 1);
const size_t voxels_blob_size = sqlite3_column_bytes(load_all_blocks_statement, 1);
const void *instances_blob = sqlite3_column_blob(load_all_blocks_statement, 2);
const size_t instances_blob_size = sqlite3_column_bytes(load_all_blocks_statement, 2);
// Using a function pointer because returning a big list of a copy of all the blobs can
// waste a lot of temporary memory
process_block_func(callback_data, loc,
Span<const uint8_t>(reinterpret_cast<const uint8_t *>(voxels_blob), voxels_blob_size),
Span<const uint8_t>(reinterpret_cast<const uint8_t *>(instances_blob), instances_blob_size));
} else if (rc == SQLITE_DONE) {
break;
} else {
ERR_PRINT(String("Unexpected SQLite return code: {0}; errmsg: {1}").format(rc, sqlite3_errmsg(db)));
return false;
}
}
return true;
}
VoxelStreamSQLiteInternal::Meta VoxelStreamSQLiteInternal::load_meta() {
sqlite3 *db = _db;
sqlite3_stmt *load_meta_statement = _load_meta_statement;
@ -751,6 +818,60 @@ void VoxelStreamSQLite::save_instance_blocks(Span<VoxelStreamInstanceDataRequest
}
}
// Loads every saved block (voxels and instances) from the database at once,
// appending decoded results to `result`. Used by full load mode, where all
// edited data must become resident in memory.
void VoxelStreamSQLite::load_all_blocks(FullLoadingResult &result) {
	VOXEL_PROFILE_SCOPE();

	VoxelStreamSQLiteInternal *con = get_connection();
	ERR_FAIL_COND(con == nullptr);

	// Passed through the C-style callback below, which cannot capture.
	struct Context {
		VoxelStreamSQLite &stream;
		FullLoadingResult &result;
	};

	Context ctx{ *this, result };

	const bool request_result = con->load_all_blocks(&ctx, [](void *callback_data,
														const BlockLocation location,
														Span<const uint8_t> voxel_data,
														Span<const uint8_t> instances_data) {
		Context *ctx = reinterpret_cast<Context *>(callback_data);

		if (voxel_data.size() == 0 && instances_data.size() == 0) {
			// A row should carry at least one of the two blobs; skip it otherwise.
			PRINT_VERBOSE(String("Unexpected empty voxel data and instances data at {0} lod {1}")
								  .format(varray(Vector3(location.x, location.y, location.z), location.lod)));
			return;
		}

		FullLoadingResult::Block result_block;
		result_block.position = Vector3i(location.x, location.y, location.z);
		result_block.lod = location.lod;

		if (voxel_data.size() > 0) {
			// Voxels: decompress + deserialize into a new shared buffer.
			std::shared_ptr<VoxelBufferInternal> voxels = gd_make_shared<VoxelBufferInternal>();
			ERR_FAIL_COND(!ctx->stream._voxel_block_serializer.decompress_and_deserialize(voxel_data, *voxels));
			result_block.voxels = voxels;
		}

		if (instances_data.size() > 0) {
			// Instances are stored compressed; decompress into a reusable scratch buffer first.
			std::vector<uint8_t> &temp_block_data = ctx->stream._temp_block_data;
			if (!VoxelCompressedData::decompress(instances_data, temp_block_data)) {
				ERR_PRINT("Failed to decompress instance block");
				return;
			}
			result_block.instances_data = std::make_unique<VoxelInstanceBlockData>();
			if (!deserialize_instance_block_data(*result_block.instances_data, to_span_const(temp_block_data))) {
				ERR_PRINT("Failed to deserialize instance block");
				return;
			}
		}

		ctx->result.blocks.push_back(std::move(result_block));
	});

	ERR_FAIL_COND(request_result == false);
}
int VoxelStreamSQLite::get_used_channels_mask() const {
// Assuming all, since that stream can store anything.
return VoxelBufferInternal::ALL_CHANNELS_MASK;

View File

@ -32,6 +32,9 @@ public:
Span<VoxelStreamInstanceDataRequest> out_blocks, Span<Result> out_results) override;
void save_instance_blocks(Span<VoxelStreamInstanceDataRequest> p_blocks) override;
bool supports_loading_all_blocks() const override { return true; }
void load_all_blocks(FullLoadingResult &result) override;
int get_used_channels_mask() const override;
void flush_cache();

View File

@ -49,6 +49,10 @@ void VoxelStream::save_instance_blocks(Span<VoxelStreamInstanceDataRequest> p_bl
// Can be implemented in subclasses
}
// Default implementation for streams that cannot load everything at once.
// Subclasses returning true from `supports_loading_all_blocks()` must override this.
void VoxelStream::load_all_blocks(FullLoadingResult &result) {
	ERR_PRINT(String("{0} does not support `load_all_blocks`").format(varray(get_class_name())));
}
int VoxelStream::get_used_channels_mask() const {
return 0;
}

View File

@ -50,6 +50,7 @@ public:
// This function is recommended if you save to files, because you can batch their access.
virtual void immerge_blocks(Span<VoxelBlockRequest> p_blocks);
// TODO Merge support functions into a single getter with Feature bitmask
virtual bool supports_instance_blocks() const;
virtual void load_instance_blocks(
@ -57,6 +58,20 @@ public:
virtual void save_instance_blocks(Span<VoxelStreamInstanceDataRequest> p_blocks);
struct FullLoadingResult {
struct Block {
std::shared_ptr<VoxelBufferInternal> voxels;
std::unique_ptr<VoxelInstanceBlockData> instances_data;
Vector3i position;
unsigned int lod;
};
std::vector<Block> blocks;
};
virtual bool supports_loading_all_blocks() const { return false; }
virtual void load_all_blocks(FullLoadingResult &result);
// Tells which channels can be found in this stream.
// The simplest implementation is to return them all.
// One reason to specify which channels are available is to help the editor detect configuration issues,

View File

@ -15,6 +15,8 @@
#include <scene/3d/mesh_instance.h>
#include <scene/resources/packed_scene.h>
#include "../thirdparty/nvtx3/nvToolsExt.h"
namespace {
Ref<ArrayMesh> build_mesh(const Vector<Array> surfaces, Mesh::PrimitiveType primitive, int compression_flags,
@ -316,6 +318,14 @@ void VoxelLodTerrain::_on_stream_params_changed() {
// TODO We have to figure out streams that have a LOD requirement
// const int stream_lod_count = _stream->get_lod_count();
// _set_lod_count(min(stream_lod_count, get_lod_count()));
if (_full_load_mode && !_stream->supports_loading_all_blocks()) {
ERR_PRINT("The chosen stream does not supports loading all blocks. Full load mode cannot be used.");
_full_load_mode = false;
#ifdef TOOLS_ENABLED
property_list_changed_notify();
#endif
}
}
VoxelServer::get_singleton()->set_volume_data_block_size(_volume_id, get_data_block_size());
@ -405,6 +415,17 @@ void VoxelLodTerrain::set_mesh_block_size(unsigned int mesh_block_size) {
set_voxel_bounds(_bounds_in_voxels);
}
// Enables or disables full load mode. A change triggers stream reconfiguration,
// which may reject the mode if the stream does not support loading all blocks.
void VoxelLodTerrain::set_full_load_mode_enabled(bool enabled) {
	if (_full_load_mode == enabled) {
		// No change, nothing to reconfigure.
		return;
	}
	_full_load_mode = enabled;
	_on_stream_params_changed();
}
// Tells whether all edited blocks are loaded at once when the stream is assigned,
// enabling editing anywhere in the volume.
bool VoxelLodTerrain::is_full_load_mode_enabled() const {
	return _full_load_mode;
}
void VoxelLodTerrain::_set_block_size_po2(int p_block_size_po2) {
_lods[0].data_map.create(p_block_size_po2, 0);
}
@ -451,41 +472,88 @@ inline int get_octree_size_po2(const VoxelLodTerrain &self) {
}
// An area is editable when all of its LOD0 data blocks are resident,
// or unconditionally when full load mode is active.
bool VoxelLodTerrain::is_area_editable(Box3i p_voxel_box) const {
	if (_full_load_mode) {
		// Missing blocks are generated on the fly, so any position can be edited.
		return true;
	}
	const Box3i clipped_box = p_voxel_box.clipped(_bounds_in_voxels);
	return _lods[0].data_map.is_area_fully_loaded(clipped_box);
}
uint64_t VoxelLodTerrain::get_voxel(Vector3i pos, unsigned int channel, uint64_t defval) const {
Vector3i block_pos = pos >> get_data_block_size_pow2();
for (unsigned int lod_index = 0; lod_index < _lod_count; ++lod_index) {
const Lod &lod = _lods[lod_index];
const VoxelDataBlock *block = lod.data_map.get_block(block_pos);
if (block != nullptr) {
return lod.data_map.get_voxel(pos, channel);
}
// Fallback on lower LOD
block_pos = block_pos >> 1;
VoxelSingleValue VoxelLodTerrain::get_voxel(Vector3i pos, unsigned int channel, VoxelSingleValue defval) {
if (!_bounds_in_voxels.contains(pos)) {
return defval;
}
Vector3i block_pos = pos >> get_data_block_size_pow2();
if (_full_load_mode) {
const Lod &lod0 = _lods[0];
const VoxelDataBlock *block = lod0.data_map.get_block(block_pos);
if (block == nullptr) {
if (_generator.is_valid()) {
return _generator->generate_single(pos, channel);
}
}
return defval;
} else {
for (unsigned int lod_index = 0; lod_index < _lod_count; ++lod_index) {
const Lod &lod = _lods[lod_index];
const VoxelDataBlock *block = lod.data_map.get_block(block_pos);
if (block != nullptr) {
VoxelSingleValue v;
if (channel == VoxelBufferInternal::CHANNEL_SDF) {
v.f = lod.data_map.get_voxel_f(pos, channel);
} else {
v.i = lod.data_map.get_voxel(pos, channel);
}
return v;
}
// Fallback on lower LOD
block_pos = block_pos >> 1;
}
return defval;
}
return defval;
}
// Sets a single voxel at LOD0 without scheduling remeshing or LOD propagation.
// Returns false if the voxel could not be set: block not loaded while full load
// mode is off, or no generator available to produce the missing block.
// Fixes: removed stale pre-change lines; guard against dereferencing a null block
// when full load mode is on but no generator is assigned; generate the missing
// block from its block-aligned origin instead of the edited voxel position.
bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int channel, uint64_t value) {
	const Vector3i block_pos_lod0 = pos >> get_data_block_size_pow2();
	Lod &lod0 = _lods[0];
	VoxelDataBlock *block = lod0.data_map.get_block(block_pos_lod0);
	if (block == nullptr) {
		if (!_full_load_mode) {
			return false;
		}
		if (!_generator.is_valid()) {
			// Cannot produce the missing block, so the edit cannot be applied.
			return false;
		}
		std::shared_ptr<VoxelBufferInternal> voxels = gd_make_shared<VoxelBufferInternal>();
		voxels->create(Vector3i(get_data_block_size()));
		// Use the block's origin in voxels so generated content is aligned with the map.
		VoxelBlockRequest r{ *voxels, block_pos_lod0 << get_data_block_size_pow2(), 0 };
		_generator->generate_block(r);
		block = lod0.data_map.set_block_buffer(block_pos_lod0, voxels);
	}
	// TODO If it turns out to be a problem, use CoW
	VoxelBufferInternal &voxels = block->get_voxels();
	RWLockWrite lock(voxels.get_lock());
	voxels.set_voxel(value, lod0.data_map.to_local(pos), channel);
	return true;
}
// Copies voxels at LOD0 into `dst_buffer`, starting at `p_origin_voxels`.
// In full load mode, gaps with no edited blocks are filled by the generator on the fly.
// Fix: removed the stale const-qualified signature and duplicated copy call left
// over from the previous version of this function.
void VoxelLodTerrain::copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask) {
	const Lod &lod0 = _lods[0];
	if (_full_load_mode && _generator.is_valid()) {
		VoxelGenerator *generator = *_generator;
		lod0.data_map.copy(p_origin_voxels, dst_buffer, channels_mask, generator,
				[](void *callback_data, VoxelBufferInternal &voxels, Vector3i pos) {
					// `callback_data` carries the generator because the map API takes a C-style callback.
					VoxelGenerator *generator = reinterpret_cast<VoxelGenerator *>(callback_data);
					VoxelBlockRequest r{ voxels, pos, 0 };
					generator->generate_block(r);
				});
	} else {
		lod0.data_map.copy(p_origin_voxels, dst_buffer, channels_mask);
	}
}
// Marks intersecting blocks in the area as modified, updates LODs and schedules remeshing.
@ -582,6 +650,12 @@ void VoxelLodTerrain::stop_updater() {
// Registers the stream and generator with the server. In full load mode, also
// requests every saved block at once so editing becomes possible anywhere.
void VoxelLodTerrain::start_streamer() {
	VoxelServer::get_singleton()->set_volume_stream(_volume_id, _stream);
	VoxelServer::get_singleton()->set_volume_generator(_volume_id, _generator);

	if (_full_load_mode && _stream.is_valid()) {
		// TODO May want to defer this to be sure it's not done multiple times.
		// This would be a side-effect of setting properties one by one, either by scene loader or by script
		VoxelServer::get_singleton()->request_all_stream_blocks(_volume_id);
	}
}
void VoxelLodTerrain::stop_streamer() {
@ -943,37 +1017,39 @@ bool VoxelLodTerrain::check_block_loaded_and_meshed(const Vector3i &p_mesh_block
ERR_FAIL_COND_V(!check_block_sizes(data_block_size, mesh_block_size), false);
#endif
if (mesh_block_size > data_block_size) {
const int factor = mesh_block_size / data_block_size;
const Vector3i data_block_pos0 = p_mesh_block_pos * factor;
if (_full_load_mode == false) {
if (mesh_block_size > data_block_size) {
const int factor = mesh_block_size / data_block_size;
const Vector3i data_block_pos0 = p_mesh_block_pos * factor;
bool loaded = true;
bool loaded = true;
for (int z = 0; z < factor; ++z) {
for (int x = 0; x < factor; ++x) {
for (int y = 0; y < factor; ++y) {
const Vector3i data_block_pos(data_block_pos0 + Vector3i(x, y, z));
VoxelDataBlock *data_block = lod.data_map.get_block(data_block_pos);
for (int z = 0; z < factor; ++z) {
for (int x = 0; x < factor; ++x) {
for (int y = 0; y < factor; ++y) {
const Vector3i data_block_pos(data_block_pos0 + Vector3i(x, y, z));
VoxelDataBlock *data_block = lod.data_map.get_block(data_block_pos);
if (data_block == nullptr) {
loaded = false;
// TODO This is quite lossy in this case, if we ask for 8 blocks in an octant
try_schedule_loading_with_neighbors(data_block_pos, lod_index);
if (data_block == nullptr) {
loaded = false;
// TODO This is quite lossy in this case, if we ask for 8 blocks in an octant
try_schedule_loading_with_neighbors(data_block_pos, lod_index);
}
}
}
}
}
if (!loaded) {
return false;
}
if (!loaded) {
return false;
}
} else if (mesh_block_size == data_block_size) {
const Vector3i data_block_pos = p_mesh_block_pos;
VoxelDataBlock *block = lod.data_map.get_block(data_block_pos);
if (block == nullptr) {
try_schedule_loading_with_neighbors(data_block_pos, lod_index);
return false;
} else if (mesh_block_size == data_block_size) {
const Vector3i data_block_pos = p_mesh_block_pos;
VoxelDataBlock *block = lod.data_map.get_block(data_block_pos);
if (block == nullptr) {
try_schedule_loading_with_neighbors(data_block_pos, lod_index);
return false;
}
}
}
@ -1014,17 +1090,19 @@ bool VoxelLodTerrain::check_block_mesh_updated(VoxelMeshBlock *block) {
}
});
// Check if neighbors are loaded
bool surrounded = true;
for (unsigned int i = 0; i < neighbor_positions_count; ++i) {
const Vector3i npos = neighbor_positions[i];
if (!lod.data_map.has_block(npos)) {
// That neighbor is not loaded
surrounded = false;
if (!lod.has_loading_block(npos)) {
// Schedule loading for that neighbor
lod.blocks_to_load.push_back(npos);
lod.loading_blocks.insert(npos);
if (_full_load_mode == false) {
// Check if neighbors are loaded
for (unsigned int i = 0; i < neighbor_positions_count; ++i) {
const Vector3i npos = neighbor_positions[i];
if (!lod.data_map.has_block(npos)) {
// That neighbor is not loaded
surrounded = false;
if (!lod.has_loading_block(npos)) {
// Schedule loading for that neighbor
lod.blocks_to_load.push_back(npos);
lod.loading_blocks.insert(npos);
}
}
}
}
@ -1102,7 +1180,7 @@ void VoxelLodTerrain::_process(float delta) {
ProfilingClock profiling_clock;
// Unload data blocks falling out of block region extent
{
if (_full_load_mode == false) {
VOXEL_PROFILE_SCOPE_NAMED("Sliding box data unload");
// TODO Could it actually be enough to have a rolling update on all blocks?
@ -1840,6 +1918,7 @@ void VoxelLodTerrain::process_fading_blocks(float delta) {
}
void VoxelLodTerrain::flush_pending_lod_edits() {
VOXEL_PROFILE_SCOPE();
// Propagates edits performed so far to other LODs.
// These LODs must be currently in memory, otherwise terrain data will miss it.
// This is currently ensured by the fact we load blocks in a "pyramidal" way,
@ -1886,9 +1965,25 @@ void VoxelLodTerrain::flush_pending_lod_edits() {
src_block->set_needs_lodding(false);
if (dst_block == nullptr) {
ERR_PRINT(String("Destination block {0} not found when cascading edits on LOD {1}")
.format(varray(dst_bpos.to_vec3(), dst_lod_index)));
continue;
if (_full_load_mode) {
// TODO Doing this on the main thread can be very demanding and cause a stall.
// We should find a way to make it asynchronous, not need mips, or not edit outside viewers area.
std::shared_ptr<VoxelBufferInternal> voxels = gd_make_shared<VoxelBufferInternal>();
voxels->create(Vector3i(get_data_block_size()));
if (_generator.is_valid()) {
VOXEL_PROFILE_SCOPE_NAMED("Generate");
VoxelBlockRequest r{ *voxels,
dst_bpos << (dst_lod_index + get_data_block_size_pow2()),
int(dst_lod_index) };
_generator->generate_block(r);
}
dst_block = dst_lod.data_map.set_block_buffer(dst_bpos, voxels);
} else {
ERR_PRINT(String("Destination block {0} not found when cascading edits on LOD {1}")
.format(varray(dst_bpos.to_vec3(), dst_lod_index)));
continue;
}
}
// The block and its lower LODs are expected to be available.
@ -1918,6 +2013,7 @@ void VoxelLodTerrain::flush_pending_lod_edits() {
// This must always be done after an edit before it gets saved, otherwise LODs won't match and it will look ugly.
// TODO Optimization: try to narrow to edited region instead of taking whole block
{
VOXEL_PROFILE_SCOPE_NAMED("Downscale");
RWLockWrite lock(src_block->get_voxels().get_lock());
src_block->get_voxels().downscale_to(
dst_block->get_voxels(), Vector3i(), src_block->get_voxels_const().get_size(), rel * half_bs);
@ -2483,6 +2579,17 @@ void VoxelLodTerrain::update_gizmos() {
}
}
// Edited blocks
if (_show_edited_lod0_blocks) {
const Lod &lod0 = _lods[0];
const int data_block_size = lod0.data_map.get_block_size();
lod0.data_map.for_all_blocks([&dr, parent_transform, data_block_size](const VoxelDataBlock &block) {
const Transform local_transform(Basis(), (block.position * data_block_size).to_vec3());
const Transform t = parent_transform * local_transform;
dr.draw_box_mm(t, Color8(255, 255, 0, 255));
});
}
dr.end();
}
@ -2620,6 +2727,9 @@ void VoxelLodTerrain::_bind_methods() {
ClassDB::bind_method(D_METHOD("get_data_block_size"), &VoxelLodTerrain::get_data_block_size);
ClassDB::bind_method(D_METHOD("get_data_block_region_extent"), &VoxelLodTerrain::get_data_block_region_extent);
ClassDB::bind_method(D_METHOD("set_full_load_mode_enabled"), &VoxelLodTerrain::set_full_load_mode_enabled);
ClassDB::bind_method(D_METHOD("is_full_load_mode_enabled"), &VoxelLodTerrain::is_full_load_mode_enabled);
ClassDB::bind_method(D_METHOD("get_statistics"), &VoxelLodTerrain::_b_get_statistics);
ClassDB::bind_method(D_METHOD("voxel_to_data_block_position", "lod_index"),
&VoxelLodTerrain::voxel_to_data_block_position);
@ -2693,4 +2803,6 @@ void VoxelLodTerrain::_bind_methods() {
ADD_PROPERTY(PropertyInfo(Variant::BOOL, "run_stream_in_editor"),
"set_run_stream_in_editor", "is_stream_running_in_editor");
ADD_PROPERTY(PropertyInfo(Variant::INT, "mesh_block_size"), "set_mesh_block_size", "get_mesh_block_size");
ADD_PROPERTY(PropertyInfo(Variant::BOOL, "full_load_mode_enabled"),
"set_full_load_mode_enabled", "is_full_load_mode_enabled");
}

View File

@ -79,31 +79,48 @@ public:
unsigned int get_mesh_block_size() const;
void set_mesh_block_size(unsigned int mesh_block_size);
void set_full_load_mode_enabled(bool enabled);
bool is_full_load_mode_enabled() const;
bool is_area_editable(Box3i p_box) const;
uint64_t get_voxel(Vector3i pos, unsigned int channel, uint64_t defval) const;
VoxelSingleValue get_voxel(Vector3i pos, unsigned int channel, VoxelSingleValue defval);
bool try_set_voxel_without_update(Vector3i pos, unsigned int channel, uint64_t value);
void copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask) const;
void copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask);
// Applies `action` to all voxels of `channel` in the given box at LOD0, then
// schedules the required updates. When full load mode is off, the whole area
// must be loaded or the edit is dropped; otherwise missing blocks are filled
// by the generator before being edited.
// Fix: removed stale pre-change lines (old editable/else structure) interleaved
// with the new implementation.
template <typename F>
void write_box(const Box3i &p_voxel_box, unsigned int channel, F action) {
	const Box3i voxel_box = p_voxel_box.clipped(_bounds_in_voxels);
	if (_full_load_mode == false && !is_area_editable(voxel_box)) {
		PRINT_VERBOSE("Area not editable");
		return;
	}
	Ref<VoxelGenerator> generator = _generator;
	_lods[0].data_map.write_box(voxel_box, channel, action,
			[&generator](VoxelBufferInternal &voxels, Vector3i pos) {
				if (generator.is_valid()) {
					VoxelBlockRequest r{ voxels, pos, 0 };
					generator->generate_block(r);
				}
			});
	post_edit_area(voxel_box);
}
// Two-channel variant of `write_box`: applies `action` to all voxels of both
// channels in the given box at LOD0, then schedules the required updates.
// Same full-load-mode semantics as `write_box`.
// Fix: removed stale pre-change lines interleaved with the new implementation.
template <typename F>
void write_box_2(const Box3i &p_voxel_box, unsigned int channel1, unsigned int channel2, F action) {
	const Box3i voxel_box = p_voxel_box.clipped(_bounds_in_voxels);
	if (_full_load_mode == false && !is_area_editable(voxel_box)) {
		PRINT_VERBOSE("Area not editable");
		return;
	}
	Ref<VoxelGenerator> generator = _generator;
	_lods[0].data_map.write_box_2(voxel_box, channel1, channel2, action,
			[&generator](VoxelBufferInternal &voxels, Vector3i pos) {
				if (generator.is_valid()) {
					VoxelBlockRequest r{ voxels, pos, 0 };
					generator->generate_block(r);
				}
			});
	post_edit_area(voxel_box);
}
// These must be called after an edit
@ -317,13 +334,15 @@ private:
float _lod_distance = 0.f;
float _lod_fade_duration = 0.f;
unsigned int _view_distance_voxels = 512;
bool _full_load_mode = false;
bool _run_stream_in_editor = true;
#ifdef TOOLS_ENABLED
bool _show_gizmos_enabled = false;
bool _show_octree_bounds_gizmos = true;
bool _show_volume_bounds_gizmos = true;
bool _show_gizmos_enabled = true;
bool _show_octree_bounds_gizmos = false;
bool _show_volume_bounds_gizmos = false;
bool _show_octree_node_gizmos = false;
bool _show_edited_lod0_blocks = true;
VoxelDebug::DebugRenderer _debug_renderer;
#endif

View File

@ -388,8 +388,10 @@ void test_voxel_graph_generator_texturing() {
// Single value tests
{
const float sdf_must_be_in_air = generator->generate_single(Vector3i(-2, 0, 0));
const float sdf_must_be_in_ground = generator->generate_single(Vector3i(2, 0, 0));
const float sdf_must_be_in_air =
generator->generate_single(Vector3i(-2, 0, 0), VoxelBufferInternal::CHANNEL_SDF).f;
const float sdf_must_be_in_ground =
generator->generate_single(Vector3i(2, 0, 0), VoxelBufferInternal::CHANNEL_SDF).f;
ERR_FAIL_COND(sdf_must_be_in_air <= 0.f);
ERR_FAIL_COND(sdf_must_be_in_ground >= 0.f);
@ -403,7 +405,7 @@ void test_voxel_graph_generator_texturing() {
// Sample two points 1 unit below ground at to heights on the slope
{
const float sdf = generator->generate_single(Vector3i(-2, -3, 0));
const float sdf = generator->generate_single(Vector3i(-2, -3, 0), VoxelBufferInternal::CHANNEL_SDF).f;
ERR_FAIL_COND(sdf >= 0.f);
const VoxelGraphRuntime::State &state = VoxelGeneratorGraph::get_last_state_from_current_thread();
@ -419,7 +421,7 @@ void test_voxel_graph_generator_texturing() {
ERR_FAIL_COND(out_weight1_buffer.data[0] > 0.f);
}
{
const float sdf = generator->generate_single(Vector3i(2, 1, 0));
const float sdf = generator->generate_single(Vector3i(2, 1, 0), VoxelBufferInternal::CHANNEL_SDF).f;
ERR_FAIL_COND(sdf >= 0.f);
const VoxelGraphRuntime::State &state = VoxelGeneratorGraph::get_last_state_from_current_thread();

View File

@ -218,6 +218,12 @@ public:
}
}
inline void difference(const Box3i &b, std::vector<Box3i> &output) {
difference(b, [&output](const Box3i &sub_box) {
output.push_back(sub_box);
});
}
// Calls a function on all side cell positions belonging to the box.
// This function was implemented with no particular order in mind.
template <typename F>