Split VoxelBuffer in two: a script-facing wrapper and an internal class.

The internal class is the one used throughout the voxel engine,
and does not inherit Reference.
Marc Gilleron 2021-09-26 04:14:50 +01:00
parent 43adf2d6e7
commit 30db0c5193
65 changed files with 2152 additions and 1913 deletions
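In practice the split looks like this; a minimal sketch inferred from the call sites in this diff (member lists are illustrative, not the full API):

// The internal class is a plain C++ object used throughout the engine,
// with no Reference/refcounting overhead:
class VoxelBufferInternal {
public:
    enum ChannelId { CHANNEL_TYPE, CHANNEL_SDF, CHANNEL_COLOR, CHANNEL_INDICES, CHANNEL_WEIGHTS, /* ... */ MAX_CHANNELS };
    void create(Vector3i size);
    uint64_t get_voxel(int x, int y, int z, unsigned int channel) const;
    // ... channels, metadata, locking, etc.
};

// Only the script-facing wrapper inherits Reference. It owns one internal
// buffer and hands it out to engine code via get_buffer():
class VoxelBuffer : public Reference {
    GDCLASS(VoxelBuffer, Reference)
public:
    VoxelBufferInternal &get_buffer() { return _buffer; }
    const VoxelBufferInternal &get_buffer() const { return _buffer; }
private:
    VoxelBufferInternal _buffer;
};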

View File

@ -4,7 +4,7 @@
#include "../util/profiling.h"
VoxelTool::VoxelTool() {
_sdf_scale = VoxelBuffer::get_sdf_quantization_scale(VoxelBuffer::DEFAULT_SDF_CHANNEL_DEPTH);
_sdf_scale = VoxelBufferInternal::get_sdf_quantization_scale(VoxelBufferInternal::DEFAULT_SDF_CHANNEL_DEPTH);
}
void VoxelTool::set_value(uint64_t val) {

View File

@ -27,9 +27,9 @@ void VoxelToolBuffer::do_sphere(Vector3 center, float radius) {
Box3i box(Vector3i(center) - Vector3i(Math::floor(radius)), Vector3i(Math::ceil(radius) * 2));
box.clip(Box3i(Vector3i(), _buffer->get_size()));
_buffer->write_box_2_template<TextureBlendSphereOp, uint16_t, uint16_t>(box,
VoxelBuffer::CHANNEL_INDICES,
VoxelBuffer::CHANNEL_WEIGHTS,
_buffer->get_buffer().write_box_2_template<TextureBlendSphereOp, uint16_t, uint16_t>(box,
VoxelBufferInternal::CHANNEL_INDICES,
VoxelBufferInternal::CHANNEL_WEIGHTS,
TextureBlendSphereOp(center, radius, _texture_params),
Vector3i());
@ -38,17 +38,17 @@ void VoxelToolBuffer::do_sphere(Vector3 center, float radius) {
uint64_t VoxelToolBuffer::_get_voxel(Vector3i pos) const {
ERR_FAIL_COND_V(_buffer.is_null(), 0);
return _buffer->get_voxel(pos, _channel);
return _buffer->get_buffer().get_voxel(pos, _channel);
}
float VoxelToolBuffer::_get_voxel_f(Vector3i pos) const {
ERR_FAIL_COND_V(_buffer.is_null(), 0);
return _buffer->get_voxel_f(pos.x, pos.y, pos.z, _channel);
return _buffer->get_buffer().get_voxel_f(pos.x, pos.y, pos.z, _channel);
}
void VoxelToolBuffer::_set_voxel(Vector3i pos, uint64_t v) {
ERR_FAIL_COND(_buffer.is_null());
return _buffer->set_voxel(v, pos, _channel);
return _buffer->get_buffer().set_voxel(v, pos, _channel);
}
void VoxelToolBuffer::_set_voxel_f(Vector3i pos, float v) {
@ -63,12 +63,12 @@ void VoxelToolBuffer::_post_edit(const Box3i &box) {
void VoxelToolBuffer::set_voxel_metadata(Vector3i pos, Variant meta) {
ERR_FAIL_COND(_buffer.is_null());
_buffer->set_voxel_metadata(pos, meta);
_buffer->get_buffer().set_voxel_metadata(pos, meta);
}
Variant VoxelToolBuffer::get_voxel_metadata(Vector3i pos) const {
ERR_FAIL_COND_V(_buffer.is_null(), Variant());
return _buffer->get_voxel_metadata(pos);
return _buffer->get_buffer().get_voxel_metadata(pos);
}
void VoxelToolBuffer::paste(Vector3i p_pos, Ref<VoxelBuffer> p_voxels, uint8_t channels_mask, bool use_mask,
@ -81,8 +81,8 @@ void VoxelToolBuffer::paste(Vector3i p_pos, Ref<VoxelBuffer> p_voxels, uint8_t c
ERR_FAIL_COND(_buffer.is_null());
ERR_FAIL_COND(p_voxels.is_null());
VoxelBuffer *dst = *_buffer;
const VoxelBuffer *src = *p_voxels;
VoxelBufferInternal &dst = _buffer->get_buffer();
const VoxelBufferInternal &src = p_voxels->get_buffer();
Box3i box(p_pos, p_voxels->get_size());
const Vector3i min_noclamp = box.pos;
@ -93,8 +93,8 @@ void VoxelToolBuffer::paste(Vector3i p_pos, Ref<VoxelBuffer> p_voxels, uint8_t c
}
unsigned int channel_count;
FixedArray<uint8_t, VoxelBuffer::MAX_CHANNELS> channels =
VoxelBuffer::mask_to_channels_list(channels_mask, channel_count);
FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channels =
VoxelBufferInternal::mask_to_channels_list(channels_mask, channel_count);
const Vector3i box_max = box.pos + box.size;
@ -110,17 +110,18 @@ void VoxelToolBuffer::paste(Vector3i p_pos, Ref<VoxelBuffer> p_voxels, uint8_t c
for (int y = box.pos.y; y < box_max.y; ++y) {
const int by = y - min_noclamp.y;
const uint64_t v = src->get_voxel(bx, by, bz, channel_index);
const uint64_t v = src.get_voxel(bx, by, bz, channel_index);
if (v != mask_value) {
dst->set_voxel(v, x, y, z, channel_index);
dst.set_voxel(v, x, y, z, channel_index);
// Overwrite previous metadata
dst->set_voxel_metadata(Vector3i(x, y, z), Variant());
dst.set_voxel_metadata(Vector3i(x, y, z), Variant());
}
}
}
}
}
_buffer->copy_voxel_metadata_in_area(p_voxels, Box3i(Vector3i(), p_voxels->get_size()), p_pos);
_buffer->get_buffer().copy_voxel_metadata_in_area(
p_voxels->get_buffer(), Box3i(Vector3i(), p_voxels->get_size()), p_pos);
}
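The rest of this file follows the same mechanical rewrite; condensed, the pattern is (sketch using the accessors shown above):

// Before: the script-facing VoxelBuffer forwarded voxel accessors itself.
const uint64_t v_before = _buffer->get_voxel(pos, _channel);

// After: the wrapper only hands out the internal buffer, which owns the data,
// so call sites unwrap it once and use the internal API directly.
const uint64_t v_after = _buffer->get_buffer().get_voxel(pos, _channel);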

View File

@ -96,7 +96,7 @@ Ref<VoxelRaycastResult> VoxelToolLodTerrain::raycast(
bool operator()(Vector3i pos) {
// This is not particularly optimized, but runs fast enough for player raycasts
const uint64_t raw_value = terrain->get_voxel(pos, VoxelBuffer::CHANNEL_SDF, 0);
const uint64_t raw_value = terrain->get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, 0);
// TODO Format should be accessible from terrain
const float sdf = u16_to_norm(raw_value);
return sdf < 0;
@ -138,7 +138,7 @@ Ref<VoxelRaycastResult> VoxelToolLodTerrain::raycast(
const VoxelLodTerrain *terrain;
inline float operator()(const Vector3i &pos) const {
const uint64_t raw_value = terrain->get_voxel(pos, VoxelBuffer::CHANNEL_SDF, 0);
const uint64_t raw_value = terrain->get_voxel(pos, VoxelBufferInternal::CHANNEL_SDF, 0);
// TODO Format should be accessible from terrain
const float sdf = u16_to_norm(raw_value);
return sdf;
@ -178,7 +178,7 @@ void VoxelToolLodTerrain::do_sphere(Vector3 center, float radius) {
op.shape.center = center;
op.shape.radius = radius;
op.shape.scale = _sdf_scale;
_terrain->write_box(box, VoxelBuffer::CHANNEL_SDF, op);
_terrain->write_box(box, VoxelBufferInternal::CHANNEL_SDF, op);
} break;
case MODE_REMOVE: {
@ -186,7 +186,7 @@ void VoxelToolLodTerrain::do_sphere(Vector3 center, float radius) {
op.shape.center = center;
op.shape.radius = radius;
op.shape.scale = _sdf_scale;
_terrain->write_box(box, VoxelBuffer::CHANNEL_SDF, op);
_terrain->write_box(box, VoxelBufferInternal::CHANNEL_SDF, op);
} break;
case MODE_SET: {
@ -194,11 +194,11 @@ void VoxelToolLodTerrain::do_sphere(Vector3 center, float radius) {
op.shape.center = center;
op.shape.radius = radius;
op.shape.scale = _sdf_scale;
_terrain->write_box(box, VoxelBuffer::CHANNEL_SDF, op);
_terrain->write_box(box, VoxelBufferInternal::CHANNEL_SDF, op);
} break;
case MODE_TEXTURE_PAINT: {
_terrain->write_box_2(box, VoxelBuffer::CHANNEL_INDICES, VoxelBuffer::CHANNEL_WEIGHTS,
_terrain->write_box_2(box, VoxelBufferInternal::CHANNEL_INDICES, VoxelBufferInternal::CHANNEL_WEIGHTS,
TextureBlendSphereOp{ center, radius, _texture_params });
} break;
@ -214,7 +214,7 @@ void VoxelToolLodTerrain::copy(Vector3i pos, Ref<VoxelBuffer> dst, uint8_t chann
if (channels_mask == 0) {
channels_mask = (1 << _channel);
}
_terrain->copy(pos, **dst, channels_mask);
_terrain->copy(pos, dst->get_buffer(), channels_mask);
}
float VoxelToolLodTerrain::get_voxel_f_interpolated(Vector3 position) const {
@ -223,7 +223,7 @@ float VoxelToolLodTerrain::get_voxel_f_interpolated(Vector3 position) const {
const VoxelLodTerrain *terrain = _terrain;
// TODO Optimization: is it worth a making a fast-path for this?
return get_sdf_interpolated([terrain, channel](Vector3i ipos) {
const uint64_t raw_value = terrain->get_voxel(ipos, VoxelBuffer::CHANNEL_SDF, 0);
const uint64_t raw_value = terrain->get_voxel(ipos, VoxelBufferInternal::CHANNEL_SDF, 0);
// TODO Format should be accessible from terrain
const float sdf = u16_to_norm(raw_value);
return sdf;
@ -283,16 +283,18 @@ static Array separate_floating_chunks(VoxelTool &voxel_tool, Box3i world_box, No
// Copy source data
// TODO Do not assume channel, at the moment it's hardcoded for smooth terrain
static const int channels_mask = (1 << VoxelBuffer::CHANNEL_SDF);
static const int main_channel = VoxelBuffer::CHANNEL_SDF;
static const int channels_mask = (1 << VoxelBufferInternal::CHANNEL_SDF);
static const int main_channel = VoxelBufferInternal::CHANNEL_SDF;
Ref<VoxelBuffer> source_copy_buffer;
// TODO We should be able to use `VoxelBufferInternal`, just needs some things exposed
Ref<VoxelBuffer> source_copy_buffer_ref;
{
VOXEL_PROFILE_SCOPE_NAMED("Copy");
source_copy_buffer.instance();
source_copy_buffer->create(world_box.size);
voxel_tool.copy(world_box.pos, source_copy_buffer, channels_mask);
source_copy_buffer_ref.instance();
source_copy_buffer_ref->create(world_box.size.x, world_box.size.y, world_box.size.z);
voxel_tool.copy(world_box.pos, source_copy_buffer_ref, channels_mask);
}
VoxelBufferInternal &source_copy_buffer = source_copy_buffer_ref->get_buffer();
// Label distinct voxel groups
@ -307,7 +309,7 @@ static Array separate_floating_chunks(VoxelTool &voxel_tool, Box3i world_box, No
island_finder.scan_3d(
Box3i(Vector3i(), world_box.size), [&source_copy_buffer](Vector3i pos) {
// TODO Can be optimized further with direct access
return source_copy_buffer->get_voxel_f(pos.x, pos.y, pos.z, main_channel) < 0.f;
return source_copy_buffer.get_voxel_f(pos.x, pos.y, pos.z, main_channel) < 0.f;
},
to_span(ccl_output), &label_count);
}
@ -418,18 +420,21 @@ static Array separate_floating_chunks(VoxelTool &voxel_tool, Box3i world_box, No
const Vector3i world_pos = world_box.pos + local_bounds.min_pos - Vector3i(min_padding);
const Vector3i size = local_bounds.max_pos - local_bounds.min_pos + Vector3i(1 + max_padding + min_padding);
Ref<VoxelBuffer> buffer;
buffer.instance();
buffer->create(size);
// TODO We should be able to use `VoxelBufferInternal`, just needs some things exposed
Ref<VoxelBuffer> buffer_ref;
buffer_ref.instance();
buffer_ref->create(size.x, size.y, size.z);
// Read voxels from the source volume
voxel_tool.copy(world_pos, buffer, channels_mask);
voxel_tool.copy(world_pos, buffer_ref, channels_mask);
VoxelBufferInternal &buffer = buffer_ref->get_buffer();
// Cleanup padding borders
const Box3i inner_box(Vector3i(min_padding), buffer->get_size() - Vector3i(min_padding + max_padding));
Box3i(Vector3i(), buffer->get_size())
const Box3i inner_box(Vector3i(min_padding), buffer.get_size() - Vector3i(min_padding + max_padding));
Box3i(Vector3i(), buffer.get_size())
.difference(inner_box, [&buffer](Box3i box) {
buffer->fill_area_f(1.f, box.pos, box.pos + box.size, main_channel);
buffer.fill_area_f(1.f, box.pos, box.pos + box.size, main_channel);
});
// Filter out voxels that don't belong to this label
@ -441,7 +446,7 @@ static Array separate_floating_chunks(VoxelTool &voxel_tool, Box3i world_box, No
const uint8_t label2 = ccl_output[ccl_index];
if (label2 != 0 && label != label2) {
buffer->set_voxel_f(1.f,
buffer.set_voxel_f(1.f,
min_padding + x - local_bounds.min_pos.x,
min_padding + y - local_bounds.min_pos.y,
min_padding + z - local_bounds.min_pos.z, main_channel);
@ -450,7 +455,7 @@ static Array separate_floating_chunks(VoxelTool &voxel_tool, Box3i world_box, No
}
}
instances_info.push_back(InstanceInfo{ buffer, world_pos, label });
instances_info.push_back(InstanceInfo{ buffer_ref, world_pos, label });
}
}
@ -545,6 +550,7 @@ static Array separate_floating_chunks(VoxelTool &voxel_tool, Box3i world_box, No
// TODO Option to make multiple convex shapes
// TODO Use the fast way. This is slow because of the internal TriangleMesh thing.
// TODO Don't create a body if the mesh has no triangles
Ref<Shape> shape = mesh->create_convex_shape();
ERR_CONTINUE(shape.is_null());
CollisionShape *collision_shape = memnew(CollisionShape);

View File

@ -139,7 +139,7 @@ void VoxelToolTerrain::copy(Vector3i pos, Ref<VoxelBuffer> dst, uint8_t channels
if (channels_mask == 0) {
channels_mask = (1 << _channel);
}
_terrain->get_storage().copy(pos, **dst, channels_mask);
_terrain->get_storage().copy(pos, dst->get_buffer(), channels_mask);
}
void VoxelToolTerrain::paste(Vector3i pos, Ref<VoxelBuffer> p_voxels, uint8_t channels_mask, bool use_mask,
@ -149,7 +149,7 @@ void VoxelToolTerrain::paste(Vector3i pos, Ref<VoxelBuffer> p_voxels, uint8_t ch
if (channels_mask == 0) {
channels_mask = (1 << _channel);
}
_terrain->get_storage().paste(pos, **p_voxels, channels_mask, use_mask, mask_value, false);
_terrain->get_storage().paste(pos, p_voxels->get_buffer(), channels_mask, use_mask, mask_value, false);
_post_edit(Box3i(pos, p_voxels->get_size()));
}
@ -206,8 +206,8 @@ void VoxelToolTerrain::set_voxel_metadata(Vector3i pos, Variant meta) {
VoxelDataMap &map = _terrain->get_storage();
VoxelDataBlock *block = map.get_block(map.voxel_to_block(pos));
ERR_FAIL_COND_MSG(block == nullptr, "Area not editable");
RWLockWrite lock(block->get_voxels()->get_lock());
block->get_voxels()->set_voxel_metadata(map.to_local(pos), meta);
RWLockWrite lock(block->get_voxels().get_lock());
block->get_voxels().set_voxel_metadata(map.to_local(pos), meta);
}
Variant VoxelToolTerrain::get_voxel_metadata(Vector3i pos) const {
@ -215,8 +215,8 @@ Variant VoxelToolTerrain::get_voxel_metadata(Vector3i pos) const {
VoxelDataMap &map = _terrain->get_storage();
VoxelDataBlock *block = map.get_block(map.voxel_to_block(pos));
ERR_FAIL_COND_V_MSG(block == nullptr, Variant(), "Area not editable");
RWLockRead lock(block->get_voxels()->get_lock());
return block->get_voxels()->get_voxel_metadata(map.to_local(pos));
RWLockRead lock(block->get_voxels().get_lock());
return block->get_voxels_const().get_voxel_metadata(map.to_local(pos));
}
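These two hunks assume VoxelDataBlock now stores the internal buffer directly rather than a Ref<VoxelBuffer>; a sketch of the accessors the call sites imply (the real class has more members):

class VoxelDataBlock {
public:
    VoxelBufferInternal &get_voxels();                    // replaces the old Ref<VoxelBuffer> getter
    const VoxelBufferInternal &get_voxels_const() const;  // read-only access for RWLockRead sections
private:
    VoxelBufferInternal _voxels; // the RWLock lives on the buffer: get_voxels().get_lock()
};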
// Executes a function on random voxels in the provided area, using the type channel.
@ -240,7 +240,7 @@ void VoxelToolTerrain::run_blocky_random_tick(AABB voxel_area, int voxel_count,
const Vector3i min_pos = Vector3i(voxel_area.position);
const Vector3i max_pos = min_pos + Vector3i(voxel_area.size);
const VoxelDataMap &map = _terrain->get_storage();
VoxelDataMap &map = _terrain->get_storage();
const Vector3i min_block_pos = map.voxel_to_block(min_pos);
const Vector3i max_block_pos = map.voxel_to_block(max_pos);
@ -266,14 +266,15 @@ void VoxelToolTerrain::run_blocky_random_tick(AABB voxel_area, int voxel_count,
const Vector3i block_origin = map.block_to_voxel(block_pos);
const VoxelDataBlock *block = map.get_block(block_pos);
VoxelDataBlock *block = map.get_block(block_pos);
if (block != nullptr) {
// Doing ONLY reads here.
{
RWLockRead lock(block->get_voxels()->get_lock());
RWLockRead lock(block->get_voxels().get_lock());
const VoxelBufferInternal &voxels = block->get_voxels_const();
if (block->get_voxels()->get_channel_compression(channel) == VoxelBuffer::COMPRESSION_UNIFORM) {
const uint64_t v = block->get_voxels()->get_voxel(0, 0, 0, channel);
if (voxels.get_channel_compression(channel) == VoxelBuffer::COMPRESSION_UNIFORM) {
const uint64_t v = voxels.get_voxel(0, 0, 0, channel);
if (lib.has_voxel(v)) {
const Voxel &vt = lib.get_voxel_const(v);
if (!vt.is_random_tickable()) {
@ -291,7 +292,7 @@ void VoxelToolTerrain::run_blocky_random_tick(AABB voxel_area, int voxel_count,
Math::rand() & bs_mask,
Math::rand() & bs_mask);
const uint64_t v = block->get_voxels()->get_voxel(rpos, channel);
const uint64_t v = voxels.get_voxel(rpos, channel);
picks[vi] = Pick{ v, rpos };
}
}
@ -333,20 +334,20 @@ void VoxelToolTerrain::for_each_voxel_metadata_in_area(AABB voxel_area, Ref<Func
const Box3i data_block_box = voxel_box.downscaled(_terrain->get_data_block_size());
const VoxelDataMap &map = _terrain->get_storage();
VoxelDataMap &map = _terrain->get_storage();
data_block_box.for_each_cell([&map, &callback, voxel_box](Vector3i block_pos) {
const VoxelDataBlock *block = map.get_block(block_pos);
VoxelDataBlock *block = map.get_block(block_pos);
if (block == nullptr) {
return;
}
ERR_FAIL_COND(block->get_voxels().is_null());
const Vector3i block_origin = block_pos * map.get_block_size();
const Box3i rel_voxel_box(voxel_box.pos - block_origin, voxel_box.size);
// TODO Worth it locking blocks for metadata?
block->get_voxels()->for_each_voxel_metadata_in_area(rel_voxel_box, [&callback, block_origin](Vector3i rel_pos, Variant meta) {
block->get_voxels().for_each_voxel_metadata_in_area(rel_voxel_box, [&callback, block_origin](Vector3i rel_pos, Variant meta) {
const Variant key = (rel_pos + block_origin).to_vec3();
const Variant *args[2] = { &key, &meta };
Variant::CallError err;

View File

@ -1,4 +1,5 @@
#include "vox_import_funcs.h"
//#include "../../storage/voxel_buffer_internal.h"
#include "../../util/godot/funcs.h"
namespace VoxImportUtils {
@ -29,7 +30,7 @@ static void offset_surface(Array &surface, Vector3 offset) {
surface[Mesh::ARRAY_VERTEX] = positions;
}
Ref<Mesh> build_mesh(VoxelBuffer &voxels, VoxelMesher &mesher,
Ref<Mesh> build_mesh(const VoxelBufferInternal &voxels, VoxelMesher &mesher,
std::vector<unsigned int> &surface_index_to_material, Ref<Image> &out_atlas, float p_scale, Vector3 p_offset) {
//
VoxelMesher::Output output;

View File

@ -7,7 +7,7 @@
namespace VoxImportUtils {
Ref<Mesh> build_mesh(VoxelBuffer &voxels, VoxelMesher &mesher,
Ref<Mesh> build_mesh(const VoxelBufferInternal &voxels, VoxelMesher &mesher,
std::vector<unsigned int> &surface_index_to_material, Ref<Image> &out_atlas, float p_scale, Vector3 p_offset);
} // namespace VoxImportUtils
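Call sites now pass the internal buffer by reference instead of dereferencing a wrapper; condensed from the importer changes below:

VoxelBufferInternal voxels;
// ... fill voxels ...
Ref<Mesh> mesh = VoxImportUtils::build_mesh(
        voxels, **mesher, surface_index_to_material, atlas, p_scale, Vector3());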

View File

@ -279,21 +279,20 @@ Error VoxelVoxImporter::import(const String &p_source_file, const String &p_save
for (unsigned int model_index = 0; model_index < data.get_model_count(); ++model_index) {
const vox::Model &model = data.get_model(model_index);
Ref<VoxelBuffer> voxels;
voxels.instance();
voxels->create(model.size + Vector3i(VoxelMesherCubes::PADDING * 2));
voxels->decompress_channel(VoxelBuffer::CHANNEL_COLOR);
VoxelBufferInternal voxels;
voxels.create(model.size + Vector3i(VoxelMesherCubes::PADDING * 2));
voxels.decompress_channel(VoxelBuffer::CHANNEL_COLOR);
Span<uint8_t> dst_color_indices;
ERR_FAIL_COND_V(!voxels->get_channel_raw(VoxelBuffer::CHANNEL_COLOR, dst_color_indices), ERR_BUG);
ERR_FAIL_COND_V(!voxels.get_channel_raw(VoxelBuffer::CHANNEL_COLOR, dst_color_indices), ERR_BUG);
Span<const uint8_t> src_color_indices = to_span_const(model.color_indexes);
copy_3d_region_zxy(dst_color_indices, voxels->get_size(), Vector3i(VoxelMesherCubes::PADDING),
copy_3d_region_zxy(dst_color_indices, voxels.get_size(), Vector3i(VoxelMesherCubes::PADDING),
src_color_indices, model.size, Vector3i(), model.size);
std::vector<unsigned int> surface_index_to_material;
Ref<Image> atlas;
Ref<Mesh> mesh = VoxImportUtils::build_mesh(
**voxels, **mesher, surface_index_to_material, atlas, p_scale, Vector3());
voxels, **mesher, surface_index_to_material, atlas, p_scale, Vector3());
if (mesh.is_null()) {
continue;
@ -346,7 +345,7 @@ Error VoxelVoxImporter::import(const String &p_source_file, const String &p_save
mesh_info.mesh = mesh;
// In MagicaVoxel scene graph, pivots are at the center of models, not at the lower corner.
// TODO I don't know if this is correct, but I could not find a reference saying how that pivot should be calculated
mesh_info.pivot = (voxels->get_size() / 2 - Vector3i(1)).to_vec3();
mesh_info.pivot = (voxels.get_size() / 2 - Vector3i(1)).to_vec3();
meshes.write[model_index] = mesh_info;
}

View File

@ -1,12 +1,12 @@
#include "vox_mesh_importer.h"
#include "../../constants/voxel_string_names.h"
#include "../../meshers/cubes/voxel_mesher_cubes.h"
#include "../../storage/voxel_buffer.h"
#include "../../storage/voxel_buffer_internal.h"
#include "../../storage/voxel_memory_pool.h"
#include "../../streams/vox_data.h"
#include "../../util/macros.h"
#include "../../util/profiling.h"
#include "vox_import_funcs.h"
#include "../../util/macros.h"
String VoxelVoxMeshImporter::get_importer_name() const {
return "VoxelVoxMeshImporter";
@ -128,7 +128,7 @@ void for_each_model_instance(const vox::Data &vox_data, F f) {
struct ModelInstance {
// Model with baked rotation
Ref<VoxelBuffer> voxels;
std::unique_ptr<VoxelBufferInternal> voxels;
// Lowest corner position
Vector3i position;
};
@ -161,25 +161,25 @@ static void extract_model_instances(const vox::Data &vox_data, std::vector<Model
// TODO Optimization: implement transformation for VoxelBuffers so we can avoid using a temporary copy.
// Didn't do it yet because VoxelBuffers also have metadata and the `transform_3d_array_zxy` function only works on arrays.
Ref<VoxelBuffer> voxels;
voxels.instance();
std::unique_ptr<VoxelBufferInternal> voxels = std::make_unique<VoxelBufferInternal>();
voxels->create(dst_size);
voxels->decompress_channel(VoxelBuffer::CHANNEL_COLOR);
voxels->decompress_channel(VoxelBufferInternal::CHANNEL_COLOR);
Span<uint8_t> dst_color_indices;
ERR_FAIL_COND(!voxels->get_channel_raw(VoxelBuffer::CHANNEL_COLOR, dst_color_indices));
ERR_FAIL_COND(!voxels->get_channel_raw(VoxelBufferInternal::CHANNEL_COLOR, dst_color_indices));
CRASH_COND(src_color_indices.size() != dst_color_indices.size());
memcpy(dst_color_indices.data(), src_color_indices.data(), dst_color_indices.size() * sizeof(uint8_t));
ModelInstance mi;
mi.voxels = voxels;
mi.position = args.position - voxels->get_size() / 2;
out_instances.push_back(mi);
mi.voxels = std::move(voxels);
mi.position = args.position - mi.voxels->get_size() / 2;
out_instances.push_back(std::move(mi));
});
}
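Since the internal buffer is not refcounted (and presumably not copyable), each ModelInstance owns its voxels through std::unique_ptr and is moved into the output vector; condensed from the hunk above:

ModelInstance mi;
mi.voxels = std::make_unique<VoxelBufferInternal>();
mi.voxels->create(dst_size);
mi.position = args.position - mi.voxels->get_size() / 2;
out_instances.push_back(std::move(mi)); // the unique_ptr member makes ModelInstance move-only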
static Ref<VoxelBuffer> make_single_voxel_grid(Span<const ModelInstance> instances, Vector3i &out_origin) {
static bool make_single_voxel_grid(Span<const ModelInstance> instances, Vector3i &out_origin,
VoxelBufferInternal &out_voxels) {
// Determine total size
const ModelInstance &first_instance = instances[0];
Box3i bounding_box(first_instance.position, first_instance.voxels->get_size());
@ -192,25 +192,24 @@ static Ref<VoxelBuffer> make_single_voxel_grid(Span<const ModelInstance> instanc
// 3 gigabytes
const size_t limit = 3'000'000'000ull;
const size_t volume = bounding_box.size.volume();
ERR_FAIL_COND_V_MSG(volume > limit, Ref<VoxelBuffer>(),
ERR_FAIL_COND_V_MSG(volume > limit, false,
String("Vox data is too big to be meshed as a single mesh ({0}: {0} bytes)")
.format(varray(bounding_box.size.to_vec3(), SIZE_T_TO_VARIANT(volume))));
Ref<VoxelBuffer> voxels;
voxels.instance();
voxels->create(bounding_box.size + Vector3i(VoxelMesherCubes::PADDING * 2));
voxels->set_channel_depth(VoxelBuffer::CHANNEL_COLOR, VoxelBuffer::DEPTH_8_BIT);
voxels->decompress_channel(VoxelBuffer::CHANNEL_COLOR);
out_voxels.create(bounding_box.size + Vector3i(VoxelMesherCubes::PADDING * 2));
out_voxels.set_channel_depth(VoxelBufferInternal::CHANNEL_COLOR, VoxelBufferInternal::DEPTH_8_BIT);
out_voxels.decompress_channel(VoxelBufferInternal::CHANNEL_COLOR);
for (unsigned int instance_index = 0; instance_index < instances.size(); ++instance_index) {
const ModelInstance &mi = instances[instance_index];
voxels->copy_from(**mi.voxels, Vector3i(), mi.voxels->get_size(),
ERR_FAIL_COND_V(mi.voxels == nullptr, false);
out_voxels.copy_from(*mi.voxels, Vector3i(), mi.voxels->get_size(),
mi.position - bounding_box.pos + Vector3i(VoxelMesherCubes::PADDING),
VoxelBuffer::CHANNEL_COLOR);
VoxelBufferInternal::CHANNEL_COLOR);
}
out_origin = bounding_box.pos;
return voxels;
return true;
}
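The signature change above follows from the same constraint: without refcounting there is no Ref<VoxelBuffer> to return, so the function fills a caller-owned buffer and reports success through the return value. A sketch of the new calling convention:

VoxelBufferInternal voxels;
Vector3i origin;
if (!make_single_voxel_grid(to_span_const(model_instances), origin, voxels)) {
    return ERR_CANT_CREATE; // was: ERR_FAIL_COND_V(voxels.is_null(), ERR_CANT_CREATE)
}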
Error VoxelVoxMeshImporter::import(const String &p_source_file, const String &p_save_path,
@ -253,8 +252,10 @@ Error VoxelVoxMeshImporter::import(const String &p_source_file, const String &p_
// TODO Optimization: this approach uses a lot of memory, might fail on scenes with a large bounding box.
// One workaround would be to mesh the scene incrementally in chunks, giving up greedy meshing beyond 256 or so.
Vector3i bounding_box_origin;
Ref<VoxelBuffer> voxels = make_single_voxel_grid(to_span_const(model_instances), bounding_box_origin);
ERR_FAIL_COND_V(voxels.is_null(), ERR_CANT_CREATE);
VoxelBufferInternal voxels;
const bool single_grid_succeeded =
make_single_voxel_grid(to_span_const(model_instances), bounding_box_origin, voxels);
ERR_FAIL_COND_V(!single_grid_succeeded, ERR_CANT_CREATE);
// We no longer need these
model_instances.clear();
@ -274,14 +275,14 @@ Error VoxelVoxMeshImporter::import(const String &p_source_file, const String &p_
offset = bounding_box_origin.to_vec3();
break;
case PIVOT_CENTER:
offset = -((voxels->get_size() - Vector3i(1)) / 2).to_vec3();
offset = -((voxels.get_size() - Vector3i(1)) / 2).to_vec3();
break;
default:
ERR_FAIL_V(ERR_BUG);
break;
};
mesh = VoxImportUtils::build_mesh(**voxels, **mesher, surface_index_to_material, atlas, p_scale, offset);
mesh = VoxImportUtils::build_mesh(voxels, **mesher, surface_index_to_material, atlas, p_scale, offset);
// Deallocate large temporary memory to free space.
// This is a workaround because VoxelBuffer uses this by default, however it doesn't fit the present use case.
// Eventually we should avoid using this pool here.

View File

@ -236,7 +236,7 @@ void VoxelGeneratorGraph::set_sdf_clip_threshold(float t) {
}
int VoxelGeneratorGraph::get_used_channels_mask() const {
return 1 << VoxelBuffer::CHANNEL_SDF;
return 1 << VoxelBufferInternal::CHANNEL_SDF;
}
void VoxelGeneratorGraph::set_use_subdivision(bool use) {
@ -275,8 +275,8 @@ bool VoxelGeneratorGraph::is_using_xz_caching() const {
// Instead, we could only generate them near zero-crossings, because this is where materials will be seen.
// The problem is that it's harder to manage at the moment, to support edited blocks and LOD...
void VoxelGeneratorGraph::gather_indices_and_weights(Span<const WeightOutput> weight_outputs,
const VoxelGraphRuntime::State &state, Vector3i rmin, Vector3i rmax, int ry, VoxelBuffer &out_voxel_buffer,
FixedArray<uint8_t, 4> spare_indices) {
const VoxelGraphRuntime::State &state, Vector3i rmin, Vector3i rmax, int ry,
VoxelBufferInternal &out_voxel_buffer, FixedArray<uint8_t, 4> spare_indices) {
VOXEL_PROFILE_SCOPE();
// TODO Optimization: exclude up-front outputs that are known to be zero?
@ -312,8 +312,8 @@ void VoxelGeneratorGraph::gather_indices_and_weights(Span<const WeightOutput> we
const uint16_t encoded_weights =
encode_weights_to_packed_u16(weights[0], weights[1], weights[2], weights[3]);
// TODO Flatten this further?
out_voxel_buffer.set_voxel(encoded_indices, rx, ry, rz, VoxelBuffer::CHANNEL_INDICES);
out_voxel_buffer.set_voxel(encoded_weights, rx, ry, rz, VoxelBuffer::CHANNEL_WEIGHTS);
out_voxel_buffer.set_voxel(encoded_indices, rx, ry, rz, VoxelBufferInternal::CHANNEL_INDICES);
out_voxel_buffer.set_voxel(encoded_weights, rx, ry, rz, VoxelBufferInternal::CHANNEL_WEIGHTS);
++value_index;
}
}
@ -335,8 +335,8 @@ void VoxelGeneratorGraph::gather_indices_and_weights(Span<const WeightOutput> we
const uint16_t encoded_weights =
encode_weights_to_packed_u16(weights[0], weights[1], weights[2], weights[3]);
// TODO Flatten this further?
out_voxel_buffer.set_voxel(encoded_indices, rx, ry, rz, VoxelBuffer::CHANNEL_INDICES);
out_voxel_buffer.set_voxel(encoded_weights, rx, ry, rz, VoxelBuffer::CHANNEL_WEIGHTS);
out_voxel_buffer.set_voxel(encoded_indices, rx, ry, rz, VoxelBufferInternal::CHANNEL_INDICES);
out_voxel_buffer.set_voxel(encoded_weights, rx, ry, rz, VoxelBufferInternal::CHANNEL_WEIGHTS);
++value_index;
}
}
@ -380,8 +380,8 @@ void VoxelGeneratorGraph::gather_indices_and_weights(Span<const WeightOutput> we
const uint16_t encoded_weights =
encode_weights_to_packed_u16(weights[0], weights[1], weights[2], weights[3]);
// TODO Flatten this further?
out_voxel_buffer.set_voxel(encoded_indices, rx, ry, rz, VoxelBuffer::CHANNEL_INDICES);
out_voxel_buffer.set_voxel(encoded_weights, rx, ry, rz, VoxelBuffer::CHANNEL_WEIGHTS);
out_voxel_buffer.set_voxel(encoded_indices, rx, ry, rz, VoxelBufferInternal::CHANNEL_INDICES);
out_voxel_buffer.set_voxel(encoded_weights, rx, ry, rz, VoxelBufferInternal::CHANNEL_WEIGHTS);
++value_index;
}
}
@ -401,16 +401,16 @@ VoxelGenerator::Result VoxelGeneratorGraph::generate_block(VoxelBlockRequest &in
return result;
}
VoxelBuffer &out_buffer = **input.voxel_buffer;
VoxelBufferInternal &out_buffer = input.voxel_buffer;
const Vector3i bs = out_buffer.get_size();
const VoxelBuffer::ChannelId channel = VoxelBuffer::CHANNEL_SDF;
const VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::CHANNEL_SDF;
const Vector3i origin = input.origin_in_voxels;
// TODO This may be shared across the module
// Storing voxels is lossy on some depth configurations. They use normalized SDF,
// so we must scale the values to make better use of the offered resolution
const float sdf_scale = VoxelBuffer::get_sdf_quantization_scale(
const float sdf_scale = VoxelBufferInternal::get_sdf_quantization_scale(
out_buffer.get_channel_depth(channel));
const int stride = 1 << input.lod;

View File

@ -182,8 +182,8 @@ private:
};
static void gather_indices_and_weights(Span<const WeightOutput> weight_outputs,
const VoxelGraphRuntime::State &state, Vector3i rmin, Vector3i rmax, int ry, VoxelBuffer &out_voxel_buffer,
FixedArray<uint8_t, 4> spare_indices);
const VoxelGraphRuntime::State &state, Vector3i rmin, Vector3i rmax, int ry,
VoxelBufferInternal &out_voxel_buffer, FixedArray<uint8_t, 4> spare_indices);
static void _bind_methods();

View File

@ -6,8 +6,9 @@ VoxelGeneratorFlat::VoxelGeneratorFlat() {
VoxelGeneratorFlat::~VoxelGeneratorFlat() {
}
void VoxelGeneratorFlat::set_channel(VoxelBuffer::ChannelId channel) {
ERR_FAIL_INDEX(channel, VoxelBuffer::MAX_CHANNELS);
void VoxelGeneratorFlat::set_channel(VoxelBuffer::ChannelId p_channel) {
ERR_FAIL_INDEX(p_channel, VoxelBufferInternal::MAX_CHANNELS);
VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::ChannelId(p_channel);
bool changed = false;
{
RWLockWrite wlock(_parameters_lock);
@ -23,7 +24,7 @@ void VoxelGeneratorFlat::set_channel(VoxelBuffer::ChannelId channel) {
VoxelBuffer::ChannelId VoxelGeneratorFlat::get_channel() const {
RWLockRead rlock(_parameters_lock);
return _parameters.channel;
return VoxelBuffer::ChannelId(_parameters.channel);
}
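The generators keep their script-facing signatures but store the internal enum, casting only at the API boundary; the pattern, condensed (VoxelGeneratorHeightmap and VoxelGeneratorNoise below do the same):

void VoxelGeneratorFlat::set_channel(VoxelBuffer::ChannelId p_channel) {
    ERR_FAIL_INDEX(p_channel, VoxelBufferInternal::MAX_CHANNELS);
    _parameters.channel = VoxelBufferInternal::ChannelId(p_channel); // store the internal enum
    // ...
}

VoxelBuffer::ChannelId VoxelGeneratorFlat::get_channel() const {
    return VoxelBuffer::ChannelId(_parameters.channel); // cast back for scripts
}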
int VoxelGeneratorFlat::get_used_channels_mask() const {
@ -53,7 +54,6 @@ float VoxelGeneratorFlat::get_height() const {
VoxelGenerator::Result VoxelGeneratorFlat::generate_block(VoxelBlockRequest &input) {
Result result;
ERR_FAIL_COND_V(input.voxel_buffer.is_null(), result);
Parameters params;
{
@ -61,7 +61,7 @@ VoxelGenerator::Result VoxelGeneratorFlat::generate_block(VoxelBlockRequest &inp
params = _parameters;
}
VoxelBuffer &out_buffer = **input.voxel_buffer;
VoxelBufferInternal &out_buffer = input.voxel_buffer;
const Vector3i origin = input.origin_in_voxels;
const int channel = params.channel;
const Vector3i bs = out_buffer.get_size();

View File

@ -10,7 +10,7 @@ public:
VoxelGeneratorFlat();
~VoxelGeneratorFlat();
void set_channel(VoxelBuffer::ChannelId channel);
void set_channel(VoxelBuffer::ChannelId p_channel);
VoxelBuffer::ChannelId get_channel() const;
int get_used_channels_mask() const override;
@ -27,7 +27,7 @@ protected:
private:
struct Parameters {
VoxelBuffer::ChannelId channel = VoxelBuffer::CHANNEL_SDF;
VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::CHANNEL_SDF;
int voxel_type = 1;
float height = 0;
float iso_scale = 0.1;

View File

@ -14,7 +14,7 @@ void VoxelGeneratorHeightmap::set_channel(VoxelBuffer::ChannelId channel) {
{
RWLockWrite wlock(_parameters_lock);
if (_parameters.channel != channel) {
_parameters.channel = channel;
_parameters.channel = VoxelBufferInternal::ChannelId(channel);
changed = true;
}
}
@ -25,7 +25,7 @@ void VoxelGeneratorHeightmap::set_channel(VoxelBuffer::ChannelId channel) {
VoxelBuffer::ChannelId VoxelGeneratorHeightmap::get_channel() const {
RWLockRead rlock(_parameters_lock);
return _parameters.channel;
return VoxelBuffer::ChannelId(_parameters.channel);
}
int VoxelGeneratorHeightmap::get_used_channels_mask() const {

View File

@ -26,7 +26,7 @@ public:
protected:
template <typename Height_F>
Result generate(VoxelBuffer &out_buffer, Height_F height_func, Vector3i origin, int lod) {
Result generate(VoxelBufferInternal &out_buffer, Height_F height_func, Vector3i origin, int lod) {
Parameters params;
{
RWLockRead rlock(_parameters_lock);
@ -35,7 +35,7 @@ protected:
const int channel = params.channel;
const Vector3i bs = out_buffer.get_size();
const bool use_sdf = channel == VoxelBuffer::CHANNEL_SDF;
const bool use_sdf = channel == VoxelBufferInternal::CHANNEL_SDF;
if (origin.y > get_height_start() + get_height_range()) {
// The bottom of the block is above the highest ground can go (default is air)
@ -111,7 +111,7 @@ private:
};
struct Parameters {
VoxelBuffer::ChannelId channel = VoxelBuffer::CHANNEL_SDF;
VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::CHANNEL_SDF;
int matter_type = 1;
Range range;
float iso_scale = 0.1;

View File

@ -64,7 +64,7 @@ bool VoxelGeneratorImage::is_blur_enabled() const {
}
VoxelGenerator::Result VoxelGeneratorImage::generate_block(VoxelBlockRequest &input) {
VoxelBuffer &out_buffer = **input.voxel_buffer;
VoxelBufferInternal &out_buffer = input.voxel_buffer;
Parameters params;
{

View File

@ -41,8 +41,9 @@ void VoxelGeneratorNoise::_on_noise_changed() {
_parameters.noise = _noise->duplicate();
}
void VoxelGeneratorNoise::set_channel(VoxelBuffer::ChannelId channel) {
ERR_FAIL_INDEX(channel, VoxelBuffer::MAX_CHANNELS);
void VoxelGeneratorNoise::set_channel(VoxelBuffer::ChannelId p_channel) {
ERR_FAIL_INDEX(p_channel, VoxelBuffer::MAX_CHANNELS);
VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::ChannelId(p_channel);
bool changed = false;
{
RWLockWrite wlock(_parameters_lock);
@ -58,7 +59,7 @@ void VoxelGeneratorNoise::set_channel(VoxelBuffer::ChannelId channel) {
VoxelBuffer::ChannelId VoxelGeneratorNoise::get_channel() const {
RWLockRead rlock(_parameters_lock);
return _parameters.channel;
return VoxelBuffer::ChannelId(_parameters.channel);
}
int VoxelGeneratorNoise::get_used_channels_mask() const {
@ -127,8 +128,6 @@ static inline float get_shaped_noise(OpenSimplexNoise &noise, float x, float y,
}
VoxelGenerator::Result VoxelGeneratorNoise::generate_block(VoxelBlockRequest &input) {
ERR_FAIL_COND_V(input.voxel_buffer.is_null(), Result());
Parameters params;
{
RWLockRead rlock(_parameters_lock);
@ -138,7 +137,7 @@ VoxelGenerator::Result VoxelGeneratorNoise::generate_block(VoxelBlockRequest &in
ERR_FAIL_COND_V(params.noise.is_null(), Result());
OpenSimplexNoise &noise = **params.noise;
VoxelBuffer &buffer = **input.voxel_buffer;
VoxelBufferInternal &buffer = input.voxel_buffer;
Vector3i origin_in_voxels = input.origin_in_voxels;
int lod = input.lod;
@ -154,22 +153,22 @@ VoxelGenerator::Result VoxelGeneratorNoise::generate_block(VoxelBlockRequest &in
if (origin_in_voxels.y >= isosurface_upper_bound) {
// Fill with air
if (params.channel == VoxelBuffer::CHANNEL_SDF) {
if (params.channel == VoxelBufferInternal::CHANNEL_SDF) {
buffer.clear_channel_f(params.channel, 100.0);
} else if (params.channel == VoxelBuffer::CHANNEL_TYPE) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_TYPE) {
buffer.clear_channel(params.channel, air_type);
} else if (params.channel == VoxelBuffer::CHANNEL_COLOR) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_COLOR) {
buffer.clear_channel(params.channel, air_color);
}
result.max_lod_hint = true;
} else if (origin_in_voxels.y + (buffer.get_size().y << lod) < isosurface_lower_bound) {
// Fill with matter
if (params.channel == VoxelBuffer::CHANNEL_SDF) {
if (params.channel == VoxelBufferInternal::CHANNEL_SDF) {
buffer.clear_channel_f(params.channel, -100.0);
} else if (params.channel == VoxelBuffer::CHANNEL_TYPE) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_TYPE) {
buffer.clear_channel(params.channel, matter_type);
} else if (params.channel == VoxelBuffer::CHANNEL_COLOR) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_COLOR) {
buffer.clear_channel(params.channel, matter_color);
}
result.max_lod_hint = true;
@ -191,22 +190,22 @@ VoxelGenerator::Result VoxelGeneratorNoise::generate_block(VoxelBlockRequest &in
if (ly < isosurface_lower_bound) {
// Below is only matter
if (params.channel == VoxelBuffer::CHANNEL_SDF) {
if (params.channel == VoxelBufferInternal::CHANNEL_SDF) {
buffer.set_voxel_f(-1, x, y, z, params.channel);
} else if (params.channel == VoxelBuffer::CHANNEL_TYPE) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_TYPE) {
buffer.set_voxel(matter_type, x, y, z, params.channel);
} else if (params.channel == VoxelBuffer::CHANNEL_COLOR) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_COLOR) {
buffer.set_voxel(matter_color, x, y, z, params.channel);
}
continue;
} else if (ly >= isosurface_upper_bound) {
// Above is only air
if (params.channel == VoxelBuffer::CHANNEL_SDF) {
if (params.channel == VoxelBufferInternal::CHANNEL_SDF) {
buffer.set_voxel_f(1, x, y, z, params.channel);
} else if (params.channel == VoxelBuffer::CHANNEL_TYPE) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_TYPE) {
buffer.set_voxel(air_type, x, y, z, params.channel);
} else if (params.channel == VoxelBuffer::CHANNEL_COLOR) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_COLOR) {
buffer.set_voxel(air_color, x, y, z, params.channel);
}
continue;
@ -220,11 +219,11 @@ VoxelGenerator::Result VoxelGeneratorNoise::generate_block(VoxelBlockRequest &in
float n = get_shaped_noise(noise, lx, ly, lz, one_minus_persistence, bias);
float d = (n + bias) * iso_scale;
if (params.channel == VoxelBuffer::CHANNEL_SDF) {
if (params.channel == VoxelBufferInternal::CHANNEL_SDF) {
buffer.set_voxel_f(d, x, y, z, params.channel);
} else if (params.channel == VoxelBuffer::CHANNEL_TYPE && d < 0) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_TYPE && d < 0) {
buffer.set_voxel(matter_type, x, y, z, params.channel);
} else if (params.channel == VoxelBuffer::CHANNEL_COLOR && d < 0) {
} else if (params.channel == VoxelBufferInternal::CHANNEL_COLOR && d < 0) {
buffer.set_voxel(matter_color, x, y, z, params.channel);
}
}

View File

@ -11,7 +11,7 @@ public:
VoxelGeneratorNoise();
~VoxelGeneratorNoise();
void set_channel(VoxelBuffer::ChannelId channel);
void set_channel(VoxelBuffer::ChannelId p_channel);
VoxelBuffer::ChannelId get_channel() const;
int get_used_channels_mask() const override;
@ -35,7 +35,7 @@ private:
Ref<OpenSimplexNoise> _noise;
struct Parameters {
VoxelBuffer::ChannelId channel = VoxelBuffer::CHANNEL_SDF;
VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::CHANNEL_SDF;
Ref<OpenSimplexNoise> noise;
float height_start = 0;
float height_range = 300;

View File

@ -73,7 +73,7 @@ VoxelGenerator::Result VoxelGeneratorNoise2D::generate_block(VoxelBlockRequest &
ERR_FAIL_COND_V(params.noise.is_null(), result);
OpenSimplexNoise &noise = **params.noise;
VoxelBuffer &out_buffer = **input.voxel_buffer;
VoxelBufferInternal &out_buffer = input.voxel_buffer;
if (_curve.is_null()) {
result = VoxelGeneratorHeightmap::generate(

View File

@ -16,7 +16,7 @@ VoxelGenerator::Result VoxelGeneratorWaves::generate_block(VoxelBlockRequest &in
params = _parameters;
}
VoxelBuffer &out_buffer = **input.voxel_buffer;
VoxelBufferInternal &out_buffer = input.voxel_buffer;
const Vector2 freq(
Math_PI / static_cast<float>(params.pattern_size.x),
Math_PI / static_cast<float>(params.pattern_size.y));

View File

@ -5,7 +5,6 @@ VoxelGenerator::VoxelGenerator() {
}
VoxelGenerator::Result VoxelGenerator::generate_block(VoxelBlockRequest &input) {
ERR_FAIL_COND_V(input.voxel_buffer.is_null(), Result());
return Result();
}
@ -15,7 +14,8 @@ int VoxelGenerator::get_used_channels_mask() const {
void VoxelGenerator::_b_generate_block(Ref<VoxelBuffer> out_buffer, Vector3 origin_in_voxels, int lod) {
ERR_FAIL_COND(lod < 0);
VoxelBlockRequest r = { out_buffer, Vector3i(origin_in_voxels), lod };
ERR_FAIL_COND(out_buffer.is_null());
VoxelBlockRequest r = { out_buffer->get_buffer(), Vector3i(origin_in_voxels), lod };
generate_block(r);
}

View File

@ -7,15 +7,23 @@ VoxelGeneratorScript::VoxelGeneratorScript() {
VoxelGenerator::Result VoxelGeneratorScript::generate_block(VoxelBlockRequest &input) {
Result result;
ERR_FAIL_COND_V(input.voxel_buffer.is_null(), result);
Variant ret;
// Create a temporary wrapper so Godot can pass it to scripts
Ref<VoxelBuffer> buffer_wrapper;
buffer_wrapper.instance();
buffer_wrapper->get_buffer().copy_format(input.voxel_buffer);
buffer_wrapper->get_buffer().create(input.voxel_buffer.get_size());
try_call_script(this, VoxelStringNames::get_singleton()->_generate_block,
input.voxel_buffer, input.origin_in_voxels.to_vec3(), input.lod, &ret);
buffer_wrapper, input.origin_in_voxels.to_vec3(), input.lod, &ret);
// The wrapper is discarded
buffer_wrapper->get_buffer().move_to(input.voxel_buffer);
// We may expose this to scripts the day it actually gets used
// if (ret.get_type() == Variant::DICTIONARY) {
// Dictionary d = ret;
// result.max_lod_hint = d.get("max_lod_hint", false);
// }
return result;
}
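Scripts can only hold Reference-derived objects, so the request's raw internal buffer has to be wrapped for the script call and its contents moved back afterwards; condensed from the hunk above (move_to presumably transfers the data without a copy):

Ref<VoxelBuffer> wrapper;
wrapper.instance();
wrapper->get_buffer().copy_format(input.voxel_buffer);       // match channel depths
wrapper->get_buffer().create(input.voxel_buffer.get_size()); // same dimensions
// ... the script's _generate_block(wrapper, origin, lod) fills the wrapper ...
wrapper->get_buffer().move_to(input.voxel_buffer);           // hand the voxels back to the request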

View File

@ -419,7 +419,7 @@ void VoxelMesherBlocky::build(VoxelMesher::Output &output, const VoxelMesher::In
// - Slower
// => Could be implemented in a separate class?
const VoxelBuffer &voxels = input.voxels;
const VoxelBufferInternal &voxels = input.voxels;
#ifdef TOOLS_ENABLED
if (input.lod != 0) {
WARN_PRINT("VoxelMesherBlocky received lod != 0, it is not supported");
@ -433,7 +433,7 @@ void VoxelMesherBlocky::build(VoxelMesher::Output &output, const VoxelMesher::In
// That means we can use raw pointers to voxel data inside instead of using the higher-level getters,
// and then save a lot of time.
if (voxels.get_channel_compression(channel) == VoxelBuffer::COMPRESSION_UNIFORM) {
if (voxels.get_channel_compression(channel) == VoxelBufferInternal::COMPRESSION_UNIFORM) {
// All voxels have the same type.
// If it's all air, nothing to do. If it's all cubes, nothing to do either.
// TODO Handle edge case of uniform block with non-cubic voxels!
@ -441,7 +441,7 @@ void VoxelMesherBlocky::build(VoxelMesher::Output &output, const VoxelMesher::In
// decompress into a backing array to still allow the use of the same algorithm.
return;
} else if (voxels.get_channel_compression(channel) != VoxelBuffer::COMPRESSION_NONE) {
} else if (voxels.get_channel_compression(channel) != VoxelBufferInternal::COMPRESSION_NONE) {
// No other form of compression is allowed
ERR_PRINT("VoxelMesherBlocky received unsupported voxel compression");
return;
@ -465,7 +465,7 @@ void VoxelMesherBlocky::build(VoxelMesher::Output &output, const VoxelMesher::In
}
const Vector3i block_size = voxels.get_size();
const VoxelBuffer::Depth channel_depth = voxels.get_channel_depth(channel);
const VoxelBufferInternal::Depth channel_depth = voxels.get_channel_depth(channel);
{
// We can only access baked data. Only this data is made for multithreaded access.
@ -473,12 +473,12 @@ void VoxelMesherBlocky::build(VoxelMesher::Output &output, const VoxelMesher::In
const VoxelLibrary::BakedData &library_baked_data = params.library->get_baked_data();
switch (channel_depth) {
case VoxelBuffer::DEPTH_8_BIT:
case VoxelBufferInternal::DEPTH_8_BIT:
generate_blocky_mesh(cache.arrays_per_material, raw_channel,
block_size, library_baked_data, params.bake_occlusion, baked_occlusion_darkness);
break;
case VoxelBuffer::DEPTH_16_BIT:
case VoxelBufferInternal::DEPTH_16_BIT:
generate_blocky_mesh(cache.arrays_per_material, raw_channel.reinterpret_cast_to<uint16_t>(),
block_size, library_baked_data, params.bake_occlusion, baked_occlusion_darkness);
break;

View File

@ -717,7 +717,7 @@ VoxelMesherCubes::~VoxelMesherCubes() {
void VoxelMesherCubes::build(VoxelMesher::Output &output, const VoxelMesher::Input &input) {
VOXEL_PROFILE_SCOPE();
const int channel = VoxelBuffer::CHANNEL_COLOR;
const int channel = VoxelBufferInternal::CHANNEL_COLOR;
Cache &cache = _cache;
for (unsigned int i = 0; i < cache.arrays_per_material.size(); ++i) {
@ -725,7 +725,7 @@ void VoxelMesherCubes::build(VoxelMesher::Output &output, const VoxelMesher::Inp
a.clear();
}
const VoxelBuffer &voxels = input.voxels;
const VoxelBufferInternal &voxels = input.voxels;
// Iterate 3D padded data to extract voxel faces.
// This is the most intensive job in this class, so all required data should be as fit as possible.
@ -734,12 +734,12 @@ void VoxelMesherCubes::build(VoxelMesher::Output &output, const VoxelMesher::Inp
// That means we can use raw pointers to voxel data inside instead of using the higher-level getters,
// and then save a lot of time.
if (voxels.get_channel_compression(channel) == VoxelBuffer::COMPRESSION_UNIFORM) {
if (voxels.get_channel_compression(channel) == VoxelBufferInternal::COMPRESSION_UNIFORM) {
// All voxels have the same type.
// If it's all air, nothing to do. If it's all cubes, nothing to do either.
return;
} else if (voxels.get_channel_compression(channel) != VoxelBuffer::COMPRESSION_NONE) {
} else if (voxels.get_channel_compression(channel) != VoxelBufferInternal::COMPRESSION_NONE) {
// No other form of compression is allowed
ERR_PRINT("VoxelMesherCubes received unsupported voxel compression");
return;
@ -753,7 +753,7 @@ void VoxelMesherCubes::build(VoxelMesher::Output &output, const VoxelMesher::Inp
}
const Vector3i block_size = voxels.get_size();
const VoxelBuffer::Depth channel_depth = voxels.get_channel_depth(channel);
const VoxelBufferInternal::Depth channel_depth = voxels.get_channel_depth(channel);
Parameters params;
{

View File

@ -1,7 +1,7 @@
#ifndef HERMITE_VALUE_H
#define HERMITE_VALUE_H
#include "../../storage/voxel_buffer.h"
#include "../../storage/voxel_buffer_internal.h"
#include "../../util/math/funcs.h"
#include <core/math/vector3.h>
@ -16,17 +16,18 @@ struct HermiteValue {
}
};
inline float get_isolevel_clamped(const VoxelBuffer &voxels, unsigned int x, unsigned int y, unsigned int z) {
inline float get_isolevel_clamped(const VoxelBufferInternal &voxels, unsigned int x, unsigned int y, unsigned int z) {
x = x >= (unsigned int)voxels.get_size().x ? voxels.get_size().x - 1 : x;
y = y >= (unsigned int)voxels.get_size().y ? voxels.get_size().y - 1 : y;
z = z >= (unsigned int)voxels.get_size().z ? voxels.get_size().z - 1 : z;
return voxels.get_voxel_f(x, y, z, VoxelBuffer::CHANNEL_SDF);
return voxels.get_voxel_f(x, y, z, VoxelBufferInternal::CHANNEL_SDF);
}
inline HermiteValue get_hermite_value(const VoxelBuffer &voxels, unsigned int x, unsigned int y, unsigned int z) {
inline HermiteValue get_hermite_value(const VoxelBufferInternal &voxels,
unsigned int x, unsigned int y, unsigned int z) {
HermiteValue v;
v.sdf = voxels.get_voxel_f(x, y, z, VoxelBuffer::CHANNEL_SDF);
v.sdf = voxels.get_voxel_f(x, y, z, VoxelBufferInternal::CHANNEL_SDF);
Vector3 gradient;
@ -39,7 +40,7 @@ inline HermiteValue get_hermite_value(const VoxelBuffer &voxels, unsigned int x,
return v;
}
inline HermiteValue get_interpolated_hermite_value(const VoxelBuffer &voxels, Vector3 pos) {
inline HermiteValue get_interpolated_hermite_value(const VoxelBufferInternal &voxels, Vector3 pos) {
int x0 = static_cast<int>(pos.x);
int y0 = static_cast<int>(pos.y);
int z0 = static_cast<int>(pos.z);
@ -69,7 +70,9 @@ inline HermiteValue get_interpolated_hermite_value(const VoxelBuffer &voxels, Ve
HermiteValue v;
v.sdf = ::interpolate(v0.sdf, v1.sdf, v2.sdf, v3.sdf, v4.sdf, v5.sdf, v6.sdf, v7.sdf, rpos);
v.gradient = ::interpolate(v0.gradient, v1.gradient, v2.gradient, v3.gradient, v4.gradient, v5.gradient, v6.gradient, v7.gradient, rpos);
v.gradient = ::interpolate(
v0.gradient, v1.gradient, v2.gradient, v3.gradient,
v4.gradient, v5.gradient, v6.gradient, v7.gradient, rpos);
return v;
}

View File

@ -19,11 +19,10 @@ const float SQRT3 = 1.7320508075688772;
// Helper to access padded voxel data
struct VoxelAccess {
const VoxelBuffer &buffer;
const VoxelBufferInternal &buffer;
const Vector3i offset;
VoxelAccess(const VoxelBuffer &p_buffer, Vector3i p_offset) :
VoxelAccess(const VoxelBufferInternal &p_buffer, Vector3i p_offset) :
buffer(p_buffer),
offset(p_offset) {}
@ -40,7 +39,6 @@ struct VoxelAccess {
};
bool can_split(Vector3i node_origin, int node_size, const VoxelAccess &voxels, float geometric_error) {
if (node_size == 1) {
// Voxel resolution, can't split further
return false;
@ -48,7 +46,7 @@ bool can_split(Vector3i node_origin, int node_size, const VoxelAccess &voxels, f
Vector3i origin = node_origin + voxels.offset;
int step = node_size;
int channel = VoxelBuffer::CHANNEL_SDF;
int channel = VoxelBufferInternal::CHANNEL_SDF;
// Don't split if nothing is inside, i.e isolevel distance is greater than the size of the cube we are in
Vector3i center_pos = node_origin + Vector3i(node_size / 2);
@ -125,7 +123,6 @@ bool can_split(Vector3i node_origin, int node_size, const VoxelAccess &voxels, f
float error = 0.0;
for (int i = 0; i < 19; ++i) {
Vector3i pos = positions[i];
HermiteValue value = get_hermite_value(voxels.buffer, pos.x, pos.y, pos.z);
@ -178,12 +175,10 @@ private:
}
void split(OctreeNode *node) {
CRASH_COND(node->has_children());
CRASH_COND(node->size == 1);
for (int i = 0; i < 8; ++i) {
OctreeNode *child = _pool.create();
const int *v = OctreeTables::g_octant_position[i];
child->size = node->size / 2;
@ -209,7 +204,6 @@ public:
}
OctreeNode *build(Vector3i node_origin, int node_size) const {
OctreeNode *children[8] = { nullptr };
bool any_node = false;
@ -228,7 +222,6 @@ public:
if (!any_node) {
// No nodes, test if the 8 octants are worth existing (this could be leaves)
if (can_split(node_origin, node_size, _voxels, _geometry_error)) {
node = _pool.create();
node->origin = node_origin;
node->size = node_size;
@ -297,7 +290,6 @@ inline void scale_positions(PoolVector3Array &positions, float scale) {
}
Array generate_debug_octree_mesh(OctreeNode *root, int scale) {
struct GetMaxDepth {
int max_depth = 0;
void operator()(OctreeNode *_, int depth) {
@ -318,7 +310,6 @@ Array generate_debug_octree_mesh(OctreeNode *root, int scale) {
int max_depth;
void operator()(OctreeNode *node, int depth) {
float shrink = depth * 0.005;
Vector3 o = node->origin.to_vec3() + Vector3(shrink, shrink, shrink);
float s = node->size - 2.0 * shrink;
@ -366,12 +357,10 @@ Array generate_debug_octree_mesh(OctreeNode *root, int scale) {
}
Array generate_debug_dual_grid_mesh(const DualGrid &grid, int scale) {
PoolVector3Array positions;
PoolIntArray indices;
for (unsigned int i = 0; i < grid.cells.size(); ++i) {
const DualCell &cell = grid.cells[i];
int vi = positions.size();
@ -646,7 +635,6 @@ inline void add_cell(DualGrid &grid,
const Vector3 c5,
const Vector3 c6,
const Vector3 c7) {
DualCell cell;
cell.corners[0] = c0;
cell.corners[1] = c1;
@ -669,20 +657,17 @@ void DualGridGenerator::create_border_cells(
const OctreeNode *n5,
const OctreeNode *n6,
const OctreeNode *n7) {
DualGrid &grid = _grid;
// Most boring function ever
if (is_border_back(n0) && is_border_back(n1) && is_border_back(n4) && is_border_back(n5)) {
add_cell(grid,
get_center_back(n0), get_center_back(n1), get_center(n1), get_center(n0),
get_center_back(n4), get_center_back(n5), get_center(n5), get_center(n4));
// Generate back edge border cells
if (is_border_top(n4, _octree_root_size) && is_border_top(n5, _octree_root_size)) {
add_cell(grid,
get_center_back(n4), get_center_back(n5), get_center(n5), get_center(n4),
get_center_back_top(n4), get_center_back_top(n5), get_center_top(n5), get_center_top(n4));
@ -702,7 +687,6 @@ void DualGridGenerator::create_border_cells(
}
if (is_border_bottom(n0) && is_border_bottom(n1)) {
add_cell(grid,
get_center_back_bottom(n0), get_center_back_bottom(n1), get_center_bottom(n1), get_center_bottom(n0),
get_center_back(n0), get_center_back(n1), get_center(n1), get_center(n0));
@ -724,14 +708,12 @@ void DualGridGenerator::create_border_cells(
is_border_front(n3, _octree_root_size) &&
is_border_front(n6, _octree_root_size) &&
is_border_front(n7, _octree_root_size)) {
add_cell(grid,
get_center(n3), get_center(n2), get_center_front(n2), get_center_front(n3),
get_center(n7), get_center(n6), get_center_front(n6), get_center_front(n7));
// Generate front edge border cells
if (is_border_top(n6, _octree_root_size) && is_border_top(n7, _octree_root_size)) {
add_cell(grid,
get_center(n7), get_center(n6), get_center_front(n6), get_center_front(n7),
get_center_top(n7), get_center_top(n6), get_center_front_top(n6), get_center_front_top(n7));
@ -751,7 +733,6 @@ void DualGridGenerator::create_border_cells(
}
if (is_border_bottom(n3) && is_border_bottom(n2)) {
add_cell(grid,
get_center_bottom(n3), get_center_bottom(n2), get_center_front_bottom(n2), get_center_front_bottom(n3),
get_center(n3), get_center(n2), get_center_front(n2), get_center_front(n3));
@ -770,7 +751,6 @@ void DualGridGenerator::create_border_cells(
}
if (is_border_left(n0) && is_border_left(n3) && is_border_left(n4) && is_border_left(n7)) {
add_cell(grid,
get_center_left(n0), get_center(n0), get_center(n3), get_center_left(n3),
get_center_left(n4), get_center(n4), get_center(n7), get_center_left(n7));
@ -805,7 +785,6 @@ void DualGridGenerator::create_border_cells(
is_border_right(n2, _octree_root_size) &&
is_border_right(n5, _octree_root_size) &&
is_border_right(n6, _octree_root_size)) {
add_cell(grid,
get_center(n1), get_center_right(n1), get_center_right(n2), get_center(n2),
get_center(n5), get_center_right(n5), get_center_right(n6), get_center(n6));
@ -840,7 +819,6 @@ void DualGridGenerator::create_border_cells(
is_border_top(n5, _octree_root_size) &&
is_border_top(n6, _octree_root_size) &&
is_border_top(n7, _octree_root_size)) {
add_cell(grid,
get_center(n4), get_center(n5), get_center(n6), get_center(n7),
get_center_top(n4), get_center_top(n5), get_center_top(n6), get_center_top(n7));
@ -869,7 +847,6 @@ void DualGridGenerator::vert_proc(
OctreeNode *n5,
OctreeNode *n6,
OctreeNode *n7) {
const bool n0_has_children = n0->has_children();
const bool n1_has_children = n1->has_children();
const bool n2_has_children = n2->has_children();
@ -882,7 +859,6 @@ void DualGridGenerator::vert_proc(
if (
n0_has_children || n1_has_children || n2_has_children || n3_has_children ||
n4_has_children || n5_has_children || n6_has_children || n7_has_children) {
OctreeNode *c0 = n0_has_children ? n0->children[6] : n0;
OctreeNode *c1 = n1_has_children ? n1->children[7] : n1;
OctreeNode *c2 = n2_has_children ? n2->children[4] : n2;
@ -895,7 +871,6 @@ void DualGridGenerator::vert_proc(
vert_proc(c0, c1, c2, c3, c4, c5, c6, c7);
} else {
if (!(
is_surface_near(n0) ||
is_surface_near(n1) ||
@ -925,7 +900,6 @@ void DualGridGenerator::vert_proc(
}
void DualGridGenerator::edge_proc_x(OctreeNode *n0, OctreeNode *n1, OctreeNode *n2, OctreeNode *n3) {
const bool n0_has_children = n0->has_children();
const bool n1_has_children = n1->has_children();
const bool n2_has_children = n2->has_children();
@ -951,7 +925,6 @@ void DualGridGenerator::edge_proc_x(OctreeNode *n0, OctreeNode *n1, OctreeNode *
}
void DualGridGenerator::edge_proc_y(OctreeNode *n0, OctreeNode *n1, OctreeNode *n2, OctreeNode *n3) {
const bool n0_has_children = n0->has_children();
const bool n1_has_children = n1->has_children();
const bool n2_has_children = n2->has_children();
@ -977,7 +950,6 @@ void DualGridGenerator::edge_proc_y(OctreeNode *n0, OctreeNode *n1, OctreeNode *
}
void DualGridGenerator::edge_proc_z(OctreeNode *n0, OctreeNode *n1, OctreeNode *n2, OctreeNode *n3) {
const bool n0_has_children = n0->has_children();
const bool n1_has_children = n1->has_children();
const bool n2_has_children = n2->has_children();
@ -1003,7 +975,6 @@ void DualGridGenerator::edge_proc_z(OctreeNode *n0, OctreeNode *n1, OctreeNode *
}
void DualGridGenerator::face_proc_xy(OctreeNode *n0, OctreeNode *n1) {
const bool n0_has_children = n0->has_children();
const bool n1_has_children = n1->has_children();
@ -1035,7 +1006,6 @@ void DualGridGenerator::face_proc_xy(OctreeNode *n0, OctreeNode *n1) {
}
void DualGridGenerator::face_proc_zy(OctreeNode *n0, OctreeNode *n1) {
const bool n0_has_children = n0->has_children();
const bool n1_has_children = n1->has_children();
@ -1066,7 +1036,6 @@ void DualGridGenerator::face_proc_zy(OctreeNode *n0, OctreeNode *n1) {
}
void DualGridGenerator::face_proc_xz(OctreeNode *n0, OctreeNode *n1) {
const bool n0_has_children = n0->has_children();
const bool n1_has_children = n1->has_children();
@ -1097,7 +1066,6 @@ void DualGridGenerator::face_proc_xz(OctreeNode *n0, OctreeNode *n1) {
}
void DualGridGenerator::node_proc(OctreeNode *node) {
if (!node->has_children()) {
return;
}
@ -1136,7 +1104,6 @@ void DualGridGenerator::node_proc(OctreeNode *node) {
}
inline Vector3 interpolate(const Vector3 &v0, const Vector3 &v1, const HermiteValue &val0, const HermiteValue &val1, Vector3 &out_normal) {
if (Math::abs(val0.sdf - SURFACE_ISO_LEVEL) <= FLT_EPSILON) {
out_normal = val0.gradient;
out_normal.normalize();
@ -1163,7 +1130,6 @@ inline Vector3 interpolate(const Vector3 &v0, const Vector3 &v1, const HermiteVa
}
void polygonize_cell_marching_squares(const Vector3 *cube_corners, const HermiteValue *cube_values, float max_distance, MeshBuilder &mesh_builder, const int *corner_map) {
// Note:
// Using Ogre's implementation directly resulted in inverted result, because it expects density values instead of SDF,
// So I had to flip a few things around in order to make it work
@ -1240,7 +1206,6 @@ void polygonize_cell_marching_squares(const Vector3 *cube_corners, const Hermite
// Create the triangles according to the table.
for (int i = 0; MarchingCubes::ms_triangles[square_index][i] != -1; i += 3) {
mesh_builder.add_vertex(
intersection_points[MarchingCubes::ms_triangles[square_index][i]],
intersection_normals[MarchingCubes::ms_triangles[square_index][i]]);
@ -1267,7 +1232,6 @@ static const int g_corner_map_bottom[4] = { 3, 2, 1, 0 };
} // namespace MarchingSquares
void add_marching_squares_skirts(const Vector3 *corners, const HermiteValue *values, MeshBuilder &mesh_builder, Vector3 min_pos, Vector3 max_pos) {
float max_distance = 0.2f; // Max distance to the isosurface
if (corners[0].z == min_pos.z) {
@ -1291,7 +1255,6 @@ void add_marching_squares_skirts(const Vector3 *corners, const HermiteValue *val
}
void polygonize_cell_marching_cubes(const Vector3 *corners, const HermiteValue *values, MeshBuilder &mesh_builder) {
unsigned char case_index = 0;
for (int i = 0; i < 8; ++i) {
@ -1349,7 +1312,6 @@ void polygonize_cell_marching_cubes(const Vector3 *corners, const HermiteValue *
// Create the triangles according to the table.
for (int i = 0; MarchingCubes::mc_triangles[case_index][i] != -1; i += 3) {
mesh_builder.add_vertex(
intersection_points[MarchingCubes::mc_triangles[case_index][i]],
intersection_normals[MarchingCubes::mc_triangles[case_index][i]]);
@ -1367,7 +1329,6 @@ void polygonize_cell_marching_cubes(const Vector3 *corners, const HermiteValue *
}
void polygonize_dual_cell(const DualCell &cell, const VoxelAccess &voxels, MeshBuilder &mesh_builder, bool skirts_enabled) {
const Vector3 *corners = cell.corners;
HermiteValue values[8];
@ -1387,14 +1348,13 @@ void polygonize_dual_cell(const DualCell &cell, const VoxelAccess &voxels, MeshB
}
inline void polygonize_dual_grid(const DualGrid &grid, const VoxelAccess &voxels, MeshBuilder &mesh_builder, bool skirts_enabled) {
for (unsigned int i = 0; i < grid.cells.size(); ++i) {
polygonize_dual_cell(grid.cells[i], voxels, mesh_builder, skirts_enabled);
}
}
void polygonize_volume_directly(const VoxelBuffer &voxels, Vector3i min, Vector3i size, MeshBuilder &mesh_builder, bool skirts_enabled) {
void polygonize_volume_directly(const VoxelBufferInternal &voxels, Vector3i min, Vector3i size,
MeshBuilder &mesh_builder, bool skirts_enabled) {
Vector3 corners[8];
HermiteValue values[8];
@ -1407,7 +1367,6 @@ void polygonize_volume_directly(const VoxelBuffer &voxels, Vector3i min, Vector3
for (int z = min.z; z < max.z; ++z) {
for (int x = min.x; x < max.x; ++x) {
for (int y = min.y; y < max.y; ++y) {
values[0] = get_hermite_value(voxels, x, y, z);
values[1] = get_hermite_value(voxels, x + 1, y, z);
values[2] = get_hermite_value(voxels, x + 1, y, z + 1);
@ -1498,9 +1457,9 @@ void VoxelMesherDMC::build(VoxelMesher::Output &output, const VoxelMesher::Input
// - Voxel data must be padded
// - The non-padded area size is cubic and a power of two
const VoxelBuffer &voxels = input.voxels;
const VoxelBufferInternal &voxels = input.voxels;
if (voxels.is_uniform(VoxelBuffer::CHANNEL_SDF)) {
if (voxels.is_uniform(VoxelBufferInternal::CHANNEL_SDF)) {
// That won't produce any polygon
_stats = {};
return;
@ -1636,7 +1595,7 @@ Ref<Resource> VoxelMesherDMC::duplicate(bool p_subresources) const {
}
int VoxelMesherDMC::get_used_channels_mask() const {
return (1 << VoxelBuffer::CHANNEL_SDF);
return (1 << VoxelBufferInternal::CHANNEL_SDF);
}
Dictionary VoxelMesherDMC::get_statistics() const {

View File

@ -1138,12 +1138,12 @@ void build_transition_mesh(
template <typename T>
Span<const T> get_or_decompress_channel(
const VoxelBuffer &voxels, std::vector<T> &backing_buffer, unsigned int channel) {
const VoxelBufferInternal &voxels, std::vector<T> &backing_buffer, unsigned int channel) {
//
ERR_FAIL_COND_V(voxels.get_channel_depth(channel) != VoxelBuffer::get_depth_from_size(sizeof(T)),
ERR_FAIL_COND_V(voxels.get_channel_depth(channel) != VoxelBufferInternal::get_depth_from_size(sizeof(T)),
Span<const T>());
if (voxels.get_channel_compression(channel) == VoxelBuffer::COMPRESSION_UNIFORM) {
if (voxels.get_channel_compression(channel) == VoxelBufferInternal::COMPRESSION_UNIFORM) {
backing_buffer.resize(voxels.get_size().volume());
const T v = voxels.get_voxel(Vector3i(), channel);
// TODO Could use a fast fill using 8-byte blocks or intrinsics?
@ -1159,9 +1159,9 @@ Span<const T> get_or_decompress_channel(
}
}
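For context (not introduced by this commit): a channel compressed as COMPRESSION_UNIFORM stores a single value, so the helper above expands it into a thread-local backing buffer that the meshing loops can index like dense data. A hedged sketch of that expansion, reusing the codebase's Span type and assuming <algorithm> is available:
template <typename T>
Span<const T> expand_uniform(T value, size_t voxel_count, std::vector<T> &backing) {
	// Illustrative only; the real helper also validates channel depth
	// and returns the raw channel directly when it is not uniform.
	backing.resize(voxel_count);
	std::fill(backing.begin(), backing.end(), value);
	return Span<const T>(backing.data(), 0, backing.size());
}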
TextureIndicesData get_texture_indices_data(const VoxelBuffer &voxels, unsigned int channel,
TextureIndicesData get_texture_indices_data(const VoxelBufferInternal &voxels, unsigned int channel,
DefaultTextureIndicesData &out_default_texture_indices_data) {
ERR_FAIL_COND_V(voxels.get_channel_depth(channel) != VoxelBuffer::DEPTH_16_BIT, TextureIndicesData());
ERR_FAIL_COND_V(voxels.get_channel_depth(channel) != VoxelBufferInternal::DEPTH_16_BIT, TextureIndicesData());
TextureIndicesData data;
@ -1219,7 +1219,7 @@ struct WeightSamplerPackedU16 {
thread_local std::vector<uint16_t> s_weights_backing_buffer_u16;
#endif
DefaultTextureIndicesData build_regular_mesh(const VoxelBuffer &voxels, unsigned int sdf_channel, int lod_index,
DefaultTextureIndicesData build_regular_mesh(const VoxelBufferInternal &voxels, unsigned int sdf_channel, int lod_index,
TexturingMode texturing_mode, Cache &cache, MeshArrays &output) {
VOXEL_PROFILE_SCOPE();
// From this point, we expect the buffer to contain allocated data in the relevant channels.
@ -1237,13 +1237,14 @@ DefaultTextureIndicesData build_regular_mesh(const VoxelBuffer &voxels, unsigned
if (texturing_mode == TEXTURES_BLEND_4_OVER_16) {
// From this point we know SDF is not uniform so it has an allocated buffer,
// but it might have uniform indices or weights so we need to ensure there is a backing buffer.
indices_data = get_texture_indices_data(voxels, VoxelBuffer::CHANNEL_INDICES, default_texture_indices_data);
indices_data =
get_texture_indices_data(voxels, VoxelBufferInternal::CHANNEL_INDICES, default_texture_indices_data);
weights_data.u8_data0 =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_0, VoxelBuffer::CHANNEL_WEIGHTS);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_0, VoxelBufferInternal::CHANNEL_WEIGHTS);
weights_data.u8_data1 =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_1, VoxelBuffer::CHANNEL_DATA5);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_1, VoxelBufferInternal::CHANNEL_DATA5);
weights_data.u8_data2 =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_2, VoxelBuffer::CHANNEL_DATA6);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_2, VoxelBufferInternal::CHANNEL_DATA6);
ERR_FAIL_COND_V(weights_data.u8_data0.size() != voxels_count, default_texture_indices_data);
ERR_FAIL_COND_V(weights_data.u8_data1.size() != voxels_count, default_texture_indices_data);
ERR_FAIL_COND_V(weights_data.u8_data2.size() != voxels_count, default_texture_indices_data);
@ -1253,9 +1254,10 @@ DefaultTextureIndicesData build_regular_mesh(const VoxelBuffer &voxels, unsigned
if (texturing_mode == TEXTURES_BLEND_4_OVER_16) {
// From this point we know SDF is not uniform so it has an allocated buffer,
// but it might have uniform indices or weights so we need to ensure there is a backing buffer.
indices_data = get_texture_indices_data(voxels, VoxelBuffer::CHANNEL_INDICES, default_texture_indices_data);
indices_data =
get_texture_indices_data(voxels, VoxelBufferInternal::CHANNEL_INDICES, default_texture_indices_data);
weights_data.u16_data =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u16, VoxelBuffer::CHANNEL_WEIGHTS);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u16, VoxelBufferInternal::CHANNEL_WEIGHTS);
ERR_FAIL_COND_V(weights_data.u16_data.size() != voxels_count, default_texture_indices_data);
}
#endif
@ -1263,25 +1265,25 @@ DefaultTextureIndicesData build_regular_mesh(const VoxelBuffer &voxels, unsigned
// We settle data types up-front so we can get rid of abstraction layers and conditionals,
// which would otherwise harm performance in tight iterations
switch (voxels.get_channel_depth(sdf_channel)) {
case VoxelBuffer::DEPTH_8_BIT: {
case VoxelBufferInternal::DEPTH_8_BIT: {
Span<const uint8_t> sdf_data = sdf_data_raw.reinterpret_cast_to<const uint8_t>();
build_regular_mesh<uint8_t>(
sdf_data, indices_data, weights_data, voxels.get_size(), lod_index, texturing_mode, cache, output);
} break;
case VoxelBuffer::DEPTH_16_BIT: {
case VoxelBufferInternal::DEPTH_16_BIT: {
Span<const uint16_t> sdf_data = sdf_data_raw.reinterpret_cast_to<const uint16_t>();
build_regular_mesh<uint16_t>(
sdf_data, indices_data, weights_data, voxels.get_size(), lod_index, texturing_mode, cache, output);
} break;
case VoxelBuffer::DEPTH_32_BIT: {
case VoxelBufferInternal::DEPTH_32_BIT: {
Span<const float> sdf_data = sdf_data_raw.reinterpret_cast_to<const float>();
build_regular_mesh<float>(
sdf_data, indices_data, weights_data, voxels.get_size(), lod_index, texturing_mode, cache, output);
} break;
case VoxelBuffer::DEPTH_64_BIT:
case VoxelBufferInternal::DEPTH_64_BIT:
ERR_PRINT("Double-precision SDF channel is not supported");
// Not worth growing executable size for relatively pointless double-precision sdf
break;
@ -1294,7 +1296,7 @@ DefaultTextureIndicesData build_regular_mesh(const VoxelBuffer &voxels, unsigned
return default_texture_indices_data;
}
void build_transition_mesh(const VoxelBuffer &voxels, unsigned int sdf_channel, int direction, int lod_index,
void build_transition_mesh(const VoxelBufferInternal &voxels, unsigned int sdf_channel, int direction, int lod_index,
TexturingMode texturing_mode, Cache &cache, MeshArrays &output,
DefaultTextureIndicesData default_texture_indices_data) {
VOXEL_PROFILE_SCOPE();
@ -1317,14 +1319,15 @@ void build_transition_mesh(const VoxelBuffer &voxels, unsigned int sdf_channel,
// From this point we know SDF is not uniform so it has an allocated buffer,
// but it might have uniform indices or weights so we need to ensure there is a backing buffer.
// TODO Is it worth doing conditionals instead during meshing?
indices_data = get_texture_indices_data(voxels, VoxelBuffer::CHANNEL_INDICES, default_texture_indices_data);
indices_data = get_texture_indices_data(
voxels, VoxelBufferInternal::CHANNEL_INDICES, default_texture_indices_data);
}
weights_data.u8_data0 =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_0, VoxelBuffer::CHANNEL_WEIGHTS);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_0, VoxelBufferInternal::CHANNEL_WEIGHTS);
weights_data.u8_data1 =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_1, VoxelBuffer::CHANNEL_DATA5);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_1, VoxelBufferInternal::CHANNEL_DATA5);
weights_data.u8_data2 =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_2, VoxelBuffer::CHANNEL_DATA6);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u8_2, VoxelBufferInternal::CHANNEL_DATA6);
ERR_FAIL_COND(weights_data.u8_data0.size() != voxels_count);
ERR_FAIL_COND(weights_data.u8_data1.size() != voxels_count);
ERR_FAIL_COND(weights_data.u8_data2.size() != voxels_count);
@ -1339,34 +1342,35 @@ void build_transition_mesh(const VoxelBuffer &voxels, unsigned int sdf_channel,
// From this point we know SDF is not uniform so it has an allocated buffer,
// but it might have uniform indices or weights so we need to ensure there is a backing buffer.
// TODO Is it worth doing conditionals instead during meshing?
indices_data = get_texture_indices_data(voxels, VoxelBuffer::CHANNEL_INDICES, default_texture_indices_data);
indices_data = get_texture_indices_data(
voxels, VoxelBufferInternal::CHANNEL_INDICES, default_texture_indices_data);
}
weights_data.u16_data =
get_or_decompress_channel(voxels, s_weights_backing_buffer_u16, VoxelBuffer::CHANNEL_WEIGHTS);
get_or_decompress_channel(voxels, s_weights_backing_buffer_u16, VoxelBufferInternal::CHANNEL_WEIGHTS);
ERR_FAIL_COND(weights_data.u16_data.size() != voxels_count);
}
#endif
switch (voxels.get_channel_depth(sdf_channel)) {
case VoxelBuffer::DEPTH_8_BIT: {
case VoxelBufferInternal::DEPTH_8_BIT: {
Span<const uint8_t> sdf_data = sdf_data_raw.reinterpret_cast_to<const uint8_t>();
build_transition_mesh<uint8_t>(sdf_data, indices_data, weights_data,
voxels.get_size(), direction, lod_index, texturing_mode, cache, output);
} break;
case VoxelBuffer::DEPTH_16_BIT: {
case VoxelBufferInternal::DEPTH_16_BIT: {
Span<const uint16_t> sdf_data = sdf_data_raw.reinterpret_cast_to<const uint16_t>();
build_transition_mesh<uint16_t>(sdf_data, indices_data, weights_data,
voxels.get_size(), direction, lod_index, texturing_mode, cache, output);
} break;
case VoxelBuffer::DEPTH_32_BIT: {
case VoxelBufferInternal::DEPTH_32_BIT: {
Span<const float> sdf_data = sdf_data_raw.reinterpret_cast_to<const float>();
build_transition_mesh<float>(sdf_data, indices_data, weights_data,
voxels.get_size(), direction, lod_index, texturing_mode, cache, output);
} break;
case VoxelBuffer::DEPTH_64_BIT:
case VoxelBufferInternal::DEPTH_64_BIT:
ERR_FAIL_MSG("Double-precision SDF channel is not supported");
// Not worth growing executable size for relatively pointless double-precision sdf
break;

View File

@ -1,7 +1,7 @@
#ifndef TRANSVOXEL_H
#define TRANSVOXEL_H
#include "../../storage/voxel_buffer.h"
#include "../../storage/voxel_buffer_internal.h"
#include "../../util/fixed_array.h"
#include "../../util/math/vector3i.h"
@ -114,10 +114,10 @@ struct DefaultTextureIndicesData {
bool use;
};
DefaultTextureIndicesData build_regular_mesh(const VoxelBuffer &voxels, unsigned int sdf_channel, int lod_index,
DefaultTextureIndicesData build_regular_mesh(const VoxelBufferInternal &voxels, unsigned int sdf_channel, int lod_index,
TexturingMode texturing_mode, Cache &cache, MeshArrays &output);
void build_transition_mesh(const VoxelBuffer &voxels, unsigned int sdf_channel, int direction, int lod_index,
void build_transition_mesh(const VoxelBufferInternal &voxels, unsigned int sdf_channel, int direction, int lod_index,
TexturingMode texturing_mode, Cache &cache, MeshArrays &output,
DefaultTextureIndicesData default_texture_indices_data);

View File

@ -28,11 +28,11 @@ Ref<Resource> VoxelMesherTransvoxel::duplicate(bool p_subresources) const {
int VoxelMesherTransvoxel::get_used_channels_mask() const {
if (_texture_mode == TEXTURES_BLEND_4_OVER_16) {
return (1 << VoxelBuffer::CHANNEL_SDF) |
(1 << VoxelBuffer::CHANNEL_INDICES) |
(1 << VoxelBuffer::CHANNEL_WEIGHTS);
return (1 << VoxelBufferInternal::CHANNEL_SDF) |
(1 << VoxelBufferInternal::CHANNEL_INDICES) |
(1 << VoxelBufferInternal::CHANNEL_WEIGHTS);
}
return (1 << VoxelBuffer::CHANNEL_SDF);
return (1 << VoxelBufferInternal::CHANNEL_SDF);
}
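Worked example (values follow the ChannelId order pinned down later in this diff, where CHANNEL_SDF = 1, CHANNEL_INDICES = 3 and CHANNEL_WEIGHTS = 4):
// (1 << CHANNEL_SDF) | (1 << CHANNEL_INDICES) | (1 << CHANNEL_WEIGHTS)
//   = (1 << 1) | (1 << 3) | (1 << 4) = 2 + 8 + 16 = 26 (0b11010)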
void VoxelMesherTransvoxel::fill_surface_arrays(Array &arrays, const Transvoxel::MeshArrays &src) {
@ -132,7 +132,7 @@ void VoxelMesherTransvoxel::build(VoxelMesher::Output &output, const VoxelMesher
static thread_local Transvoxel::MeshArrays s_mesh_arrays;
static thread_local Transvoxel::MeshArrays s_simplified_mesh_arrays;
const int sdf_channel = VoxelBuffer::CHANNEL_SDF;
const int sdf_channel = VoxelBufferInternal::CHANNEL_SDF;
// Initialize dynamic memory:
// These vectors are re-used.
@ -140,7 +140,7 @@ void VoxelMesherTransvoxel::build(VoxelMesher::Output &output, const VoxelMesher
// Once capacity is big enough, no more memory should be allocated
s_mesh_arrays.clear();
const VoxelBuffer &voxels = input.voxels;
const VoxelBufferInternal &voxels = input.voxels;
if (voxels.is_uniform(sdf_channel)) {
// There won't be anything to polygonize since the SDF has no variations, so it can't cross the isolevel
return;
@ -207,7 +207,7 @@ Ref<ArrayMesh> VoxelMesherTransvoxel::build_transition_mesh(Ref<VoxelBuffer> vox
ERR_FAIL_COND_V(voxels.is_null(), Ref<ArrayMesh>());
if (voxels->is_uniform(VoxelBuffer::CHANNEL_SDF)) {
if (voxels->is_uniform(VoxelBufferInternal::CHANNEL_SDF)) {
// Uniform SDF won't produce any surface
return Ref<ArrayMesh>();
}
@ -216,7 +216,7 @@ Ref<ArrayMesh> VoxelMesherTransvoxel::build_transition_mesh(Ref<VoxelBuffer> vox
// For now we can't support proper texture indices in this specific case
Transvoxel::DefaultTextureIndicesData default_texture_indices_data;
default_texture_indices_data.use = false;
Transvoxel::build_transition_mesh(**voxels, VoxelBuffer::CHANNEL_SDF, direction, 0,
Transvoxel::build_transition_mesh(voxels->get_buffer(), VoxelBufferInternal::CHANNEL_SDF, direction, 0,
static_cast<Transvoxel::TexturingMode>(_texture_mode), s_cache, s_mesh_arrays,
default_texture_indices_data);

View File

@ -6,7 +6,7 @@ Ref<Mesh> VoxelMesher::build_mesh(Ref<VoxelBuffer> voxels, Array materials) {
ERR_FAIL_COND_V(voxels.is_null(), Ref<ArrayMesh>());
Output output;
Input input = { **voxels, 0 };
Input input = { voxels->get_buffer(), 0 };
build(output, input);
if (output.surfaces.empty()) {

View File

@ -6,12 +6,13 @@
#include <scene/resources/mesh.h>
class VoxelBuffer;
class VoxelBufferInternal;
class VoxelMesher : public Resource {
GDCLASS(VoxelMesher, Resource)
public:
struct Input {
const VoxelBuffer &voxels;
const VoxelBufferInternal &voxels;
int lod; // = 0; // Not initialized because it confused GCC
};

View File

@ -1,8 +1,10 @@
#include "voxel_server.h"
#include "../constants/voxel_constants.h"
#include "../util/funcs.h"
#include "../util/godot/funcs.h"
#include "../util/macros.h"
#include "../util/profiling.h"
#include <core/os/memory.h>
#include <scene/main/viewport.h>
#include <thread>
@ -15,12 +17,6 @@ int g_debug_stream_tasks_count = 0;
int g_debug_mesh_tasks_count = 0;
} // namespace
template <typename T>
inline std::shared_ptr<T> gd_make_shared() {
// std::make_shared() apparently won't allow us to specify custom new and delete
return std::shared_ptr<T>(memnew(T), memdelete<T>);
}
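Aside: this helper is removed here and, judging by the new `../util/godot/funcs.h` include above, moved to a shared header; it keeps allocations on Godot's allocator while still giving shared ownership. Typical usage, as seen later in this diff:
std::shared_ptr<VoxelBufferInternal> voxels = gd_make_shared<VoxelBufferInternal>();
voxels->create(block_size, block_size, block_size); // allocated with memnew, freed via memdelete<T>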
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VoxelTimeSpreadTaskRunner::~VoxelTimeSpreadTaskRunner() {
@ -379,7 +375,9 @@ void VoxelServer::request_block_load(uint32_t volume_id, Vector3i block_pos, int
}
}
void VoxelServer::request_voxel_block_save(uint32_t volume_id, Ref<VoxelBuffer> voxels, Vector3i block_pos, int lod) {
void VoxelServer::request_voxel_block_save(uint32_t volume_id, std::shared_ptr<VoxelBufferInternal> voxels,
Vector3i block_pos, int lod) {
//
const Volume &volume = _world.volumes.get(volume_id);
ERR_FAIL_COND(volume.stream.is_null());
CRASH_COND(volume.stream_dependency == nullptr);
@ -443,10 +441,10 @@ void VoxelServer::request_block_save_from_generate_request(BlockGenerateRequest
PRINT_VERBOSE(String("Requesting save of generator output for block {0} lod {1}")
.format(varray(src.position.to_vec3(), src.lod)));
ERR_FAIL_COND(src.voxels.is_null());
BlockDataRequest *r = memnew(BlockDataRequest());
r->voxels = src.voxels->duplicate(true);
// TODO Optimization: `r->voxels` doesn't actually need to be shared
r->voxels = gd_make_shared<VoxelBufferInternal>();
src.voxels->duplicate_to(*r->voxels, true);
r->volume_id = src.volume_id;
r->position = src.position;
r->lod = src.lod;
@ -646,7 +644,8 @@ void VoxelServer::BlockDataRequest::run(VoxelTaskContext ctx) {
switch (type) {
case TYPE_LOAD: {
voxels.instance();
ERR_FAIL_COND(voxels != nullptr);
voxels = gd_make_shared<VoxelBufferInternal>();
voxels->create(block_size, block_size, block_size);
// TODO We should consider batching this again, but it needs to be done carefully.
@ -655,7 +654,7 @@ void VoxelServer::BlockDataRequest::run(VoxelTaskContext ctx) {
// TODO Assign max_lod_hint when available
const VoxelStream::Result voxel_result = stream->emerge_block(voxels, origin_in_voxels, lod);
const VoxelStream::Result voxel_result = stream->emerge_block(*voxels, origin_in_voxels, lod);
if (voxel_result == VoxelStream::RESULT_ERROR) {
ERR_PRINT("Error loading voxel block");
@ -697,13 +696,14 @@ void VoxelServer::BlockDataRequest::run(VoxelTaskContext ctx) {
case TYPE_SAVE: {
if (request_voxels) {
Ref<VoxelBuffer> voxels_copy;
// TODO Is that copy necessary? It's possible it was already done while issuing the request
if (voxels.is_valid()) {
ERR_FAIL_COND(voxels == nullptr);
VoxelBufferInternal voxels_copy;
{
RWLockRead lock(voxels->get_lock());
voxels_copy = voxels->duplicate(true);
// TODO Optimization: is that copy necessary? It's possible it was already done while issuing the request
voxels->duplicate_to(voxels_copy, true);
}
voxels.unref();
voxels = nullptr;
stream->immerge_block(voxels_copy, origin_in_voxels, lod);
}
@ -802,12 +802,12 @@ void VoxelServer::BlockGenerateRequest::run(VoxelTaskContext ctx) {
const Vector3i origin_in_voxels = (position << lod) * block_size;
if (voxels.is_null()) {
voxels.instance();
if (voxels == nullptr) {
voxels = gd_make_shared<VoxelBufferInternal>();
voxels->create(block_size, block_size, block_size);
}
VoxelBlockRequest r{ voxels, origin_in_voxels, lod };
VoxelBlockRequest r{ *voxels, origin_in_voxels, lod };
const VoxelGenerator::Result result = generator->generate_block(r);
max_lod_hint = result.max_lod_hint;
@ -861,7 +861,7 @@ void VoxelServer::BlockGenerateRequest::apply_result() {
// Takes a list of blocks and interprets it as a cube of blocks centered around the area we want to create a mesh from.
// Voxels from central blocks are copied, and parts of side blocks are also copied, so we get a temporary buffer
// which includes enough neighbors for the mesher to avoid doing bounds checks.
static void copy_block_and_neighbors(Span<Ref<VoxelBuffer>> blocks, VoxelBuffer &dst,
static void copy_block_and_neighbors(Span<std::shared_ptr<VoxelBufferInternal>> blocks, VoxelBufferInternal &dst,
int min_padding, int max_padding, int channels_mask) {
VOXEL_PROFILE_SCOPE();
@ -894,8 +894,8 @@ static void copy_block_and_neighbors(Span<Ref<VoxelBuffer>> blocks, VoxelBuffer
// Pick anchor block, usually within the central part of the cube (that block must be valid)
const unsigned int anchor_buffer_index = edge_size * edge_size + edge_size + 1;
Ref<VoxelBuffer> central_buffer = blocks[anchor_buffer_index];
ERR_FAIL_COND_MSG(central_buffer.is_null(), "Central buffer must be valid");
std::shared_ptr<VoxelBufferInternal> &central_buffer = blocks[anchor_buffer_index];
ERR_FAIL_COND_MSG(central_buffer == nullptr, "Central buffer must be valid");
ERR_FAIL_COND_MSG(central_buffer->get_size().all_members_equal() == false, "Central buffer must be cubic");
const int data_block_size = central_buffer->get_size().x;
const int mesh_block_size = data_block_size * mesh_block_size_factor;
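Worked example (not in the diff): blocks are ordered by forward XYZ iteration over an edge_size cube, so for the usual 3x3x3 Moore neighborhood, edge_size = 3 and anchor_buffer_index = 3*3 + 3 + 1 = 13, the block at (x, y, z) = (1, 1, 1), i.e. the center of the 27.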
@ -916,10 +916,10 @@ static void copy_block_and_neighbors(Span<Ref<VoxelBuffer>> blocks, VoxelBuffer
for (int x = -1; x < edge_size - 1; ++x) {
for (int y = -1; y < edge_size - 1; ++y) {
const Vector3i offset = data_block_size * Vector3i(x, y, z);
Ref<VoxelBuffer> src = blocks[i];
const std::shared_ptr<VoxelBufferInternal> &src = blocks[i];
++i;
if (src.is_null()) {
if (src == nullptr) {
continue;
}
@ -929,7 +929,7 @@ static void copy_block_and_neighbors(Span<Ref<VoxelBuffer>> blocks, VoxelBuffer
{
RWLockRead read(src->get_lock());
for (unsigned int ci = 0; ci < channels_count; ++ci) {
dst.copy_from(**src, src_min, src_max, Vector3(), channels[ci]);
dst.copy_from(*src, src_min, src_max, Vector3(), channels[ci]);
}
}
}
@ -955,13 +955,11 @@ void VoxelServer::BlockMeshRequest::run(VoxelTaskContext ctx) {
const unsigned int max_padding = mesher->get_maximum_padding();
// TODO Cache?
Ref<VoxelBuffer> voxels;
voxels.instance();
VoxelBufferInternal voxels;
copy_block_and_neighbors(to_span(blocks, blocks_count),
**voxels, min_padding, max_padding, mesher->get_used_channels_mask());
VoxelMesher::Input input = { **voxels, lod };
voxels, min_padding, max_padding, mesher->get_used_channels_mask());
const VoxelMesher::Input input = { voxels, lod };
mesher->build(surfaces_output, input);
has_run = true;

View File

@ -59,7 +59,7 @@ public:
};
Type type;
Ref<VoxelBuffer> voxels;
std::shared_ptr<VoxelBufferInternal> voxels;
std::unique_ptr<VoxelInstanceBlockData> instances;
Vector3i position;
uint8_t lod;
@ -69,7 +69,7 @@ public:
struct BlockMeshInput {
// Moore area ordered by forward XYZ iteration
FixedArray<Ref<VoxelBuffer>, VoxelConstants::MAX_BLOCK_COUNT_PER_REQUEST> data_blocks;
FixedArray<std::shared_ptr<VoxelBufferInternal>, VoxelConstants::MAX_BLOCK_COUNT_PER_REQUEST> data_blocks;
unsigned int data_blocks_count = 0;
Vector3i render_block_position;
uint8_t lod = 0;
@ -78,7 +78,6 @@ public:
struct ReceptionBuffers {
void (*mesh_output_callback)(void *, const BlockMeshOutput &) = nullptr;
void *callback_data = nullptr;
//std::vector<BlockMeshOutput> mesh_output;
std::vector<BlockDataOutput> data_output;
};
@ -119,7 +118,8 @@ public:
void invalidate_volume_mesh_requests(uint32_t volume_id);
void request_block_mesh(uint32_t volume_id, const BlockMeshInput &input);
void request_block_load(uint32_t volume_id, Vector3i block_pos, int lod, bool request_instances);
void request_voxel_block_save(uint32_t volume_id, Ref<VoxelBuffer> voxels, Vector3i block_pos, int lod);
void request_voxel_block_save(uint32_t volume_id, std::shared_ptr<VoxelBufferInternal> voxels, Vector3i block_pos,
int lod);
void request_instance_block_save(uint32_t volume_id, std::unique_ptr<VoxelInstanceBlockData> instances,
Vector3i block_pos, int lod);
void remove_volume(uint32_t volume_id);
@ -296,7 +296,7 @@ private:
bool is_cancelled() override;
void apply_result() override;
Ref<VoxelBuffer> voxels;
std::shared_ptr<VoxelBufferInternal> voxels;
std::unique_ptr<VoxelInstanceBlockData> instances;
Vector3i position;
uint32_t volume_id;
@ -323,7 +323,7 @@ private:
bool is_cancelled() override;
void apply_result() override;
Ref<VoxelBuffer> voxels;
std::shared_ptr<VoxelBufferInternal> voxels;
Vector3i position;
uint32_t volume_id;
uint8_t lod;
@ -345,7 +345,7 @@ private:
bool is_cancelled() override;
void apply_result() override;
FixedArray<Ref<VoxelBuffer>, VoxelConstants::MAX_BLOCK_COUNT_PER_REQUEST> blocks;
FixedArray<std::shared_ptr<VoxelBufferInternal>, VoxelConstants::MAX_BLOCK_COUNT_PER_REQUEST> blocks;
Vector3i position;
uint32_t volume_id;
uint8_t lod;

View File

@ -1,645 +1,81 @@
#define VOXEL_BUFFER_USE_MEMORY_POOL
#ifdef VOXEL_BUFFER_USE_MEMORY_POOL
#include "voxel_memory_pool.h"
#endif
#include "../edition/voxel_tool_buffer.h"
#include "../util/funcs.h"
#include "../util/profiling.h"
#include "voxel_buffer.h"
#include "../edition/voxel_tool_buffer.h"
#include "../util/godot/funcs.h"
#include <core/func_ref.h>
#include <core/image.h>
#include <core/io/marshalls.h>
#include <core/math/math_funcs.h>
#include <string.h>
namespace {
inline uint8_t *allocate_channel_data(size_t size) {
#ifdef VOXEL_BUFFER_USE_MEMORY_POOL
return VoxelMemoryPool::get_singleton()->allocate(size);
#else
return (uint8_t *)memalloc(size * sizeof(uint8_t));
#endif
}
inline void free_channel_data(uint8_t *data, uint32_t size) {
#ifdef VOXEL_BUFFER_USE_MEMORY_POOL
VoxelMemoryPool::get_singleton()->recycle(data, size);
#else
memfree(data);
#endif
}
uint64_t g_depth_max_values[] = {
0xff, // 8
0xffff, // 16
0xffffffff, // 32
0xffffffffffffffff // 64
};
inline uint32_t get_depth_bit_count(VoxelBuffer::Depth d) {
CRASH_COND(d < 0 || d >= VoxelBuffer::DEPTH_COUNT);
return VoxelBuffer::get_depth_byte_count(d) << 3;
}
inline uint64_t get_max_value_for_depth(VoxelBuffer::Depth d) {
CRASH_COND(d < 0 || d >= VoxelBuffer::DEPTH_COUNT);
return g_depth_max_values[d];
}
inline uint64_t clamp_value_for_depth(uint64_t value, VoxelBuffer::Depth d) {
const uint64_t max_val = get_max_value_for_depth(d);
if (value >= max_val) {
return max_val;
}
return value;
}
static_assert(sizeof(uint32_t) == sizeof(float), "uint32_t and float cannot be marshalled back and forth");
static_assert(sizeof(uint64_t) == sizeof(double), "uint64_t and double cannot be marshalled back and forth");
inline uint64_t real_to_raw_voxel(real_t value, VoxelBuffer::Depth depth) {
switch (depth) {
case VoxelBuffer::DEPTH_8_BIT:
return norm_to_u8(value);
case VoxelBuffer::DEPTH_16_BIT:
return norm_to_u16(value);
case VoxelBuffer::DEPTH_32_BIT: {
MarshallFloat m;
m.f = value;
return m.i;
}
case VoxelBuffer::DEPTH_64_BIT: {
MarshallDouble m;
m.d = value;
return m.l;
}
default:
CRASH_NOW();
return 0;
}
}
inline real_t raw_voxel_to_real(uint64_t value, VoxelBuffer::Depth depth) {
// Depths below 32 bits are normalized between -1 and 1
switch (depth) {
case VoxelBuffer::DEPTH_8_BIT:
return u8_to_norm(value);
case VoxelBuffer::DEPTH_16_BIT:
return u16_to_norm(value);
case VoxelBuffer::DEPTH_32_BIT: {
MarshallFloat m;
m.i = value;
return m.f;
}
case VoxelBuffer::DEPTH_64_BIT: {
MarshallDouble m;
m.l = value;
return m.d;
}
default:
CRASH_NOW();
return 0;
}
}
} // namespace
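Worked example of the normalization above (illustrative, assuming u8_to_norm maps [0, 255] linearly onto [-1, 1]):
// raw_voxel_to_real(0,   VoxelBuffer::DEPTH_8_BIT)  -> -1.0
// raw_voxel_to_real(255, VoxelBuffer::DEPTH_8_BIT)  -> +1.0
// raw_voxel_to_real(m,   VoxelBuffer::DEPTH_32_BIT) -> bit-for-bit reinterpretation of m as float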
const char *VoxelBuffer::CHANNEL_ID_HINT_STRING = "Type,Sdf,Color,Indices,Weights,Data5,Data6,Data7";
VoxelBuffer::VoxelBuffer() {
// Minecraft uses way more than 255 block types and there is room for possible future metadata such as rotation
_channels[CHANNEL_TYPE].depth = VoxelBuffer::DEFAULT_TYPE_CHANNEL_DEPTH;
_channels[CHANNEL_TYPE].defval = 0;
_buffer = gd_make_shared<VoxelBufferInternal>();
}
// 16-bit is better on average to handle large worlds
_channels[CHANNEL_SDF].depth = VoxelBuffer::DEFAULT_SDF_CHANNEL_DEPTH;
_channels[CHANNEL_SDF].defval = 0xffff;
_channels[CHANNEL_INDICES].depth = VoxelBuffer::DEPTH_16_BIT;
_channels[CHANNEL_INDICES].defval = encode_indices_to_packed_u16(0, 1, 2, 3);
_channels[CHANNEL_WEIGHTS].depth = VoxelBuffer::DEPTH_16_BIT;
_channels[CHANNEL_WEIGHTS].defval = encode_weights_to_packed_u16(15, 0, 0, 0);
VoxelBuffer::VoxelBuffer(std::shared_ptr<VoxelBufferInternal> &other) {
CRASH_COND(other == nullptr);
_buffer = other;
}
VoxelBuffer::~VoxelBuffer() {
clear();
}
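Aside on the removed defaults above: under the same normalization, the 16-bit SDF default of 0xffff decodes to +1.0, the farthest "outside" value, so a freshly created buffer represents empty space until written to.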
void VoxelBuffer::create(unsigned int sx, unsigned int sy, unsigned int sz) {
ERR_FAIL_COND(sx > MAX_SIZE || sy > MAX_SIZE || sz > MAX_SIZE);
clear_voxel_metadata();
Vector3i new_size(sx, sy, sz);
if (new_size != _size) {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
Channel &channel = _channels[i];
if (channel.data) {
// Channel already contained data
delete_channel(i);
ERR_FAIL_COND(!create_channel(i, new_size, channel.defval));
}
}
_size = new_size;
}
}
void VoxelBuffer::create(Vector3i size) {
create(size.x, size.y, size.z);
}
void VoxelBuffer::clear() {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
Channel &channel = _channels[i];
if (channel.data) {
delete_channel(i);
}
}
_size = Vector3i();
clear_voxel_metadata();
}
void VoxelBuffer::clear_channel(unsigned int channel_index, uint64_t clear_value) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
if (channel.data != nullptr) {
delete_channel(channel_index);
}
channel.defval = clamp_value_for_depth(clear_value, channel.depth);
}
void VoxelBuffer::clear_channel_f(unsigned int channel_index, real_t clear_value) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
clear_channel(channel_index, real_to_raw_voxel(clear_value, channel.depth));
}
void VoxelBuffer::set_default_values(FixedArray<uint64_t, VoxelBuffer::MAX_CHANNELS> values) {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
_channels[i].defval = clamp_value_for_depth(values[i], _channels[i].depth);
}
}
uint64_t VoxelBuffer::get_voxel(int x, int y, int z, unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, 0);
ERR_FAIL_COND_V_MSG(!is_position_valid(x, y, z), 0, String("At position ({0}, {1}, {2})").format(varray(x, y, z)));
const Channel &channel = _channels[channel_index];
if (channel.data != nullptr) {
const uint32_t i = get_index(x, y, z);
switch (channel.depth) {
case DEPTH_8_BIT:
return channel.data[i];
case DEPTH_16_BIT:
return reinterpret_cast<uint16_t *>(channel.data)[i];
case DEPTH_32_BIT:
return reinterpret_cast<uint32_t *>(channel.data)[i];
case DEPTH_64_BIT:
return reinterpret_cast<uint64_t *>(channel.data)[i];
default:
CRASH_NOW();
return 0;
}
} else {
return channel.defval;
}
}
void VoxelBuffer::set_voxel(uint64_t value, int x, int y, int z, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
ERR_FAIL_COND_MSG(!is_position_valid(x, y, z), String("At position ({0}, {1}, {2})").format(varray(x, y, z)));
Channel &channel = _channels[channel_index];
value = clamp_value_for_depth(value, channel.depth);
bool do_set = true;
if (channel.data == nullptr) {
if (channel.defval != value) {
// Allocate channel with same initial values as defval
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
} else {
do_set = false;
}
}
if (do_set) {
const uint32_t i = get_index(x, y, z);
switch (channel.depth) {
case DEPTH_8_BIT:
channel.data[i] = value;
break;
case DEPTH_16_BIT:
reinterpret_cast<uint16_t *>(channel.data)[i] = value;
break;
case DEPTH_32_BIT:
reinterpret_cast<uint32_t *>(channel.data)[i] = value;
break;
case DEPTH_64_BIT:
reinterpret_cast<uint64_t *>(channel.data)[i] = value;
break;
default:
CRASH_NOW();
break;
}
}
_buffer->clear();
}
real_t VoxelBuffer::get_voxel_f(int x, int y, int z, unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, 0);
return raw_voxel_to_real(get_voxel(x, y, z, channel_index), _channels[channel_index].depth);
return _buffer->get_voxel_f(x, y, z, channel_index);
}
void VoxelBuffer::set_voxel_f(real_t value, int x, int y, int z, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
set_voxel(real_to_raw_voxel(value, _channels[channel_index].depth), x, y, z, channel_index);
return _buffer->set_voxel_f(value, x, y, z, channel_index);
}
void VoxelBuffer::copy_channel_from(Ref<VoxelBuffer> other, unsigned int channel) {
ERR_FAIL_COND(other.is_null());
_buffer->copy_from(other->get_buffer(), channel);
}
void VoxelBuffer::copy_channel_from_area(Ref<VoxelBuffer> other, Vector3 src_min, Vector3 src_max, Vector3 dst_min,
unsigned int channel) {
ERR_FAIL_COND(other.is_null());
_buffer->copy_from(other->get_buffer(), Vector3i(src_min), Vector3i(src_max), Vector3i(dst_min), channel);
}
void VoxelBuffer::fill(uint64_t defval, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
defval = clamp_value_for_depth(defval, channel.depth);
if (channel.data == nullptr) {
// Channel is already optimized and uniform
if (channel.defval == defval) {
// No change
return;
} else {
// Just change default value
channel.defval = defval;
return;
}
}
const size_t volume = get_volume();
switch (channel.depth) {
case DEPTH_8_BIT:
memset(channel.data, defval, channel.size_in_bytes);
break;
case DEPTH_16_BIT:
for (size_t i = 0; i < volume; ++i) {
reinterpret_cast<uint16_t *>(channel.data)[i] = defval;
}
break;
case DEPTH_32_BIT:
for (size_t i = 0; i < volume; ++i) {
reinterpret_cast<uint32_t *>(channel.data)[i] = defval;
}
break;
case DEPTH_64_BIT:
for (size_t i = 0; i < volume; ++i) {
reinterpret_cast<uint64_t *>(channel.data)[i] = defval;
}
break;
default:
CRASH_NOW();
break;
}
}
void VoxelBuffer::fill_area(uint64_t defval, Vector3i min, Vector3i max, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Vector3i::sort_min_max(min, max);
min.clamp_to(Vector3i(0, 0, 0), _size + Vector3i(1, 1, 1));
max.clamp_to(Vector3i(0, 0, 0), _size + Vector3i(1, 1, 1));
const Vector3i area_size = max - min;
if (area_size.x == 0 || area_size.y == 0 || area_size.z == 0) {
return;
}
Channel &channel = _channels[channel_index];
defval = clamp_value_for_depth(defval, channel.depth);
if (channel.data == nullptr) {
if (channel.defval == defval) {
return;
} else {
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
}
}
Vector3i pos;
const size_t volume = get_volume();
for (pos.z = min.z; pos.z < max.z; ++pos.z) {
for (pos.x = min.x; pos.x < max.x; ++pos.x) {
const size_t dst_ri = get_index(pos.x, pos.y + min.y, pos.z);
CRASH_COND(dst_ri >= volume);
switch (channel.depth) {
case DEPTH_8_BIT:
// Fill row by row
memset(&channel.data[dst_ri], defval, area_size.y * sizeof(uint8_t));
break;
case DEPTH_16_BIT:
for (int i = 0; i < area_size.y; ++i) {
((uint16_t *)channel.data)[dst_ri + i] = defval;
}
break;
case DEPTH_32_BIT:
for (int i = 0; i < area_size.y; ++i) {
((uint32_t *)channel.data)[dst_ri + i] = defval;
}
break;
case DEPTH_64_BIT:
for (int i = 0; i < area_size.y; ++i) {
((uint64_t *)channel.data)[dst_ri + i] = defval;
}
break;
default:
CRASH_NOW();
break;
}
}
}
}
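Layout note (inferred from the loops above, not stated in the diff): voxels appear to be stored in ZXY order, index = y + size_y * (x + size_x * z), which is why rows along Y are contiguous and an 8-bit row can be filled with a single memset.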
void VoxelBuffer::fill_area_f(float fvalue, Vector3i min, Vector3i max, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
fill_area(real_to_raw_voxel(fvalue, channel.depth), min, max, channel_index);
_buffer->fill(defval, channel_index);
}
void VoxelBuffer::fill_f(real_t value, unsigned int channel) {
ERR_FAIL_INDEX(channel, MAX_CHANNELS);
fill(real_to_raw_voxel(value, _channels[channel].depth), channel);
}
template <typename T>
inline bool is_uniform_b(const uint8_t *data, size_t item_count) {
return is_uniform<T>(reinterpret_cast<const T *>(data), item_count);
_buffer->fill_f(value, channel);
}
bool VoxelBuffer::is_uniform(unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, true);
const Channel &channel = _channels[channel_index];
if (channel.data == nullptr) {
// Channel has been optimized
return true;
}
const size_t volume = get_volume();
// Channel isn't optimized, so must look at each voxel
switch (channel.depth) {
case DEPTH_8_BIT:
return ::is_uniform_b<uint8_t>(channel.data, volume);
case DEPTH_16_BIT:
return ::is_uniform_b<uint16_t>(channel.data, volume);
case DEPTH_32_BIT:
return ::is_uniform_b<uint32_t>(channel.data, volume);
case DEPTH_64_BIT:
return ::is_uniform_b<uint64_t>(channel.data, volume);
default:
CRASH_NOW();
break;
}
return true;
return _buffer->is_uniform(channel_index);
}
void VoxelBuffer::compress_uniform_channels() {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
if (_channels[i].data != nullptr && is_uniform(i)) {
// TODO More direct way
const uint64_t v = get_voxel(0, 0, 0, i);
clear_channel(i, v);
}
}
}
void VoxelBuffer::decompress_channel(unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
if (channel.data == nullptr) {
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
}
_buffer->compress_uniform_channels();
}
VoxelBuffer::Compression VoxelBuffer::get_channel_compression(unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, VoxelBuffer::COMPRESSION_NONE);
const Channel &channel = _channels[channel_index];
if (channel.data == nullptr) {
return COMPRESSION_UNIFORM;
}
return COMPRESSION_NONE;
return VoxelBuffer::Compression(_buffer->get_channel_compression(channel_index));
}
void VoxelBuffer::copy_format(const VoxelBuffer &other) {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
set_channel_depth(i, other.get_channel_depth(i));
}
}
void VoxelBuffer::copy_from(const VoxelBuffer &other) {
// Copy all channels, assuming sizes and formats match
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
copy_from(other, i);
}
}
void VoxelBuffer::copy_from(const VoxelBuffer &other, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
ERR_FAIL_COND(other._size != _size);
Channel &channel = _channels[channel_index];
const Channel &other_channel = other._channels[channel_index];
ERR_FAIL_COND(other_channel.depth != channel.depth);
if (other_channel.data != nullptr) {
if (channel.data == nullptr) {
ERR_FAIL_COND(!create_channel_noinit(channel_index, _size));
}
CRASH_COND(channel.size_in_bytes != other_channel.size_in_bytes);
memcpy(channel.data, other_channel.data, channel.size_in_bytes);
} else if (channel.data != nullptr) {
delete_channel(channel_index);
}
channel.defval = other_channel.defval;
channel.depth = other_channel.depth;
}
// TODO Disallow copying from overlapping areas of the same buffer
void VoxelBuffer::copy_from(const VoxelBuffer &other, Vector3i src_min, Vector3i src_max, Vector3i dst_min,
unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
const Channel &other_channel = other._channels[channel_index];
ERR_FAIL_COND(other_channel.depth != channel.depth);
if (channel.data == nullptr && other_channel.data == nullptr && channel.defval == other_channel.defval) {
// No action needed
return;
}
if (other_channel.data != nullptr) {
if (channel.data == nullptr) {
// Note, we do this even if the pasted data happens to be all the same value as our current channel.
// We assume that this case is not frequent enough to bother, and compression can happen later
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
}
const unsigned int item_size = get_depth_byte_count(channel.depth);
Span<const uint8_t> src(other_channel.data, other_channel.size_in_bytes);
Span<uint8_t> dst(channel.data, channel.size_in_bytes);
copy_3d_region_zxy(dst, _size, dst_min, src, other._size, src_min, src_max, item_size);
} else if (channel.defval != other_channel.defval) {
// This logic is still required due to how source and destination regions can be specified.
// The actual size of the destination area must be determined from the source area, after it has been clipped.
Vector3i::sort_min_max(src_min, src_max);
clip_copy_region(src_min, src_max, other._size, dst_min, _size);
const Vector3i area_size = src_max - src_min;
if (area_size.x <= 0 || area_size.y <= 0 || area_size.z <= 0) {
// Degenerate area, we'll not copy anything.
return;
}
fill_area(other_channel.defval, dst_min, dst_min + area_size, channel_index);
}
void VoxelBuffer::downscale_to(Ref<VoxelBuffer> dst, Vector3 src_min, Vector3 src_max, Vector3 dst_min) const {
ERR_FAIL_COND(dst.is_null());
_buffer->downscale_to(dst->get_buffer(), Vector3i(src_min), Vector3i(src_max), Vector3i(dst_min));
}
Ref<VoxelBuffer> VoxelBuffer::duplicate(bool include_metadata) const {
VoxelBuffer *d = memnew(VoxelBuffer);
d->create(_size);
for (unsigned int i = 0; i < _channels.size(); ++i) {
d->set_channel_depth(i, _channels[i].depth);
}
d->copy_from(*this);
if (include_metadata) {
d->copy_voxel_metadata(*this);
}
return Ref<VoxelBuffer>(d);
}
bool VoxelBuffer::get_channel_raw(unsigned int channel_index, Span<uint8_t> &slice) const {
const Channel &channel = _channels[channel_index];
if (channel.data != nullptr) {
slice = Span<uint8_t>(channel.data, 0, channel.size_in_bytes);
return true;
}
slice = Span<uint8_t>();
return false;
}
bool VoxelBuffer::create_channel(int i, Vector3i size, uint64_t defval) {
if (!create_channel_noinit(i, size)) {
return false;
}
fill(defval, i);
return true;
}
uint32_t VoxelBuffer::get_size_in_bytes_for_volume(Vector3i size, Depth depth) {
// Calculate appropriate size based on bit depth
const unsigned int volume = size.x * size.y * size.z;
const unsigned int bits = volume * ::get_depth_bit_count(depth);
const unsigned int size_in_bytes = (bits >> 3);
return size_in_bytes;
}
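Worked example (illustrative): a 16x16x16 buffer at DEPTH_16_BIT gives volume = 4096 voxels, bits = 4096 * 16 = 65536, and size_in_bytes = 65536 >> 3 = 8192.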
bool VoxelBuffer::create_channel_noinit(int i, Vector3i size) {
Channel &channel = _channels[i];
size_t size_in_bytes = get_size_in_bytes_for_volume(size, channel.depth);
CRASH_COND(channel.data != nullptr);
channel.data = allocate_channel_data(size_in_bytes);
ERR_FAIL_COND_V(channel.data == nullptr, false);
channel.size_in_bytes = size_in_bytes;
return true;
}
void VoxelBuffer::delete_channel(int i) {
Channel &channel = _channels[i];
ERR_FAIL_COND(channel.data == nullptr);
free_channel_data(channel.data, channel.size_in_bytes);
channel.data = nullptr;
channel.size_in_bytes = 0;
}
void VoxelBuffer::downscale_to(VoxelBuffer &dst, Vector3i src_min, Vector3i src_max, Vector3i dst_min) const {
// TODO Align input to a multiple of two
src_min.clamp_to(Vector3i(), _size);
src_max.clamp_to(Vector3i(), _size + Vector3i(1));
Vector3i dst_max = dst_min + ((src_max - src_min) >> 1);
// TODO This will be wrong if it overlaps the border?
dst_min.clamp_to(Vector3i(), dst._size);
dst_max.clamp_to(Vector3i(), dst._size + Vector3i(1));
for (int channel_index = 0; channel_index < MAX_CHANNELS; ++channel_index) {
const Channel &src_channel = _channels[channel_index];
const Channel &dst_channel = dst._channels[channel_index];
if (src_channel.data == nullptr && dst_channel.data == nullptr && src_channel.defval == dst_channel.defval) {
// No action needed
continue;
}
// Nearest-neighbor downscaling
Vector3i pos;
for (pos.z = dst_min.z; pos.z < dst_max.z; ++pos.z) {
for (pos.x = dst_min.x; pos.x < dst_max.x; ++pos.x) {
for (pos.y = dst_min.y; pos.y < dst_max.y; ++pos.y) {
const Vector3i src_pos = src_min + ((pos - dst_min) << 1);
// TODO Remove check once it works
CRASH_COND(!is_position_valid(src_pos.x, src_pos.y, src_pos.z));
uint64_t v;
if (src_channel.data) {
// TODO Optimized version?
v = get_voxel(src_pos, channel_index);
} else {
v = src_channel.defval;
}
dst.set_voxel(v, pos, channel_index);
}
}
}
}
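Worked example (illustrative): with src_min = (0, 0, 0) and dst_min = (0, 0, 0), destination voxel (3, 2, 1) reads source voxel (6, 4, 2); every destination voxel samples the even-coordinate source voxel, halving resolution by nearest-neighbor.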
Ref<VoxelBuffer> d;
d.instance();
_buffer->duplicate_to(d->get_buffer(), include_metadata);
return d;
}
Ref<VoxelTool> VoxelBuffer::get_voxel_tool() {
@ -649,225 +85,62 @@ Ref<VoxelTool> VoxelBuffer::get_voxel_tool() {
return Ref<VoxelTool>(memnew(VoxelToolBuffer(vb)));
}
bool VoxelBuffer::equals(const VoxelBuffer &p_other) const {
if (p_other._size != _size) {
return false;
}
for (int channel_index = 0; channel_index < MAX_CHANNELS; ++channel_index) {
const Channel &channel = _channels[channel_index];
const Channel &other_channel = p_other._channels[channel_index];
if ((channel.data == nullptr) != (other_channel.data == nullptr)) {
// Note: they could still logically be equal if one channel contains uniform voxel memory
return false;
}
if (channel.depth != other_channel.depth) {
return false;
}
if (channel.data == nullptr) {
if (channel.defval != other_channel.defval) {
return false;
}
} else {
ERR_FAIL_COND_V(channel.size_in_bytes != other_channel.size_in_bytes, false);
for (size_t i = 0; i < channel.size_in_bytes; ++i) {
if (channel.data[i] != other_channel.data[i]) {
return false;
}
}
}
}
return true;
}
void VoxelBuffer::set_channel_depth(unsigned int channel_index, Depth new_depth) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
ERR_FAIL_INDEX(new_depth, DEPTH_COUNT);
Channel &channel = _channels[channel_index];
if (channel.depth == new_depth) {
return;
}
if (channel.data != nullptr) {
// TODO Implement conversion and do it when specified
WARN_PRINT("Changing VoxelBuffer depth with present data, this will reset the channel");
delete_channel(channel_index);
}
channel.defval = clamp_value_for_depth(channel.defval, new_depth);
channel.depth = new_depth;
_buffer->set_channel_depth(channel_index, VoxelBufferInternal::Depth(new_depth));
}
VoxelBuffer::Depth VoxelBuffer::get_channel_depth(unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, DEPTH_8_BIT);
return _channels[channel_index].depth;
}
uint32_t VoxelBuffer::get_depth_bit_count(Depth d) {
return ::get_depth_bit_count(d);
}
float VoxelBuffer::get_sdf_quantization_scale(Depth d) {
switch (d) {
// Normalized
case DEPTH_8_BIT:
return VoxelConstants::QUANTIZED_SDF_8_BITS_SCALE;
case DEPTH_16_BIT:
return VoxelConstants::QUANTIZED_SDF_16_BITS_SCALE;
// Direct
default:
return 1.f;
}
return VoxelBuffer::Depth(_buffer->get_channel_depth(channel_index));
}
void VoxelBuffer::set_block_metadata(Variant meta) {
_block_metadata = meta;
}
Variant VoxelBuffer::get_voxel_metadata(Vector3i pos) const {
ERR_FAIL_COND_V(!is_position_valid(pos), Variant());
const Map<Vector3i, Variant>::Element *elem = _voxel_metadata.find(pos);
if (elem != nullptr) {
return elem->value();
} else {
return Variant();
}
}
void VoxelBuffer::set_voxel_metadata(Vector3i pos, Variant meta) {
ERR_FAIL_COND(!is_position_valid(pos));
if (meta.get_type() == Variant::NIL) {
_voxel_metadata.erase(pos);
} else {
_voxel_metadata[pos] = meta;
}
_buffer->set_block_metadata(meta);
}
void VoxelBuffer::for_each_voxel_metadata(Ref<FuncRef> callback) const {
ERR_FAIL_COND(callback.is_null());
const Map<Vector3i, Variant>::Element *elem = _voxel_metadata.front();
while (elem != nullptr) {
const Variant key = elem->key().to_vec3();
const Variant *args[2] = { &key, &elem->value() };
Variant::CallError err;
callback->call_func(args, 2, err);
ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK,
String("FuncRef call failed at {0}").format(varray(key)));
// TODO Can't provide detailed error because FuncRef doesn't give us access to the object
// ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK, false,
// Variant::get_call_error_text(callback->get_object(), method_name, nullptr, 0, err));
elem = elem->next();
}
_buffer->for_each_voxel_metadata(callback);
}
void VoxelBuffer::for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Box3i box) const {
void VoxelBuffer::for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Vector3 min_pos, Vector3 max_pos) {
ERR_FAIL_COND(callback.is_null());
for_each_voxel_metadata_in_area(box, [&callback](Vector3i pos, Variant meta) {
const Variant key = pos.to_vec3();
const Variant *args[2] = { &key, &meta };
Variant::CallError err;
callback->call_func(args, 2, err);
_buffer->for_each_voxel_metadata_in_area(callback, Box3i::from_min_max(Vector3i(min_pos), Vector3i(max_pos)));
}
ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK,
String("FuncRef call failed at {0}").format(varray(key)));
// TODO Can't provide detailed error because FuncRef doesn't give us access to the object
// ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK, false,
// Variant::get_call_error_text(callback->get_object(), method_name, nullptr, 0, err));
});
void VoxelBuffer::copy_voxel_metadata_in_area(Ref<VoxelBuffer> src_buffer, Vector3 src_min_pos, Vector3 src_max_pos,
Vector3 dst_pos) {
ERR_FAIL_COND(src_buffer.is_null());
_buffer->copy_voxel_metadata_in_area(
src_buffer->get_buffer(), Box3i::from_min_max(Vector3i(src_min_pos), Vector3i(src_max_pos)), dst_pos);
}
void VoxelBuffer::clear_voxel_metadata_in_area(Vector3 min_pos, Vector3 max_pos) {
_buffer->clear_voxel_metadata_in_area(Box3i::from_min_max(Vector3i(min_pos), Vector3i(max_pos)));
}
void VoxelBuffer::clear_voxel_metadata() {
_voxel_metadata.clear();
}
void VoxelBuffer::clear_voxel_metadata_in_area(Box3i box) {
Map<Vector3i, Variant>::Element *elem = _voxel_metadata.front();
while (elem != nullptr) {
Map<Vector3i, Variant>::Element *next_elem = elem->next();
if (box.contains(elem->key())) {
_voxel_metadata.erase(elem);
}
elem = next_elem;
}
}
void VoxelBuffer::copy_voxel_metadata_in_area(Ref<VoxelBuffer> src_buffer, Box3i src_box, Vector3i dst_origin) {
ERR_FAIL_COND(src_buffer.is_null());
ERR_FAIL_COND(!src_buffer->is_box_valid(src_box));
const Box3i clipped_src_box = src_box.clipped(Box3i(src_box.pos - dst_origin, _size));
const Vector3i clipped_dst_offset = dst_origin + clipped_src_box.pos - src_box.pos;
const Map<Vector3i, Variant>::Element *elem = src_buffer->_voxel_metadata.front();
while (elem != nullptr) {
const Vector3i src_pos = elem->key();
if (src_box.contains(src_pos)) {
const Vector3i dst_pos = src_pos + clipped_dst_offset;
CRASH_COND(!is_position_valid(dst_pos));
_voxel_metadata[dst_pos] = elem->value().duplicate();
}
elem = elem->next();
}
}
void VoxelBuffer::copy_voxel_metadata(const VoxelBuffer &src_buffer) {
ERR_FAIL_COND(src_buffer.get_size() != _size);
const Map<Vector3i, Variant>::Element *elem = src_buffer._voxel_metadata.front();
while (elem != nullptr) {
const Vector3i pos = elem->key();
_voxel_metadata[pos] = elem->value().duplicate();
elem = elem->next();
}
_block_metadata = src_buffer._block_metadata.duplicate();
_buffer->clear_voxel_metadata();
}
Ref<Image> VoxelBuffer::debug_print_sdf_to_image_top_down() {
Image *im = memnew(Image);
im->create(_size.x, _size.z, false, Image::FORMAT_RGB8);
im->lock();
Vector3i pos;
for (pos.z = 0; pos.z < _size.z; ++pos.z) {
for (pos.x = 0; pos.x < _size.x; ++pos.x) {
for (pos.y = _size.y - 1; pos.y >= 0; --pos.y) {
float v = get_voxel_f(pos.x, pos.y, pos.z, CHANNEL_SDF);
if (v < 0.0) {
break;
}
}
float h = pos.y;
float c = h / _size.y;
im->set_pixel(pos.x, pos.z, Color(c, c, c));
}
}
im->unlock();
return Ref<Image>(im);
return _buffer->debug_print_sdf_to_image_top_down();
}
void VoxelBuffer::_bind_methods() {
ClassDB::bind_method(D_METHOD("create", "sx", "sy", "sz"), &VoxelBuffer::_b_create);
ClassDB::bind_method(D_METHOD("create", "sx", "sy", "sz"), &VoxelBuffer::create);
ClassDB::bind_method(D_METHOD("clear"), &VoxelBuffer::clear);
ClassDB::bind_method(D_METHOD("get_size"), &VoxelBuffer::_b_get_size);
ClassDB::bind_method(D_METHOD("get_size"), &VoxelBuffer::get_size);
ClassDB::bind_method(D_METHOD("get_size_x"), &VoxelBuffer::get_size_x);
ClassDB::bind_method(D_METHOD("get_size_y"), &VoxelBuffer::get_size_y);
ClassDB::bind_method(D_METHOD("get_size_z"), &VoxelBuffer::get_size_z);
ClassDB::bind_method(D_METHOD("set_voxel", "value", "x", "y", "z", "channel"),
&VoxelBuffer::_b_set_voxel, DEFVAL(0));
&VoxelBuffer::set_voxel, DEFVAL(0));
ClassDB::bind_method(D_METHOD("set_voxel_f", "value", "x", "y", "z", "channel"),
&VoxelBuffer::_b_set_voxel_f, DEFVAL(0));
ClassDB::bind_method(D_METHOD("set_voxel_v", "value", "pos", "channel"), &VoxelBuffer::_b_set_voxel_v, DEFVAL(0));
ClassDB::bind_method(D_METHOD("get_voxel", "x", "y", "z", "channel"), &VoxelBuffer::_b_get_voxel, DEFVAL(0));
&VoxelBuffer::set_voxel_f, DEFVAL(0));
ClassDB::bind_method(D_METHOD("set_voxel_v", "value", "pos", "channel"), &VoxelBuffer::set_voxel_v, DEFVAL(0));
ClassDB::bind_method(D_METHOD("get_voxel", "x", "y", "z", "channel"), &VoxelBuffer::get_voxel, DEFVAL(0));
ClassDB::bind_method(D_METHOD("get_voxel_f", "x", "y", "z", "channel"), &VoxelBuffer::get_voxel_f, DEFVAL(0));
ClassDB::bind_method(D_METHOD("get_voxel_tool"), &VoxelBuffer::get_voxel_tool);
@ -877,12 +150,12 @@ void VoxelBuffer::_bind_methods() {
ClassDB::bind_method(D_METHOD("fill", "value", "channel"), &VoxelBuffer::fill, DEFVAL(0));
ClassDB::bind_method(D_METHOD("fill_f", "value", "channel"), &VoxelBuffer::fill_f, DEFVAL(0));
ClassDB::bind_method(D_METHOD("fill_area", "value", "min", "max", "channel"),
&VoxelBuffer::_b_fill_area, DEFVAL(0));
ClassDB::bind_method(D_METHOD("copy_channel_from", "other", "channel"), &VoxelBuffer::_b_copy_channel_from);
&VoxelBuffer::fill_area, DEFVAL(0));
ClassDB::bind_method(D_METHOD("copy_channel_from", "other", "channel"), &VoxelBuffer::copy_channel_from);
ClassDB::bind_method(D_METHOD("copy_channel_from_area", "other", "src_min", "src_max", "dst_min", "channel"),
&VoxelBuffer::_b_copy_channel_from_area);
&VoxelBuffer::copy_channel_from_area);
ClassDB::bind_method(D_METHOD("downscale_to", "dst", "src_min", "src_max", "dst_min"),
&VoxelBuffer::_b_downscale_to);
&VoxelBuffer::downscale_to);
ClassDB::bind_method(D_METHOD("is_uniform", "channel"), &VoxelBuffer::is_uniform);
// TODO Rename `compress_uniform_channels`
@ -891,17 +164,17 @@ void VoxelBuffer::_bind_methods() {
ClassDB::bind_method(D_METHOD("get_block_metadata"), &VoxelBuffer::get_block_metadata);
ClassDB::bind_method(D_METHOD("set_block_metadata", "meta"), &VoxelBuffer::set_block_metadata);
ClassDB::bind_method(D_METHOD("get_voxel_metadata", "pos"), &VoxelBuffer::_b_get_voxel_metadata);
ClassDB::bind_method(D_METHOD("set_voxel_metadata", "pos", "value"), &VoxelBuffer::_b_set_voxel_metadata);
ClassDB::bind_method(D_METHOD("get_voxel_metadata", "pos"), &VoxelBuffer::get_voxel_metadata);
ClassDB::bind_method(D_METHOD("set_voxel_metadata", "pos", "value"), &VoxelBuffer::set_voxel_metadata);
ClassDB::bind_method(D_METHOD("for_each_voxel_metadata", "callback"), &VoxelBuffer::for_each_voxel_metadata);
ClassDB::bind_method(D_METHOD("for_each_voxel_metadata_in_area", "callback", "min_pos", "max_pos"),
&VoxelBuffer::_b_for_each_voxel_metadata_in_area);
&VoxelBuffer::for_each_voxel_metadata_in_area);
ClassDB::bind_method(D_METHOD("clear_voxel_metadata"), &VoxelBuffer::clear_voxel_metadata);
ClassDB::bind_method(D_METHOD("clear_voxel_metadata_in_area", "min_pos", "max_pos"),
&VoxelBuffer::_b_clear_voxel_metadata_in_area);
&VoxelBuffer::clear_voxel_metadata_in_area);
ClassDB::bind_method(
D_METHOD("copy_voxel_metadata_in_area", "src_buffer", "src_min_pos", "src_max_pos", "dst_min_pos"),
&VoxelBuffer::_b_copy_voxel_metadata_in_area);
&VoxelBuffer::copy_voxel_metadata_in_area);
BIND_ENUM_CONSTANT(CHANNEL_TYPE);
BIND_ENUM_CONSTANT(CHANNEL_SDF);
@ -925,33 +198,3 @@ void VoxelBuffer::_bind_methods() {
BIND_CONSTANT(MAX_SIZE);
}
void VoxelBuffer::_b_copy_channel_from(Ref<VoxelBuffer> other, unsigned int channel) {
ERR_FAIL_COND(other.is_null());
copy_from(**other, channel);
}
void VoxelBuffer::_b_copy_channel_from_area(Ref<VoxelBuffer> other, Vector3 src_min, Vector3 src_max, Vector3 dst_min,
unsigned int channel) {
ERR_FAIL_COND(other.is_null());
copy_from(**other, Vector3i(src_min), Vector3i(src_max), Vector3i(dst_min), channel);
}
void VoxelBuffer::_b_downscale_to(Ref<VoxelBuffer> dst, Vector3 src_min, Vector3 src_max, Vector3 dst_min) const {
ERR_FAIL_COND(dst.is_null());
downscale_to(**dst, Vector3i(src_min), Vector3i(src_max), Vector3i(dst_min));
}
void VoxelBuffer::_b_for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Vector3 min_pos, Vector3 max_pos) {
for_each_voxel_metadata_in_area(callback, Box3i::from_min_max(Vector3i(min_pos), Vector3i(max_pos)));
}
void VoxelBuffer::_b_clear_voxel_metadata_in_area(Vector3 min_pos, Vector3 max_pos) {
clear_voxel_metadata_in_area(Box3i::from_min_max(Vector3i(min_pos), Vector3i(max_pos)));
}
void VoxelBuffer::_b_copy_voxel_metadata_in_area(Ref<VoxelBuffer> src_buffer, Vector3 src_min_pos, Vector3 src_max_pos,
Vector3 dst_pos) {
copy_voxel_metadata_in_area(
src_buffer, Box3i::from_min_max(Vector3i(src_min_pos), Vector3i(src_max_pos)), dst_pos);
}

View File

@ -1,394 +1,113 @@
#ifndef VOXEL_BUFFER_H
#define VOXEL_BUFFER_H
#include "../constants/voxel_constants.h"
#include "../util/fixed_array.h"
#include "../util/math/box3i.h"
#include "../util/span.h"
#include "funcs.h"
#include <core/map.h>
#include <core/reference.h>
#include <core/vector.h>
#include "voxel_buffer_internal.h"
class VoxelTool;
class Image;
class FuncRef;
// Dense voxels data storage.
// Organized in channels of configurable bit depth.
// Values can be interpreted either as unsigned integers or normalized floats.
// TODO I wish I could call the original class `VoxelBuffer` and expose this other one with that name.
// Godot doesn't seem to allow doing that. So the original class had to be named `VoxelBufferInternal`...
// Scripts-facing wrapper around VoxelBufferInternal.
// It is separate because being a Godot object requires carrying more baggage, and because this data type can
// be instanced many times while rarely being accessed directly from scripts, so it is better to keep that part out
class VoxelBuffer : public Reference {
// TODO Perhaps we could decouple the Godot class and the internals, so we don't always need `Reference`?
GDCLASS(VoxelBuffer, Reference)
public:
enum ChannelId {
CHANNEL_TYPE = 0,
CHANNEL_SDF,
CHANNEL_COLOR,
CHANNEL_INDICES,
CHANNEL_WEIGHTS,
CHANNEL_DATA5,
CHANNEL_DATA6,
CHANNEL_DATA7,
// Arbitrary value, 8 should be enough. Tweak for your needs.
MAX_CHANNELS
CHANNEL_TYPE = VoxelBufferInternal::CHANNEL_TYPE,
CHANNEL_SDF = VoxelBufferInternal::CHANNEL_SDF,
CHANNEL_COLOR = VoxelBufferInternal::CHANNEL_COLOR,
CHANNEL_INDICES = VoxelBufferInternal::CHANNEL_INDICES,
CHANNEL_WEIGHTS = VoxelBufferInternal::CHANNEL_WEIGHTS,
CHANNEL_DATA5 = VoxelBufferInternal::CHANNEL_DATA5,
CHANNEL_DATA6 = VoxelBufferInternal::CHANNEL_DATA6,
CHANNEL_DATA7 = VoxelBufferInternal::CHANNEL_DATA7,
MAX_CHANNELS = VoxelBufferInternal::MAX_CHANNELS,
};
// TODO use C++17 inline to initialize right here...
static const char *CHANNEL_ID_HINT_STRING;
static const int ALL_CHANNELS_MASK = 0xff;
enum Compression {
COMPRESSION_NONE = 0,
COMPRESSION_UNIFORM,
COMPRESSION_NONE = VoxelBufferInternal::COMPRESSION_NONE,
COMPRESSION_UNIFORM = VoxelBufferInternal::COMPRESSION_UNIFORM,
//COMPRESSION_RLE,
COMPRESSION_COUNT
COMPRESSION_COUNT = VoxelBufferInternal::COMPRESSION_COUNT
};
enum Depth {
DEPTH_8_BIT,
DEPTH_16_BIT,
DEPTH_32_BIT,
DEPTH_64_BIT,
DEPTH_COUNT
DEPTH_8_BIT = VoxelBufferInternal::DEPTH_8_BIT,
DEPTH_16_BIT = VoxelBufferInternal::DEPTH_16_BIT,
DEPTH_32_BIT = VoxelBufferInternal::DEPTH_32_BIT,
DEPTH_64_BIT = VoxelBufferInternal::DEPTH_64_BIT,
DEPTH_COUNT = VoxelBufferInternal::DEPTH_COUNT
};
static inline uint32_t get_depth_byte_count(VoxelBuffer::Depth d) {
CRASH_COND(d < 0 || d >= VoxelBuffer::DEPTH_COUNT);
return 1 << d;
}
static inline Depth get_depth_from_size(size_t size) {
switch (size) {
case 1:
return DEPTH_8_BIT;
case 2:
return DEPTH_16_BIT;
case 4:
return DEPTH_32_BIT;
case 8:
return DEPTH_64_BIT;
default:
CRASH_NOW();
}
}
static const Depth DEFAULT_CHANNEL_DEPTH = DEPTH_8_BIT;
static const Depth DEFAULT_TYPE_CHANNEL_DEPTH = DEPTH_16_BIT;
static const Depth DEFAULT_SDF_CHANNEL_DEPTH = DEPTH_16_BIT;
static const Depth DEFAULT_INDICES_CHANNEL_DEPTH = DEPTH_16_BIT;
static const Depth DEFAULT_WEIGHTS_CHANNEL_DEPTH = DEPTH_16_BIT;
// Limit was made explicit for serialization reasons, and also because there must be a reasonable one
static const uint32_t MAX_SIZE = 65535;
struct Channel {
// Allocated when the channel is populated.
// Flat array, in order [z][x][y] because it allows faster vertical-wise access (the engine is Y-up).
uint8_t *data = nullptr;
// Default value when data is null
uint64_t defval = 0;
Depth depth = DEFAULT_CHANNEL_DEPTH;
uint32_t size_in_bytes = 0;
};
VoxelBuffer();
VoxelBuffer(std::shared_ptr<VoxelBufferInternal> &other);
~VoxelBuffer();
void create(unsigned int sx, unsigned int sy, unsigned int sz);
void create(Vector3i size);
inline const VoxelBufferInternal &get_buffer() const { return *_buffer; }
inline VoxelBufferInternal &get_buffer() { return *_buffer; }
//inline std::shared_ptr<VoxelBufferInternal> get_buffer_shared() { return _buffer; }
Vector3 get_size() const {
return _buffer->get_size().to_vec3();
}
int get_size_x() const { return _buffer->get_size().x; }
int get_size_y() const { return _buffer->get_size().y; }
int get_size_z() const { return _buffer->get_size().z; }
void create(int x, int y, int z) {
_buffer->create(x, y, z);
}
void clear();
void clear_channel(unsigned int channel_index, uint64_t clear_value = 0);
void clear_channel_f(unsigned int channel_index, real_t clear_value);
_FORCE_INLINE_ const Vector3i &get_size() const { return _size; }
void set_default_values(FixedArray<uint64_t, VoxelBuffer::MAX_CHANNELS> values);
uint64_t get_voxel(int x, int y, int z, unsigned int channel_index = 0) const;
void set_voxel(uint64_t value, int x, int y, int z, unsigned int channel_index = 0);
real_t get_voxel_f(int x, int y, int z, unsigned int channel_index = 0) const;
void set_voxel_f(real_t value, int x, int y, int z, unsigned int channel_index = 0);
_FORCE_INLINE_ uint64_t get_voxel(const Vector3i pos, unsigned int channel_index = 0) const {
return get_voxel(pos.x, pos.y, pos.z, channel_index);
uint64_t get_voxel(int x, int y, int z, unsigned int channel) const {
return _buffer->get_voxel(x, y, z, channel);
}
_FORCE_INLINE_ void set_voxel(int value, const Vector3i pos, unsigned int channel_index = 0) {
set_voxel(value, pos.x, pos.y, pos.z, channel_index);
void set_voxel(uint64_t value, int x, int y, int z, unsigned int channel) {
_buffer->set_voxel(value, x, y, z, channel);
}
real_t get_voxel_f(int x, int y, int z, unsigned int channel_index) const;
void set_voxel_f(real_t value, int x, int y, int z, unsigned int channel_index);
void set_voxel_v(uint64_t value, Vector3 pos, unsigned int channel_index) {
_buffer->set_voxel(value, pos.x, pos.y, pos.z, channel_index);
}
void copy_channel_from(Ref<VoxelBuffer> other, unsigned int channel);
void copy_channel_from_area(
Ref<VoxelBuffer> other, Vector3 src_min, Vector3 src_max, Vector3 dst_min, unsigned int channel);
void fill(uint64_t defval, unsigned int channel_index = 0);
void fill_area(uint64_t defval, Vector3i min, Vector3i max, unsigned int channel_index = 0);
void fill_area_f(float fvalue, Vector3i min, Vector3i max, unsigned int channel_index);
void fill_f(real_t value, unsigned int channel = 0);
void fill_area(uint64_t defval, Vector3 min, Vector3 max, unsigned int channel_index) {
_buffer->fill_area(defval, Vector3i(min), Vector3i(max), channel_index);
}
bool is_uniform(unsigned int channel_index) const;
void compress_uniform_channels();
void decompress_channel(unsigned int channel_index);
Compression get_channel_compression(unsigned int channel_index) const;
static uint32_t get_size_in_bytes_for_volume(Vector3i size, Depth depth);
void copy_format(const VoxelBuffer &other);
// Specialized copy functions.
// Note: these functions don't include metadata on purpose.
// If you also want to copy metadata, use the specialized functions.
void copy_from(const VoxelBuffer &other);
void copy_from(const VoxelBuffer &other, unsigned int channel_index);
void copy_from(const VoxelBuffer &other, Vector3i src_min, Vector3i src_max, Vector3i dst_min,
unsigned int channel_index);
// Copy a region from a box of values, passed as a raw array.
// `src_size` is the total 3D size of the source box.
// `src_min` and `src_max` are the sub-region of that box we want to copy.
// `dst_min` is the lower corner where we want the data to be copied into the destination.
template <typename T>
void copy_from(Span<const T> src, Vector3i src_size, Vector3i src_min, Vector3i src_max, Vector3i dst_min,
unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
#ifdef DEBUG_ENABLED
// Size of source and destination values must match
ERR_FAIL_COND(channel.depth != get_depth_from_size(sizeof(T)));
#endif
// This function always decompresses the destination.
// To keep it compressed, either check what you are about to copy,
// or schedule a recompression for later.
decompress_channel(channel_index);
Span<T> dst(static_cast<T *>(channel.data), channel.size_in_bytes / sizeof(T));
copy_3d_region_zxy<T>(dst, _size, dst_min, src, src_size, src_min, src_max);
}
// Copy a region of the data into a dense buffer.
// If the source is compressed, it is decompressed.
// `dst` is a raw array storing grid values in a box.
// `dst_size` is the total size of the box.
// `dst_min` is the lower corner of where we want the source data to be stored.
// `src_min` and `src_max` is the sub-region of the source we want to copy.
template <typename T>
void copy_to(Span<T> dst, Vector3i dst_size, Vector3i dst_min, Vector3i src_min, Vector3i src_max,
unsigned int channel_index) const {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
#ifdef DEBUG_ENABLED
// Size of source and destination values must match
ERR_FAIL_COND(channel.depth != get_depth_from_size(sizeof(T)));
#endif
if (channel.data == nullptr) {
fill_3d_region_zxy<T>(dst, dst_size, dst_min, dst_min + (src_max - src_min), channel.defval);
} else {
Span<const T> src(static_cast<const T *>(channel.data), channel.size_in_bytes / sizeof(T));
copy_3d_region_zxy<T>(dst, dst_size, dst_min, src, _size, src_min, src_max);
}
}
// TODO Deprecate?
// Executes a read-write action on all cells of the provided box that intersect with this buffer.
// `action_func` receives a voxel value from the channel, and returns a modified value.
// if the returned value is different, it will be applied to the buffer.
// Can be used to blend voxels together.
template <typename F>
inline void read_write_action(Box3i box, unsigned int channel_index, F action_func) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
box.clip(Box3i(Vector3i(), _size));
Vector3i min_pos = box.pos;
Vector3i max_pos = box.pos + box.size;
Vector3i pos;
for (pos.z = min_pos.z; pos.z < max_pos.z; ++pos.z) {
for (pos.x = min_pos.x; pos.x < max_pos.x; ++pos.x) {
for (pos.y = min_pos.y; pos.y < max_pos.y; ++pos.y) {
// TODO Optimization: a bunch of checks and branching could be skipped
const uint64_t v0 = get_voxel(pos, channel_index);
const uint64_t v1 = action_func(pos, v0);
if (v0 != v1) {
set_voxel(v1, pos, channel_index);
}
}
}
}
}
static _FORCE_INLINE_ size_t get_index(const Vector3i pos, const Vector3i size) {
return pos.get_zxy_index(size);
}
_FORCE_INLINE_ size_t get_index(unsigned int x, unsigned int y, unsigned int z) const {
return y + _size.y * (x + _size.x * z); // ZXY index
}
template <typename F>
inline void for_each_index_and_pos(const Box3i &box, F f) {
const Vector3i min_pos = box.pos;
const Vector3i max_pos = box.pos + box.size;
Vector3i pos;
for (pos.z = min_pos.z; pos.z < max_pos.z; ++pos.z) {
for (pos.x = min_pos.x; pos.x < max_pos.x; ++pos.x) {
pos.y = min_pos.y;
size_t i = get_index(pos.x, pos.y, pos.z);
for (; pos.y < max_pos.y; ++pos.y) {
f(i, pos);
++i;
}
}
}
}
// Data_T action_func(Vector3i pos, Data_T in_v)
template <typename F, typename Data_T>
void write_box_template(const Box3i &box, unsigned int channel_index, F action_func, Vector3i offset) {
decompress_channel(channel_index);
Channel &channel = _channels[channel_index];
#ifdef DEBUG_ENABLED
ERR_FAIL_COND(!Box3i(Vector3i(), _size).contains(box));
ERR_FAIL_COND(get_depth_byte_count(channel.depth) != sizeof(Data_T));
#endif
Span<Data_T> data = Span<uint8_t>(channel.data, channel.size_in_bytes)
.reinterpret_cast_to<Data_T>();
// `data` is captured by reference so the lambda can write to it; by-value captures are const unless the lambda is declared `mutable`
for_each_index_and_pos(box, [&data, action_func, offset](size_t i, Vector3i pos) {
data.set(i, action_func(pos + offset, data[i]));
});
}
// void action_func(Vector3i pos, Data0_T &inout_v0, Data1_T &inout_v1)
template <typename F, typename Data0_T, typename Data1_T>
void write_box_2_template(const Box3i &box, unsigned int channel_index0, unsigned channel_index1, F action_func,
Vector3i offset) {
decompress_channel(channel_index0);
decompress_channel(channel_index1);
Channel &channel0 = _channels[channel_index0];
Channel &channel1 = _channels[channel_index1];
#ifdef DEBUG_ENABLED
ERR_FAIL_COND(!Box3i(Vector3i(), _size).contains(box));
ERR_FAIL_COND(get_depth_byte_count(channel0.depth) != sizeof(Data0_T));
ERR_FAIL_COND(get_depth_byte_count(channel1.depth) != sizeof(Data1_T));
#endif
Span<Data0_T> data0 = Span<uint8_t>(channel0.data, channel0.size_in_bytes)
.reinterpret_cast_to<Data0_T>();
Span<Data1_T> data1 = Span<uint8_t>(channel1.data, channel1.size_in_bytes)
.reinterpret_cast_to<Data1_T>();
for_each_index_and_pos(box, [action_func, offset, &data0, &data1](size_t i, Vector3i pos) {
// TODO The caller must still specify exactly the correct type, maybe some conversion could be used
action_func(pos + offset, data0[i], data1[i]);
});
}
template <typename F>
void write_box(const Box3i &box, unsigned int channel_index, F action_func, Vector3i offset) {
#ifdef DEBUG_ENABLED
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
#endif
const Channel &channel = _channels[channel_index];
switch (channel.depth) {
case DEPTH_8_BIT:
write_box_template<F, uint8_t>(box, channel_index, action_func, offset);
break;
case DEPTH_16_BIT:
write_box_template<F, uint16_t>(box, channel_index, action_func, offset);
break;
case DEPTH_32_BIT:
write_box_template<F, uint32_t>(box, channel_index, action_func, offset);
break;
case DEPTH_64_BIT:
write_box_template<F, uint64_t>(box, channel_index, action_func, offset);
break;
default:
ERR_FAIL();
break;
}
}
/*template <typename F>
void write_box_2(const Box3i &box, unsigned int channel_index0, unsigned int channel_index1, F action_func,
Vector3i offset) {
#ifdef DEBUG_ENABLED
ERR_FAIL_INDEX(channel_index0, MAX_CHANNELS);
ERR_FAIL_INDEX(channel_index1, MAX_CHANNELS);
#endif
const Channel &channel0 = _channels[channel_index0];
const Channel &channel1 = _channels[channel_index1];
#ifdef DEBUG_ENABLED
// TODO Find a better way to handle combination explosion. For now I allow only what's really used.
ERR_FAIL_COND_MSG(channel1.depth != DEPTH_16_BIT, "Second channel depth is hardcoded to 16 for now");
#endif
switch (channel.depth) {
case DEPTH_8_BIT:
write_box_2_template<F, uint8_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
case DEPTH_16_BIT:
write_box_2_template<F, uint16_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
case DEPTH_32_BIT:
write_box_2_template<F, uint32_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
case DEPTH_64_BIT:
write_box_2_template<F, uint64_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
default:
ERR_FAIL();
break;
}
}*/
static inline FixedArray<uint8_t, MAX_CHANNELS> mask_to_channels_list(
uint8_t channels_mask, unsigned int &out_count) {
FixedArray<uint8_t, VoxelBuffer::MAX_CHANNELS> channels;
unsigned int channel_count = 0;
for (unsigned int channel_index = 0; channel_index < VoxelBuffer::MAX_CHANNELS; ++channel_index) {
if (((1 << channel_index) & channels_mask) != 0) {
channels[channel_count] = channel_index;
++channel_count;
}
}
out_count = channel_count;
return channels;
}
void downscale_to(Ref<VoxelBuffer> dst, Vector3 src_min, Vector3 src_max, Vector3 dst_min) const;
Ref<VoxelBuffer> duplicate(bool include_metadata) const;
_FORCE_INLINE_ bool is_position_valid(unsigned int x, unsigned int y, unsigned int z) const {
return x < (unsigned)_size.x && y < (unsigned)_size.y && z < (unsigned)_size.z;
}
_FORCE_INLINE_ bool is_position_valid(const Vector3i pos) const {
return is_position_valid(pos.x, pos.y, pos.z);
}
_FORCE_INLINE_ bool is_box_valid(const Box3i box) const {
return Box3i(Vector3i(), _size).contains(box);
}
_FORCE_INLINE_ uint64_t get_volume() const {
return _size.volume();
}
// TODO Have a template version based on channel depth
bool get_channel_raw(unsigned int channel_index, Span<uint8_t> &slice) const;
void downscale_to(VoxelBuffer &dst, Vector3i src_min, Vector3i src_max, Vector3i dst_min) const;
Ref<VoxelTool> get_voxel_tool();
bool equals(const VoxelBuffer &p_other) const;
void set_channel_depth(unsigned int channel_index, Depth new_depth);
Depth get_channel_depth(unsigned int channel_index) const;
static uint32_t get_depth_bit_count(Depth d);
// When using lower than 32-bit resolution for terrain signed distance fields,
// it should be scaled to better fit the range of represented values since the storage is normalized to -1..1.
@ -397,98 +116,34 @@ public:
// Metadata
Variant get_block_metadata() const { return _block_metadata; }
Variant get_block_metadata() const { return _buffer->get_block_metadata(); }
void set_block_metadata(Variant meta);
Variant get_voxel_metadata(Vector3i pos) const;
void set_voxel_metadata(Vector3i pos, Variant meta);
template <typename F>
void for_each_voxel_metadata_in_area(Box3i box, F callback) const {
const Map<Vector3i, Variant>::Element *elem = _voxel_metadata.front();
while (elem != nullptr) {
if (box.contains(elem->key())) {
callback(elem->key(), elem->value());
}
elem = elem->next();
}
Variant get_voxel_metadata(Vector3 pos) const {
return _buffer->get_voxel_metadata(Vector3i(pos));
}
void set_voxel_metadata(Vector3 pos, Variant meta) {
_buffer->set_voxel_metadata(Vector3i(pos), meta);
}
void for_each_voxel_metadata(Ref<FuncRef> callback) const;
void for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Box3i box) const;
void for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Vector3 min_pos, Vector3 max_pos);
void copy_voxel_metadata_in_area(
Ref<VoxelBuffer> src_buffer, Vector3 src_min_pos, Vector3 src_max_pos, Vector3 dst_pos);
void clear_voxel_metadata();
void clear_voxel_metadata_in_area(Box3i box);
void copy_voxel_metadata_in_area(Ref<VoxelBuffer> src_buffer, Box3i src_box, Vector3i dst_origin);
void copy_voxel_metadata(const VoxelBuffer &src_buffer);
const Map<Vector3i, Variant> &get_voxel_metadata() const { return _voxel_metadata; }
// Internal synchronization.
// This lock is optional, and used internally at the moment, only in multithreaded areas.
inline const RWLock &get_lock() const { return _rw_lock; }
inline RWLock &get_lock() { return _rw_lock; }
void clear_voxel_metadata_in_area(Vector3 min_pos, Vector3 max_pos);
// Debugging
Ref<Image> debug_print_sdf_to_image_top_down();
private:
bool create_channel_noinit(int i, Vector3i size);
bool create_channel(int i, Vector3i size, uint64_t defval);
void delete_channel(int i);
static void _bind_methods();
int get_size_x() const { return _size.x; }
int get_size_y() const { return _size.y; }
int get_size_z() const { return _size.z; }
// Bindings
Vector3 _b_get_size() const { return _size.to_vec3(); }
void _b_create(int x, int y, int z) { create(x, y, z); }
uint64_t _b_get_voxel(int x, int y, int z, unsigned int channel) const { return get_voxel(x, y, z, channel); }
void _b_set_voxel(uint64_t value, int x, int y, int z, unsigned int channel) { set_voxel(value, x, y, z, channel); }
void _b_copy_channel_from(Ref<VoxelBuffer> other, unsigned int channel);
void _b_copy_channel_from_area(Ref<VoxelBuffer> other, Vector3 src_min, Vector3 src_max, Vector3 dst_min, unsigned int channel);
void _b_fill_area(uint64_t defval, Vector3 min, Vector3 max, unsigned int channel_index) { fill_area(defval, Vector3i(min), Vector3i(max), channel_index); }
void _b_set_voxel_f(real_t value, int x, int y, int z, unsigned int channel) { set_voxel_f(value, x, y, z, channel); }
void _b_set_voxel_v(uint64_t value, Vector3 pos, unsigned int channel_index = 0) { set_voxel(value, pos.x, pos.y, pos.z, channel_index); }
void _b_downscale_to(Ref<VoxelBuffer> dst, Vector3 src_min, Vector3 src_max, Vector3 dst_min) const;
Variant _b_get_voxel_metadata(Vector3 pos) const { return get_voxel_metadata(Vector3i(pos)); }
void _b_set_voxel_metadata(Vector3 pos, Variant meta) { set_voxel_metadata(Vector3i(pos), meta); }
void _b_for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Vector3 min_pos, Vector3 max_pos);
void _b_clear_voxel_metadata_in_area(Vector3 min_pos, Vector3 max_pos);
void _b_copy_voxel_metadata_in_area(Ref<VoxelBuffer> src_buffer, Vector3 src_min_pos, Vector3 src_max_pos, Vector3 dst_pos);
private:
// Each channel can store arbitrary data.
// For example, you can decide to store colors (R, G, B, A), gameplay types (type, state, light) or both.
FixedArray<Channel, MAX_CHANNELS> _channels;
// How many voxels are there in the three directions. All populated channels have the same size.
Vector3i _size;
Variant _block_metadata;
Map<Vector3i, Variant> _voxel_metadata;
// TODO It may be preferable to actually move away from storing an RWLock in every buffer in the future.
// We should be able to find a solution because very few of these locks are actually used at a given time.
// It worked so far on PC but other platforms like the PS5 might have a pretty low limit (8K?)
RWLock _rw_lock;
// Not sure yet if we'll really need shared_ptr or just no pointer
std::shared_ptr<VoxelBufferInternal> _buffer;
};
inline void debug_check_texture_indices_packed_u16(const VoxelBuffer &voxels) {
for (int z = 0; z < voxels.get_size().z; ++z) {
for (int x = 0; x < voxels.get_size().x; ++x) {
for (int y = 0; y < voxels.get_size().y; ++y) {
uint16_t pi = voxels.get_voxel(x, y, z, VoxelBuffer::CHANNEL_INDICES);
FixedArray<uint8_t, 4> indices = decode_indices_from_packed_u16(pi);
debug_check_texture_indices(indices);
}
}
}
}
VARIANT_ENUM_CAST(VoxelBuffer::ChannelId)
VARIANT_ENUM_CAST(VoxelBuffer::Depth)
VARIANT_ENUM_CAST(VoxelBuffer::Compression)

View File

@ -0,0 +1,878 @@
#define VOXEL_BUFFER_USE_MEMORY_POOL
#ifdef VOXEL_BUFFER_USE_MEMORY_POOL
#include "voxel_memory_pool.h"
#endif
#include "../util/funcs.h"
#include "../util/profiling.h"
#include "voxel_buffer_internal.h"
#include <core/func_ref.h>
#include <core/image.h>
#include <core/io/marshalls.h>
#include <core/math/math_funcs.h>
#include <string.h>
namespace {
inline uint8_t *allocate_channel_data(size_t size) {
#ifdef VOXEL_BUFFER_USE_MEMORY_POOL
return VoxelMemoryPool::get_singleton()->allocate(size);
#else
return (uint8_t *)memalloc(size * sizeof(uint8_t));
#endif
}
inline void free_channel_data(uint8_t *data, uint32_t size) {
#ifdef VOXEL_BUFFER_USE_MEMORY_POOL
VoxelMemoryPool::get_singleton()->recycle(data, size);
#else
memfree(data);
#endif
}
uint64_t g_depth_max_values[] = {
0xff, // 8
0xffff, // 16
0xffffffff, // 32
0xffffffffffffffff // 64
};
inline uint32_t get_depth_bit_count(VoxelBufferInternal::Depth d) {
CRASH_COND(d < 0 || d >= VoxelBufferInternal::DEPTH_COUNT);
return VoxelBufferInternal::get_depth_byte_count(d) << 3;
}
inline uint64_t get_max_value_for_depth(VoxelBufferInternal::Depth d) {
CRASH_COND(d < 0 || d >= VoxelBufferInternal::DEPTH_COUNT);
return g_depth_max_values[d];
}
inline uint64_t clamp_value_for_depth(uint64_t value, VoxelBufferInternal::Depth d) {
const uint64_t max_val = get_max_value_for_depth(d);
if (value >= max_val) {
return max_val;
}
return value;
}
static_assert(sizeof(uint32_t) == sizeof(float), "uint32_t and float cannot be marshalled back and forth");
static_assert(sizeof(uint64_t) == sizeof(double), "uint64_t and double cannot be marshalled back and forth");
inline uint64_t real_to_raw_voxel(real_t value, VoxelBufferInternal::Depth depth) {
switch (depth) {
case VoxelBufferInternal::DEPTH_8_BIT:
return norm_to_u8(value);
case VoxelBufferInternal::DEPTH_16_BIT:
return norm_to_u16(value);
case VoxelBufferInternal::DEPTH_32_BIT: {
MarshallFloat m;
m.f = value;
return m.i;
}
case VoxelBufferInternal::DEPTH_64_BIT: {
MarshallDouble m;
m.d = value;
return m.l;
}
default:
CRASH_NOW();
return 0;
}
}
inline real_t raw_voxel_to_real(uint64_t value, VoxelBufferInternal::Depth depth) {
// Depths below 32 are normalized between -1 and 1
switch (depth) {
case VoxelBufferInternal::DEPTH_8_BIT:
return u8_to_norm(value);
case VoxelBufferInternal::DEPTH_16_BIT:
return u16_to_norm(value);
case VoxelBufferInternal::DEPTH_32_BIT: {
MarshallFloat m;
m.i = value;
return m.f;
}
case VoxelBufferInternal::DEPTH_64_BIT: {
MarshallDouble m;
m.l = value;
return m.d;
}
default:
CRASH_NOW();
return 0;
}
}
} // namespace
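// Illustrative sketch (not part of this commit): round-tripping a value through the helpers above.
// 8-bit and 16-bit depths quantize the [-1..1] range, so results are rounded; 32-bit and 64-bit are bit-cast verbatim.
static void example_quantization_round_trip() {
const real_t sd = 0.25f;
const uint64_t raw16 = real_to_raw_voxel(sd, VoxelBufferInternal::DEPTH_16_BIT);
const real_t back16 = raw_voxel_to_real(raw16, VoxelBufferInternal::DEPTH_16_BIT);
// back16 is approximately 0.25, rounded to the nearest 16-bit step
const uint64_t raw32 = real_to_raw_voxel(sd, VoxelBufferInternal::DEPTH_32_BIT);
const real_t back32 = raw_voxel_to_real(raw32, VoxelBufferInternal::DEPTH_32_BIT);
// back32 is exactly 0.25, since the float is stored verbatim through MarshallFloat
(void)back16;
(void)back32;
}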
VoxelBufferInternal::VoxelBufferInternal() {
// Minecraft uses way more than 255 block types and there is room for eventual metadata such as rotation
_channels[CHANNEL_TYPE].depth = VoxelBufferInternal::DEFAULT_TYPE_CHANNEL_DEPTH;
_channels[CHANNEL_TYPE].defval = 0;
// 16-bit is better on average to handle large worlds
_channels[CHANNEL_SDF].depth = VoxelBufferInternal::DEFAULT_SDF_CHANNEL_DEPTH;
_channels[CHANNEL_SDF].defval = 0xffff;
_channels[CHANNEL_INDICES].depth = VoxelBufferInternal::DEPTH_16_BIT;
_channels[CHANNEL_INDICES].defval = encode_indices_to_packed_u16(0, 1, 2, 3);
_channels[CHANNEL_WEIGHTS].depth = VoxelBufferInternal::DEPTH_16_BIT;
_channels[CHANNEL_WEIGHTS].defval = encode_weights_to_packed_u16(15, 0, 0, 0);
}
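// Illustrative sketch (assumption, not in this commit): a freshly created buffer exposes the defaults above
// without allocating any channel data.
static void example_default_state() {
VoxelBufferInternal vb;
vb.create(8, 8, 8);
// Reads back 0xffff, which dequantizes to +1.0, i.e. "fully outside"
const uint64_t sdf = vb.get_voxel(0, 0, 0, VoxelBufferInternal::CHANNEL_SDF);
// Every channel still reports COMPRESSION_UNIFORM at this point
const VoxelBufferInternal::Compression c = vb.get_channel_compression(VoxelBufferInternal::CHANNEL_SDF);
(void)sdf;
(void)c;
}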
VoxelBufferInternal::VoxelBufferInternal(VoxelBufferInternal &&src) {
src.move_to(*this);
}
VoxelBufferInternal::~VoxelBufferInternal() {
clear();
}
VoxelBufferInternal &VoxelBufferInternal::operator=(VoxelBufferInternal &&src) {
src.move_to(*this);
return *this;
}
void VoxelBufferInternal::create(unsigned int sx, unsigned int sy, unsigned int sz) {
ERR_FAIL_COND(sx > MAX_SIZE || sy > MAX_SIZE || sz > MAX_SIZE);
clear_voxel_metadata();
Vector3i new_size(sx, sy, sz);
if (new_size != _size) {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
Channel &channel = _channels[i];
if (channel.data) {
// Channel already contained data
delete_channel(i);
ERR_FAIL_COND(!create_channel(i, new_size, channel.defval));
}
}
_size = new_size;
}
}
void VoxelBufferInternal::create(Vector3i size) {
create(size.x, size.y, size.z);
}
void VoxelBufferInternal::clear() {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
Channel &channel = _channels[i];
if (channel.data) {
delete_channel(i);
}
}
_size = Vector3i();
clear_voxel_metadata();
}
void VoxelBufferInternal::clear_channel(unsigned int channel_index, uint64_t clear_value) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
if (channel.data != nullptr) {
delete_channel(channel_index);
}
channel.defval = clamp_value_for_depth(clear_value, channel.depth);
}
void VoxelBufferInternal::clear_channel_f(unsigned int channel_index, real_t clear_value) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
clear_channel(channel_index, real_to_raw_voxel(clear_value, channel.depth));
}
void VoxelBufferInternal::set_default_values(FixedArray<uint64_t, VoxelBufferInternal::MAX_CHANNELS> values) {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
_channels[i].defval = clamp_value_for_depth(values[i], _channels[i].depth);
}
}
uint64_t VoxelBufferInternal::get_voxel(int x, int y, int z, unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, 0);
ERR_FAIL_COND_V_MSG(!is_position_valid(x, y, z), 0, String("At position ({0}, {1}, {2})").format(varray(x, y, z)));
const Channel &channel = _channels[channel_index];
if (channel.data != nullptr) {
const uint32_t i = get_index(x, y, z);
switch (channel.depth) {
case DEPTH_8_BIT:
return channel.data[i];
case DEPTH_16_BIT:
return reinterpret_cast<uint16_t *>(channel.data)[i];
case DEPTH_32_BIT:
return reinterpret_cast<uint32_t *>(channel.data)[i];
case DEPTH_64_BIT:
return reinterpret_cast<uint64_t *>(channel.data)[i];
default:
CRASH_NOW();
return 0;
}
} else {
return channel.defval;
}
}
void VoxelBufferInternal::set_voxel(uint64_t value, int x, int y, int z, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
ERR_FAIL_COND_MSG(!is_position_valid(x, y, z), String("At position ({0}, {1}, {2})").format(varray(x, y, z)));
Channel &channel = _channels[channel_index];
value = clamp_value_for_depth(value, channel.depth);
bool do_set = true;
if (channel.data == nullptr) {
if (channel.defval != value) {
// Allocate channel with same initial values as defval
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
} else {
do_set = false;
}
}
if (do_set) {
const uint32_t i = get_index(x, y, z);
switch (channel.depth) {
case DEPTH_8_BIT:
channel.data[i] = value;
break;
case DEPTH_16_BIT:
reinterpret_cast<uint16_t *>(channel.data)[i] = value;
break;
case DEPTH_32_BIT:
reinterpret_cast<uint32_t *>(channel.data)[i] = value;
break;
case DEPTH_64_BIT:
reinterpret_cast<uint64_t *>(channel.data)[i] = value;
break;
default:
CRASH_NOW();
break;
}
}
}
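// Illustrative sketch (not part of this commit): the branch above implements copy-on-write.
// Writing the default value into a uniform channel is a no-op; writing anything else allocates the channel first.
static void example_lazy_allocation() {
VoxelBufferInternal vb;
vb.create(4, 4, 4);
vb.set_voxel(0, 1, 1, 1, VoxelBufferInternal::CHANNEL_TYPE); // Same as defval, stays uniform
vb.set_voxel(42, 1, 1, 1, VoxelBufferInternal::CHANNEL_TYPE); // Different value, allocates the channel
}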
real_t VoxelBufferInternal::get_voxel_f(int x, int y, int z, unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, 0);
return raw_voxel_to_real(get_voxel(x, y, z, channel_index), _channels[channel_index].depth);
}
void VoxelBufferInternal::set_voxel_f(real_t value, int x, int y, int z, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
set_voxel(real_to_raw_voxel(value, _channels[channel_index].depth), x, y, z, channel_index);
}
void VoxelBufferInternal::fill(uint64_t defval, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
defval = clamp_value_for_depth(defval, channel.depth);
if (channel.data == nullptr) {
// Channel is already optimized and uniform
if (channel.defval == defval) {
// No change
return;
} else {
// Just change default value
channel.defval = defval;
return;
}
}
const size_t volume = get_volume();
switch (channel.depth) {
case DEPTH_8_BIT:
memset(channel.data, defval, channel.size_in_bytes);
break;
case DEPTH_16_BIT:
for (size_t i = 0; i < volume; ++i) {
reinterpret_cast<uint16_t *>(channel.data)[i] = defval;
}
break;
case DEPTH_32_BIT:
for (size_t i = 0; i < volume; ++i) {
reinterpret_cast<uint32_t *>(channel.data)[i] = defval;
}
break;
case DEPTH_64_BIT:
for (size_t i = 0; i < volume; ++i) {
reinterpret_cast<uint64_t *>(channel.data)[i] = defval;
}
break;
default:
CRASH_NOW();
break;
}
}
void VoxelBufferInternal::fill_area(uint64_t defval, Vector3i min, Vector3i max, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Vector3i::sort_min_max(min, max);
min.clamp_to(Vector3i(0, 0, 0), _size + Vector3i(1, 1, 1));
max.clamp_to(Vector3i(0, 0, 0), _size + Vector3i(1, 1, 1));
const Vector3i area_size = max - min;
if (area_size.x == 0 || area_size.y == 0 || area_size.z == 0) {
return;
}
Channel &channel = _channels[channel_index];
defval = clamp_value_for_depth(defval, channel.depth);
if (channel.data == nullptr) {
if (channel.defval == defval) {
return;
} else {
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
}
}
Vector3i pos;
const size_t volume = get_volume();
for (pos.z = min.z; pos.z < max.z; ++pos.z) {
for (pos.x = min.x; pos.x < max.x; ++pos.x) {
const size_t dst_ri = get_index(pos.x, pos.y + min.y, pos.z);
CRASH_COND(dst_ri >= volume);
switch (channel.depth) {
case DEPTH_8_BIT:
// Fill row by row
memset(&channel.data[dst_ri], defval, area_size.y * sizeof(uint8_t));
break;
case DEPTH_16_BIT:
for (int i = 0; i < area_size.y; ++i) {
((uint16_t *)channel.data)[dst_ri + i] = defval;
}
break;
case DEPTH_32_BIT:
for (int i = 0; i < area_size.y; ++i) {
((uint32_t *)channel.data)[dst_ri + i] = defval;
}
break;
case DEPTH_64_BIT:
for (int i = 0; i < area_size.y; ++i) {
((uint64_t *)channel.data)[dst_ri + i] = defval;
}
break;
default:
CRASH_NOW();
break;
}
}
}
}
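// Note on the loops above (illustrative, not part of this commit): since the layout is ZXY with Y contiguous,
// the box is filled one Y-row at a time. Each row starts at get_index(x, min.y, z) and spans area_size.y
// consecutive elements, which is why the 8-bit case can use a single memset per row.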
void VoxelBufferInternal::fill_area_f(float fvalue, Vector3i min, Vector3i max, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
fill_area(real_to_raw_voxel(fvalue, channel.depth), min, max, channel_index);
}
void VoxelBufferInternal::fill_f(real_t value, unsigned int channel) {
ERR_FAIL_INDEX(channel, MAX_CHANNELS);
fill(real_to_raw_voxel(value, _channels[channel].depth), channel);
}
template <typename T>
inline bool is_uniform_b(const uint8_t *data, size_t item_count) {
return is_uniform<T>(reinterpret_cast<const T *>(data), item_count);
}
bool VoxelBufferInternal::is_uniform(unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, true);
const Channel &channel = _channels[channel_index];
if (channel.data == nullptr) {
// Channel has been optimized
return true;
}
const size_t volume = get_volume();
// Channel isn't optimized, so must look at each voxel
switch (channel.depth) {
case DEPTH_8_BIT:
return ::is_uniform_b<uint8_t>(channel.data, volume);
case DEPTH_16_BIT:
return ::is_uniform_b<uint16_t>(channel.data, volume);
case DEPTH_32_BIT:
return ::is_uniform_b<uint32_t>(channel.data, volume);
case DEPTH_64_BIT:
return ::is_uniform_b<uint64_t>(channel.data, volume);
default:
CRASH_NOW();
break;
}
return true;
}
void VoxelBufferInternal::compress_uniform_channels() {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
if (_channels[i].data != nullptr && is_uniform(i)) {
// TODO More direct way
const uint64_t v = get_voxel(0, 0, 0, i);
clear_channel(i, v);
}
}
}
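// Illustrative usage (assumption): compress_uniform_channels() is the inverse of decompress_channel();
// running it after bulk edits reclaims channels that ended up holding a single value everywhere.
static void example_recompress() {
VoxelBufferInternal vb;
vb.create(16, 16, 16);
vb.decompress_channel(VoxelBufferInternal::CHANNEL_TYPE); // Forces allocation
vb.compress_uniform_channels(); // Detects uniformity and frees the array again
}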
void VoxelBufferInternal::decompress_channel(unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
if (channel.data == nullptr) {
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
}
}
VoxelBufferInternal::Compression VoxelBufferInternal::get_channel_compression(unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, VoxelBufferInternal::COMPRESSION_NONE);
const Channel &channel = _channels[channel_index];
if (channel.data == nullptr) {
return COMPRESSION_UNIFORM;
}
return COMPRESSION_NONE;
}
void VoxelBufferInternal::copy_format(const VoxelBufferInternal &other) {
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
set_channel_depth(i, other.get_channel_depth(i));
}
}
void VoxelBufferInternal::copy_from(const VoxelBufferInternal &other) {
// Copy all channels, assuming sizes and formats match
for (unsigned int i = 0; i < MAX_CHANNELS; ++i) {
copy_from(other, i);
}
}
void VoxelBufferInternal::copy_from(const VoxelBufferInternal &other, unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
ERR_FAIL_COND(other._size != _size);
Channel &channel = _channels[channel_index];
const Channel &other_channel = other._channels[channel_index];
ERR_FAIL_COND(other_channel.depth != channel.depth);
if (other_channel.data != nullptr) {
if (channel.data == nullptr) {
ERR_FAIL_COND(!create_channel_noinit(channel_index, _size));
}
CRASH_COND(channel.size_in_bytes != other_channel.size_in_bytes);
memcpy(channel.data, other_channel.data, channel.size_in_bytes);
} else if (channel.data != nullptr) {
delete_channel(channel_index);
}
channel.defval = other_channel.defval;
channel.depth = other_channel.depth;
}
// TODO Disallow copying from overlapping areas of the same buffer
void VoxelBufferInternal::copy_from(const VoxelBufferInternal &other, Vector3i src_min, Vector3i src_max,
Vector3i dst_min, unsigned int channel_index) {
//
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
Channel &channel = _channels[channel_index];
const Channel &other_channel = other._channels[channel_index];
ERR_FAIL_COND(other_channel.depth != channel.depth);
if (channel.data == nullptr && other_channel.data == nullptr && channel.defval == other_channel.defval) {
// No action needed
return;
}
if (other_channel.data != nullptr) {
if (channel.data == nullptr) {
// Note, we do this even if the pasted data happens to be all the same value as our current channel.
// We assume that this case is not frequent enough to bother, and compression can happen later
ERR_FAIL_COND(!create_channel(channel_index, _size, channel.defval));
}
const unsigned int item_size = get_depth_byte_count(channel.depth);
Span<const uint8_t> src(other_channel.data, other_channel.size_in_bytes);
Span<uint8_t> dst(channel.data, channel.size_in_bytes);
copy_3d_region_zxy(dst, _size, dst_min, src, other._size, src_min, src_max, item_size);
} else if (channel.defval != other_channel.defval) {
// This logic is still required due to how source and destination regions can be specified.
// The actual size of the destination area must be determined from the source area, after it has been clipped.
Vector3i::sort_min_max(src_min, src_max);
clip_copy_region(src_min, src_max, other._size, dst_min, _size);
const Vector3i area_size = src_max - src_min;
if (area_size.x <= 0 || area_size.y <= 0 || area_size.z <= 0) {
// Degenerate area, we'll not copy anything.
return;
}
fill_area(other_channel.defval, dst_min, dst_min + area_size, channel_index);
}
}
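// Illustrative sketch (assumption): copying an 8x8x8 corner of one buffer into another at an offset, for one
// channel. Depths must match, and this family of functions deliberately does not copy metadata.
static void example_copy_box() {
VoxelBufferInternal a;
a.create(16, 16, 16);
VoxelBufferInternal b;
b.create(16, 16, 16);
b.copy_from(a, Vector3i(0, 0, 0), Vector3i(8, 8, 8), Vector3i(4, 4, 4), VoxelBufferInternal::CHANNEL_TYPE);
}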
void VoxelBufferInternal::duplicate_to(VoxelBufferInternal &dst, bool include_metadata) const {
dst.create(_size);
for (unsigned int i = 0; i < _channels.size(); ++i) {
dst.set_channel_depth(i, _channels[i].depth);
}
dst.copy_from(*this);
if (include_metadata) {
dst.copy_voxel_metadata(*this);
}
}
void VoxelBufferInternal::move_to(VoxelBufferInternal &dst) {
if (this == &dst) {
return;
}
dst.clear();
dst._channels = _channels;
dst._size = _size;
// TODO Optimization: Godot needs move semantics
dst._block_metadata = _block_metadata;
_block_metadata = Variant();
// TODO Optimization: Godot needs move semantics
dst._voxel_metadata = _voxel_metadata;
_voxel_metadata.clear();
for (unsigned int i = 0; i < _channels.size(); ++i) {
Channel &channel = _channels[i];
channel.data = nullptr;
channel.size_in_bytes = 0;
}
}
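// Illustrative sketch (assumption, requires <utility>): the move constructor and move assignment defined
// earlier both delegate to move_to(), which steals channel pointers and metadata and leaves the source cleared.
static void example_move() {
VoxelBufferInternal a;
a.create(8, 8, 8);
VoxelBufferInternal b(std::move(a)); // `a` is now empty, `b` owns the channel data
(void)b;
}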
bool VoxelBufferInternal::get_channel_raw(unsigned int channel_index, Span<uint8_t> &slice) const {
const Channel &channel = _channels[channel_index];
if (channel.data != nullptr) {
slice = Span<uint8_t>(channel.data, 0, channel.size_in_bytes);
return true;
}
slice = Span<uint8_t>();
return false;
}
bool VoxelBufferInternal::create_channel(int i, Vector3i size, uint64_t defval) {
if (!create_channel_noinit(i, size)) {
return false;
}
fill(defval, i);
return true;
}
uint32_t VoxelBufferInternal::get_size_in_bytes_for_volume(Vector3i size, Depth depth) {
// Calculate appropriate size based on bit depth
const unsigned int volume = size.x * size.y * size.z;
const unsigned int bits = volume * ::get_depth_bit_count(depth);
const unsigned int size_in_bytes = (bits >> 3);
return size_in_bytes;
}
bool VoxelBufferInternal::create_channel_noinit(int i, Vector3i size) {
Channel &channel = _channels[i];
size_t size_in_bytes = get_size_in_bytes_for_volume(size, channel.depth);
CRASH_COND(channel.data != nullptr);
channel.data = allocate_channel_data(size_in_bytes);
ERR_FAIL_COND_V(channel.data == nullptr, false);
channel.size_in_bytes = size_in_bytes;
return true;
}
void VoxelBufferInternal::delete_channel(int i) {
Channel &channel = _channels[i];
ERR_FAIL_COND(channel.data == nullptr);
free_channel_data(channel.data, channel.size_in_bytes);
channel.data = nullptr;
channel.size_in_bytes = 0;
}
void VoxelBufferInternal::downscale_to(VoxelBufferInternal &dst, Vector3i src_min, Vector3i src_max,
Vector3i dst_min) const {
// TODO Align input to multiple of two
src_min.clamp_to(Vector3i(), _size);
src_max.clamp_to(Vector3i(), _size + Vector3i(1));
Vector3i dst_max = dst_min + ((src_max - src_min) >> 1);
// TODO This will be wrong if it overlaps the border?
dst_min.clamp_to(Vector3i(), dst._size);
dst_max.clamp_to(Vector3i(), dst._size + Vector3i(1));
for (int channel_index = 0; channel_index < MAX_CHANNELS; ++channel_index) {
const Channel &src_channel = _channels[channel_index];
const Channel &dst_channel = dst._channels[channel_index];
if (src_channel.data == nullptr && dst_channel.data == nullptr && src_channel.defval == dst_channel.defval) {
// No action needed
continue;
}
// Nearest-neighbor downscaling
Vector3i pos;
for (pos.z = dst_min.z; pos.z < dst_max.z; ++pos.z) {
for (pos.x = dst_min.x; pos.x < dst_max.x; ++pos.x) {
for (pos.y = dst_min.y; pos.y < dst_max.y; ++pos.y) {
const Vector3i src_pos = src_min + ((pos - dst_min) << 1);
// TODO Remove check once it works
CRASH_COND(!is_position_valid(src_pos.x, src_pos.y, src_pos.z));
uint64_t v;
if (src_channel.data) {
// TODO Optimized version?
v = get_voxel(src_pos, channel_index);
} else {
v = src_channel.defval;
}
dst.set_voxel(v, pos, channel_index);
}
}
}
}
}
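// Illustrative sketch (assumption): producing a half-resolution LOD. Every destination voxel samples the
// source at twice its local position, so the destination is created at half the source size.
static void example_downscale() {
VoxelBufferInternal src;
src.create(16, 16, 16);
VoxelBufferInternal dst;
dst.create(8, 8, 8);
src.downscale_to(dst, Vector3i(), src.get_size(), Vector3i());
}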
bool VoxelBufferInternal::equals(const VoxelBufferInternal &p_other) const {
if (p_other._size != _size) {
return false;
}
for (int channel_index = 0; channel_index < MAX_CHANNELS; ++channel_index) {
const Channel &channel = _channels[channel_index];
const Channel &other_channel = p_other._channels[channel_index];
if ((channel.data == nullptr) != (other_channel.data == nullptr)) {
// Note: the buffers could still be logically equal if one channel is compressed to a uniform value and the other holds an array filled with that same value
return false;
}
if (channel.depth != other_channel.depth) {
return false;
}
if (channel.data == nullptr) {
if (channel.defval != other_channel.defval) {
return false;
}
} else {
ERR_FAIL_COND_V(channel.size_in_bytes != other_channel.size_in_bytes, false);
for (size_t i = 0; i < channel.size_in_bytes; ++i) {
if (channel.data[i] != other_channel.data[i]) {
return false;
}
}
}
}
return true;
}
void VoxelBufferInternal::set_channel_depth(unsigned int channel_index, Depth new_depth) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
ERR_FAIL_INDEX(new_depth, DEPTH_COUNT);
Channel &channel = _channels[channel_index];
if (channel.depth == new_depth) {
return;
}
if (channel.data != nullptr) {
// TODO Implement conversion and do it when specified
WARN_PRINT("Changing VoxelBuffer depth with present data, this will reset the channel");
delete_channel(channel_index);
}
channel.defval = clamp_value_for_depth(channel.defval, new_depth);
channel.depth = new_depth;
}
VoxelBufferInternal::Depth VoxelBufferInternal::get_channel_depth(unsigned int channel_index) const {
ERR_FAIL_INDEX_V(channel_index, MAX_CHANNELS, DEPTH_8_BIT);
return _channels[channel_index].depth;
}
uint32_t VoxelBufferInternal::get_depth_bit_count(Depth d) {
return ::get_depth_bit_count(d);
}
float VoxelBufferInternal::get_sdf_quantization_scale(Depth d) {
switch (d) {
// Normalized
case DEPTH_8_BIT:
return VoxelConstants::QUANTIZED_SDF_8_BITS_SCALE;
case DEPTH_16_BIT:
return VoxelConstants::QUANTIZED_SDF_16_BITS_SCALE;
// Direct
default:
return 1.f;
}
}
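// Illustrative sketch (assumption): callers writing SDF values are expected to multiply by this scale first,
// so the quantized [-1..1] range is used effectively at 8-bit and 16-bit depths.
static void example_write_scaled_sdf(VoxelBufferInternal &vb, float sd) {
const float scale = VoxelBufferInternal::get_sdf_quantization_scale(vb.get_channel_depth(VoxelBufferInternal::CHANNEL_SDF));
vb.set_voxel_f(sd * scale, 0, 0, 0, VoxelBufferInternal::CHANNEL_SDF);
}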
void VoxelBufferInternal::set_block_metadata(Variant meta) {
_block_metadata = meta;
}
Variant VoxelBufferInternal::get_voxel_metadata(Vector3i pos) const {
ERR_FAIL_COND_V(!is_position_valid(pos), Variant());
const Map<Vector3i, Variant>::Element *elem = _voxel_metadata.find(pos);
if (elem != nullptr) {
return elem->value();
} else {
return Variant();
}
}
void VoxelBufferInternal::set_voxel_metadata(Vector3i pos, Variant meta) {
ERR_FAIL_COND(!is_position_valid(pos));
if (meta.get_type() == Variant::NIL) {
_voxel_metadata.erase(pos);
} else {
_voxel_metadata[pos] = meta;
}
}
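// Illustrative sketch (assumption): voxel metadata is a sparse Map keyed by position, so it costs nothing for
// voxels that carry none; assigning NIL erases the entry rather than storing a null.
static void example_metadata(VoxelBufferInternal &vb) {
vb.set_voxel_metadata(Vector3i(1, 2, 3), "chest");
vb.set_voxel_metadata(Vector3i(1, 2, 3), Variant()); // Erases the entry again
}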
void VoxelBufferInternal::for_each_voxel_metadata(Ref<FuncRef> callback) const {
ERR_FAIL_COND(callback.is_null());
const Map<Vector3i, Variant>::Element *elem = _voxel_metadata.front();
while (elem != nullptr) {
const Variant key = elem->key().to_vec3();
const Variant *args[2] = { &key, &elem->value() };
Variant::CallError err;
callback->call_func(args, 2, err);
ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK,
String("FuncRef call failed at {0}").format(varray(key)));
// TODO Can't provide detailed error because FuncRef doesn't give us access to the object
// ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK, false,
// Variant::get_call_error_text(callback->get_object(), method_name, nullptr, 0, err));
elem = elem->next();
}
}
void VoxelBufferInternal::for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Box3i box) const {
ERR_FAIL_COND(callback.is_null());
for_each_voxel_metadata_in_area(box, [&callback](Vector3i pos, Variant meta) {
const Variant key = pos.to_vec3();
const Variant *args[2] = { &key, &meta };
Variant::CallError err;
callback->call_func(args, 2, err);
ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK,
String("FuncRef call failed at {0}").format(varray(key)));
// TODO Can't provide detailed error because FuncRef doesn't give us access to the object
// ERR_FAIL_COND_MSG(err.error != Variant::CallError::CALL_OK, false,
// Variant::get_call_error_text(callback->get_object(), method_name, nullptr, 0, err));
});
}
void VoxelBufferInternal::clear_voxel_metadata() {
_voxel_metadata.clear();
}
void VoxelBufferInternal::clear_voxel_metadata_in_area(Box3i box) {
Map<Vector3i, Variant>::Element *elem = _voxel_metadata.front();
while (elem != nullptr) {
Map<Vector3i, Variant>::Element *next_elem = elem->next();
if (box.contains(elem->key())) {
_voxel_metadata.erase(elem);
}
elem = next_elem;
}
}
void VoxelBufferInternal::copy_voxel_metadata_in_area(const VoxelBufferInternal &src_buffer, Box3i src_box,
Vector3i dst_origin) {
ERR_FAIL_COND(!src_buffer.is_box_valid(src_box));
const Box3i clipped_src_box = src_box.clipped(Box3i(src_box.pos - dst_origin, _size));
const Vector3i clipped_dst_offset = dst_origin + clipped_src_box.pos - src_box.pos;
const Map<Vector3i, Variant>::Element *elem = src_buffer._voxel_metadata.front();
while (elem != nullptr) {
const Vector3i src_pos = elem->key();
if (src_box.contains(src_pos)) {
const Vector3i dst_pos = src_pos + clipped_dst_offset;
CRASH_COND(!is_position_valid(dst_pos));
_voxel_metadata[dst_pos] = elem->value().duplicate();
}
elem = elem->next();
}
}
void VoxelBufferInternal::copy_voxel_metadata(const VoxelBufferInternal &src_buffer) {
ERR_FAIL_COND(src_buffer.get_size() != _size);
const Map<Vector3i, Variant>::Element *elem = src_buffer._voxel_metadata.front();
while (elem != nullptr) {
const Vector3i pos = elem->key();
_voxel_metadata[pos] = elem->value().duplicate();
elem = elem->next();
}
_block_metadata = src_buffer._block_metadata.duplicate();
}
Ref<Image> VoxelBufferInternal::debug_print_sdf_to_image_top_down() {
Image *im = memnew(Image);
im->create(_size.x, _size.z, false, Image::FORMAT_RGB8);
im->lock();
Vector3i pos;
for (pos.z = 0; pos.z < _size.z; ++pos.z) {
for (pos.x = 0; pos.x < _size.x; ++pos.x) {
for (pos.y = _size.y - 1; pos.y >= 0; --pos.y) {
float v = get_voxel_f(pos.x, pos.y, pos.z, CHANNEL_SDF);
if (v < 0.0) {
break;
}
}
float h = pos.y;
float c = h / _size.y;
im->set_pixel(pos.x, pos.z, Color(c, c, c));
}
}
im->unlock();
return Ref<Image>(im);
}

View File

@ -0,0 +1,468 @@
#ifndef VOXEL_BUFFER_INTERNAL_H
#define VOXEL_BUFFER_INTERNAL_H
#include "../constants/voxel_constants.h"
#include "../util/fixed_array.h"
#include "../util/math/box3i.h"
#include "../util/span.h"
#include "funcs.h"
#include <core/map.h>
#include <core/reference.h>
#include <core/vector.h>
class VoxelTool;
class Image;
class FuncRef;
// Dense voxels data storage.
// Organized in channels of configurable bit depth.
// Values can be interpreted either as unsigned integers or normalized floats.
class VoxelBufferInternal {
public:
enum ChannelId {
CHANNEL_TYPE = 0,
CHANNEL_SDF,
CHANNEL_COLOR,
CHANNEL_INDICES,
CHANNEL_WEIGHTS,
CHANNEL_DATA5,
CHANNEL_DATA6,
CHANNEL_DATA7,
// Arbitrary value, 8 should be enough. Tweak for your needs.
MAX_CHANNELS
};
static const int ALL_CHANNELS_MASK = 0xff;
enum Compression {
COMPRESSION_NONE = 0,
COMPRESSION_UNIFORM,
//COMPRESSION_RLE,
COMPRESSION_COUNT
};
enum Depth {
DEPTH_8_BIT,
DEPTH_16_BIT,
DEPTH_32_BIT,
DEPTH_64_BIT,
DEPTH_COUNT
};
static inline uint32_t get_depth_byte_count(VoxelBufferInternal::Depth d) {
CRASH_COND(d < 0 || d >= VoxelBufferInternal::DEPTH_COUNT);
return 1 << d;
}
static inline Depth get_depth_from_size(size_t size) {
switch (size) {
case 1:
return DEPTH_8_BIT;
case 2:
return DEPTH_16_BIT;
case 4:
return DEPTH_32_BIT;
case 8:
return DEPTH_64_BIT;
default:
CRASH_NOW();
}
}
static const Depth DEFAULT_CHANNEL_DEPTH = DEPTH_8_BIT;
static const Depth DEFAULT_TYPE_CHANNEL_DEPTH = DEPTH_16_BIT;
static const Depth DEFAULT_SDF_CHANNEL_DEPTH = DEPTH_16_BIT;
static const Depth DEFAULT_INDICES_CHANNEL_DEPTH = DEPTH_16_BIT;
static const Depth DEFAULT_WEIGHTS_CHANNEL_DEPTH = DEPTH_16_BIT;
// Limit was made explicit for serialization reasons, and also because there must be a reasonable one
static const uint32_t MAX_SIZE = 65535;
struct Channel {
// Allocated when the channel is populated.
// Flat array, in order [z][x][y] because it allows faster vertical-wise access (the engine is Y-up).
uint8_t *data = nullptr;
// Default value when data is null
uint64_t defval = 0;
Depth depth = DEFAULT_CHANNEL_DEPTH;
uint32_t size_in_bytes = 0;
};
VoxelBufferInternal();
VoxelBufferInternal(VoxelBufferInternal &&src);
~VoxelBufferInternal();
VoxelBufferInternal &operator=(VoxelBufferInternal &&src);
void create(unsigned int sx, unsigned int sy, unsigned int sz);
void create(Vector3i size);
void clear();
void clear_channel(unsigned int channel_index, uint64_t clear_value = 0);
void clear_channel_f(unsigned int channel_index, real_t clear_value);
_FORCE_INLINE_ const Vector3i &get_size() const { return _size; }
void set_default_values(FixedArray<uint64_t, VoxelBufferInternal::MAX_CHANNELS> values);
uint64_t get_voxel(int x, int y, int z, unsigned int channel_index = 0) const;
void set_voxel(uint64_t value, int x, int y, int z, unsigned int channel_index = 0);
real_t get_voxel_f(int x, int y, int z, unsigned int channel_index = 0) const;
void set_voxel_f(real_t value, int x, int y, int z, unsigned int channel_index = 0);
_FORCE_INLINE_ uint64_t get_voxel(const Vector3i pos, unsigned int channel_index = 0) const {
return get_voxel(pos.x, pos.y, pos.z, channel_index);
}
_FORCE_INLINE_ void set_voxel(int value, const Vector3i pos, unsigned int channel_index = 0) {
set_voxel(value, pos.x, pos.y, pos.z, channel_index);
}
void fill(uint64_t defval, unsigned int channel_index = 0);
void fill_area(uint64_t defval, Vector3i min, Vector3i max, unsigned int channel_index = 0);
void fill_area_f(float fvalue, Vector3i min, Vector3i max, unsigned int channel_index);
void fill_f(real_t value, unsigned int channel = 0);
bool is_uniform(unsigned int channel_index) const;
void compress_uniform_channels();
void decompress_channel(unsigned int channel_index);
Compression get_channel_compression(unsigned int channel_index) const;
static uint32_t get_size_in_bytes_for_volume(Vector3i size, Depth depth);
void copy_format(const VoxelBufferInternal &other);
// Specialized copy functions.
// Note: these functions don't include metadata on purpose.
// If you also want to copy metadata, use the specialized functions.
void copy_from(const VoxelBufferInternal &other);
void copy_from(const VoxelBufferInternal &other, unsigned int channel_index);
void copy_from(const VoxelBufferInternal &other, Vector3i src_min, Vector3i src_max, Vector3i dst_min,
unsigned int channel_index);
// Copy a region from a box of values, passed as a raw array.
// `src_size` is the total 3D size of the source box.
// `src_min` and `src_max` are the sub-region of that box we want to copy.
// `dst_min` is the lower corner where we want the data to be copied into the destination.
template <typename T>
void copy_from(Span<const T> src, Vector3i src_size, Vector3i src_min, Vector3i src_max, Vector3i dst_min,
unsigned int channel_index) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
#ifdef DEBUG_ENABLED
// Size of source and destination values must match
ERR_FAIL_COND(channel.depth != get_depth_from_size(sizeof(T)));
#endif
// This function always decompresses the destination.
// To keep it compressed, either check what you are about to copy,
// or schedule a recompression for later.
decompress_channel(channel_index);
Span<T> dst(static_cast<T *>(channel.data), channel.size_in_bytes / sizeof(T));
copy_3d_region_zxy<T>(dst, _size, dst_min, src, src_size, src_min, src_max);
}
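// Illustrative usage (not part of this commit), given a Span<const uint16_t> `src` over a 4x4x4 grid of values
// matching the channel depth:
// buffer.copy_from<uint16_t>(src, Vector3i(4, 4, 4), Vector3i(), Vector3i(4, 4, 4), Vector3i(), CHANNEL_TYPE);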
// Copy a region of the data into a dense buffer.
// If the source is compressed, it is decompressed.
// `dst` is a raw array storing grid values in a box.
// `dst_size` is the total size of the box.
// `dst_min` is the lower corner of where we want the source data to be stored.
// `src_min` and `src_max` is the sub-region of the source we want to copy.
template <typename T>
void copy_to(Span<T> dst, Vector3i dst_size, Vector3i dst_min, Vector3i src_min, Vector3i src_max,
unsigned int channel_index) const {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
const Channel &channel = _channels[channel_index];
#ifdef DEBUG_ENABLED
// Size of source and destination values must match
ERR_FAIL_COND(channel.depth != get_depth_from_size(sizeof(T)));
#endif
if (channel.data == nullptr) {
fill_3d_region_zxy<T>(dst, dst_size, dst_min, dst_min + (src_max - src_min), channel.defval);
} else {
Span<const T> src(static_cast<const T *>(channel.data), channel.size_in_bytes / sizeof(T));
copy_3d_region_zxy<T>(dst, dst_size, dst_min, src, _size, src_min, src_max);
}
}
// TODO Deprecate?
// Executes a read-write action on all cells of the provided box that intersect with this buffer.
// `action_func` receives a voxel value from the channel, and returns a modified value.
// if the returned value is different, it will be applied to the buffer.
// Can be used to blend voxels together.
template <typename F>
inline void read_write_action(Box3i box, unsigned int channel_index, F action_func) {
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
box.clip(Box3i(Vector3i(), _size));
Vector3i min_pos = box.pos;
Vector3i max_pos = box.pos + box.size;
Vector3i pos;
for (pos.z = min_pos.z; pos.z < max_pos.z; ++pos.z) {
for (pos.x = min_pos.x; pos.x < max_pos.x; ++pos.x) {
for (pos.y = min_pos.y; pos.y < max_pos.y; ++pos.y) {
// TODO Optimization: a bunch of checks and branching could be skipped
const uint64_t v0 = get_voxel(pos, channel_index);
const uint64_t v1 = action_func(pos, v0);
if (v0 != v1) {
set_voxel(v1, pos, channel_index);
}
}
}
}
}
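// Illustrative usage (assumption): blending by replacing empty voxels only:
// buffer.read_write_action(box, CHANNEL_TYPE, [](Vector3i pos, uint64_t v) {
// return v == 0 ? 42 : v; // write type 42 where the box was empty
// });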
static _FORCE_INLINE_ size_t get_index(const Vector3i pos, const Vector3i size) {
return pos.get_zxy_index(size);
}
_FORCE_INLINE_ size_t get_index(unsigned int x, unsigned int y, unsigned int z) const {
return y + _size.y * (x + _size.x * z); // ZXY index
}
template <typename F>
inline void for_each_index_and_pos(const Box3i &box, F f) {
const Vector3i min_pos = box.pos;
const Vector3i max_pos = box.pos + box.size;
Vector3i pos;
for (pos.z = min_pos.z; pos.z < max_pos.z; ++pos.z) {
for (pos.x = min_pos.x; pos.x < max_pos.x; ++pos.x) {
pos.y = min_pos.y;
size_t i = get_index(pos.x, pos.y, pos.z);
for (; pos.y < max_pos.y; ++pos.y) {
f(i, pos);
++i;
}
}
}
}
// Data_T action_func(Vector3i pos, Data_T in_v)
template <typename F, typename Data_T>
void write_box_template(const Box3i &box, unsigned int channel_index, F action_func, Vector3i offset) {
decompress_channel(channel_index);
Channel &channel = _channels[channel_index];
#ifdef DEBUG_ENABLED
ERR_FAIL_COND(!Box3i(Vector3i(), _size).contains(box));
ERR_FAIL_COND(get_depth_byte_count(channel.depth) != sizeof(Data_T));
#endif
Span<Data_T> data = Span<uint8_t>(channel.data, channel.size_in_bytes)
.reinterpret_cast_to<Data_T>();
// `data` is captured by reference so the lambda can write to it; by-value captures are const unless the lambda is declared `mutable`
for_each_index_and_pos(box, [&data, action_func, offset](size_t i, Vector3i pos) {
data.set(i, action_func(pos + offset, data[i]));
});
}
// void action_func(Vector3i pos, Data0_T &inout_v0, Data1_T &inout_v1)
template <typename F, typename Data0_T, typename Data1_T>
void write_box_2_template(const Box3i &box, unsigned int channel_index0, unsigned channel_index1, F action_func,
Vector3i offset) {
decompress_channel(channel_index0);
decompress_channel(channel_index1);
Channel &channel0 = _channels[channel_index0];
Channel &channel1 = _channels[channel_index1];
#ifdef DEBUG_ENABLED
ERR_FAIL_COND(!Box3i(Vector3i(), _size).contains(box));
ERR_FAIL_COND(get_depth_byte_count(channel0.depth) != sizeof(Data0_T));
ERR_FAIL_COND(get_depth_byte_count(channel1.depth) != sizeof(Data1_T));
#endif
Span<Data0_T> data0 = Span<uint8_t>(channel0.data, channel0.size_in_bytes)
.reinterpret_cast_to<Data0_T>();
Span<Data1_T> data1 = Span<uint8_t>(channel1.data, channel1.size_in_bytes)
.reinterpret_cast_to<Data1_T>();
for_each_index_and_pos(box, [action_func, offset, &data0, &data1](size_t i, Vector3i pos) {
// TODO The caller must still specify exactly the correct type, maybe some conversion could be used
action_func(pos + offset, data0[i], data1[i]);
});
}
template <typename F>
void write_box(const Box3i &box, unsigned int channel_index, F action_func, Vector3i offset) {
#ifdef DEBUG_ENABLED
ERR_FAIL_INDEX(channel_index, MAX_CHANNELS);
#endif
const Channel &channel = _channels[channel_index];
switch (channel.depth) {
case DEPTH_8_BIT:
write_box_template<F, uint8_t>(box, channel_index, action_func, offset);
break;
case DEPTH_16_BIT:
write_box_template<F, uint16_t>(box, channel_index, action_func, offset);
break;
case DEPTH_32_BIT:
write_box_template<F, uint32_t>(box, channel_index, action_func, offset);
break;
case DEPTH_64_BIT:
write_box_template<F, uint64_t>(box, channel_index, action_func, offset);
break;
default:
ERR_FAIL();
break;
}
}
/*template <typename F>
void write_box_2(const Box3i &box, unsigned int channel_index0, unsigned int channel_index1, F action_func,
Vector3i offset) {
#ifdef DEBUG_ENABLED
ERR_FAIL_INDEX(channel_index0, MAX_CHANNELS);
ERR_FAIL_INDEX(channel_index1, MAX_CHANNELS);
#endif
const Channel &channel0 = _channels[channel_index0];
const Channel &channel1 = _channels[channel_index1];
#ifdef DEBUG_ENABLED
// TODO Find a better way to handle combination explosion. For now I allow only what's really used.
ERR_FAIL_COND_MSG(channel1.depth != DEPTH_16_BIT, "Second channel depth is hardcoded to 16 for now");
#endif
switch (channel0.depth) {
case DEPTH_8_BIT:
write_box_2_template<F, uint8_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
case DEPTH_16_BIT:
write_box_2_template<F, uint16_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
case DEPTH_32_BIT:
write_box_2_template<F, uint32_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
case DEPTH_64_BIT:
write_box_2_template<F, uint64_t, uint16_t>(box, channel_index0, channel_index1, action_func, offset);
break;
default:
ERR_FAIL();
break;
}
}*/
static inline FixedArray<uint8_t, MAX_CHANNELS> mask_to_channels_list(
uint8_t channels_mask, unsigned int &out_count) {
FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channels;
unsigned int channel_count = 0;
for (unsigned int channel_index = 0; channel_index < VoxelBufferInternal::MAX_CHANNELS; ++channel_index) {
if (((1 << channel_index) & channels_mask) != 0) {
channels[channel_count] = channel_index;
++channel_count;
}
}
out_count = channel_count;
return channels;
}
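// Illustrative caller pattern (a sketch; `mask` and `process_channel` are
// made-up names): visit only the channels selected by a bitmask.
//
// unsigned int count;
// const FixedArray<uint8_t, VoxelBufferInternal::MAX_CHANNELS> channels =
// 		VoxelBufferInternal::mask_to_channels_list(mask, count);
// for (unsigned int i = 0; i < count; ++i) {
// 	process_channel(channels[i]);
// }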
void duplicate_to(VoxelBufferInternal &dst, bool include_metadata) const;
void move_to(VoxelBufferInternal &dst);
_FORCE_INLINE_ bool is_position_valid(unsigned int x, unsigned int y, unsigned int z) const {
return x < (unsigned)_size.x && y < (unsigned)_size.y && z < (unsigned)_size.z;
}
_FORCE_INLINE_ bool is_position_valid(const Vector3i pos) const {
return is_position_valid(pos.x, pos.y, pos.z);
}
_FORCE_INLINE_ bool is_box_valid(const Box3i box) const {
return Box3i(Vector3i(), _size).contains(box);
}
_FORCE_INLINE_ uint64_t get_volume() const {
return _size.volume();
}
// TODO Have a template version based on channel depth
bool get_channel_raw(unsigned int channel_index, Span<uint8_t> &slice) const;
void downscale_to(VoxelBufferInternal &dst, Vector3i src_min, Vector3i src_max, Vector3i dst_min) const;
bool equals(const VoxelBufferInternal &p_other) const;
void set_channel_depth(unsigned int channel_index, Depth new_depth);
Depth get_channel_depth(unsigned int channel_index) const;
static uint32_t get_depth_bit_count(Depth d);
// When using less than 32-bit resolution for terrain signed distance fields,
// values should be scaled to make better use of the representable range, since storage is normalized to -1..1.
// This returns that scale for a given depth configuration.
static float get_sdf_quantization_scale(Depth d);
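// Worked example (the call site is an assumption, not taken from this file):
// an SDF value is multiplied by this scale before quantization, and divided by
// the same scale after decoding, so small distances keep precision at 8 or 16 bits.
//
// const float s = VoxelBufferInternal::get_sdf_quantization_scale(VoxelBufferInternal::DEPTH_16_BIT);
// buffer.set_voxel_f(sdf * s, pos.x, pos.y, pos.z, VoxelBufferInternal::CHANNEL_SDF);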
// Metadata
Variant get_block_metadata() const { return _block_metadata; }
void set_block_metadata(Variant meta);
Variant get_voxel_metadata(Vector3i pos) const;
void set_voxel_metadata(Vector3i pos, Variant meta);
template <typename F>
void for_each_voxel_metadata_in_area(Box3i box, F callback) const {
const Map<Vector3i, Variant>::Element *elem = _voxel_metadata.front();
while (elem != nullptr) {
if (box.contains(elem->key())) {
callback(elem->key(), elem->value());
}
elem = elem->next();
}
}
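// Illustrative usage (a sketch): gather the positions of all metadata entries
// inside a box.
//
// std::vector<Vector3i> positions;
// buffer.for_each_voxel_metadata_in_area(box, [&positions](Vector3i pos, Variant meta) {
// 	positions.push_back(pos);
// });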
void for_each_voxel_metadata(Ref<FuncRef> callback) const;
void for_each_voxel_metadata_in_area(Ref<FuncRef> callback, Box3i box) const;
void clear_voxel_metadata();
void clear_voxel_metadata_in_area(Box3i box);
void copy_voxel_metadata_in_area(const VoxelBufferInternal &src_buffer, Box3i src_box, Vector3i dst_origin);
void copy_voxel_metadata(const VoxelBufferInternal &src_buffer);
const Map<Vector3i, Variant> &get_voxel_metadata() const { return _voxel_metadata; }
// Internal synchronization.
// This lock is optional; at the moment it is used internally, only in multithreaded areas.
inline const RWLock &get_lock() const { return _rw_lock; }
inline RWLock &get_lock() { return _rw_lock; }
// Debugging
Ref<Image> debug_print_sdf_to_image_top_down();
private:
bool create_channel_noinit(int i, Vector3i size);
bool create_channel(int i, Vector3i size, uint64_t defval);
void delete_channel(int i);
private:
// Each channel can store arbitrary data.
// For example, you can decide to store colors (R, G, B, A), gameplay types (type, state, light) or both.
FixedArray<Channel, MAX_CHANNELS> _channels;
// How many voxels are there in the three directions. All populated channels have the same size.
Vector3i _size;
Variant _block_metadata;
Map<Vector3i, Variant> _voxel_metadata;
// TODO It may be preferable to actually move away from storing an RWLock in every buffer in the future.
// We should be able to find a solution because very few of these locks are actually used at a given time.
// It worked so far on PC but other platforms like the PS5 might have a pretty low limit (8K?)
RWLock _rw_lock;
};
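// Illustrative combined usage of the class above (a sketch; `buffer` is an
// assumed instance): take the optional write lock, then fill a channel with a
// constant over the whole volume. The lambda follows the
// `Data_T action_func(Vector3i pos, Data_T in_v)` contract of write_box.
//
// {
// 	RWLockWrite lock(buffer.get_lock());
// 	buffer.write_box(Box3i(Vector3i(), buffer.get_size()), VoxelBufferInternal::CHANNEL_TYPE,
// 			[](Vector3i pos, uint64_t v) { return uint64_t(1); }, Vector3i());
// }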
inline void debug_check_texture_indices_packed_u16(const VoxelBufferInternal &voxels) {
for (int z = 0; z < voxels.get_size().z; ++z) {
for (int x = 0; x < voxels.get_size().x; ++x) {
for (int y = 0; y < voxels.get_size().y; ++y) {
uint16_t pi = voxels.get_voxel(x, y, z, VoxelBufferInternal::CHANNEL_INDICES);
FixedArray<uint8_t, 4> indices = decode_indices_from_packed_u16(pi);
debug_check_texture_indices(indices);
}
}
}
}
#endif // VOXEL_BUFFER_INTERNAL_H

View File

@ -1,7 +1,7 @@
#ifndef VOXEL_DATA_BLOCK_H
#define VOXEL_DATA_BLOCK_H
#include "../storage/voxel_buffer.h"
#include "../storage/voxel_buffer_internal.h"
#include "../util/macros.h"
#include "voxel_ref_count.h"
@ -12,22 +12,37 @@ public:
const unsigned int lod_index = 0;
VoxelRefCount viewers;
static VoxelDataBlock *create(Vector3i bpos, Ref<VoxelBuffer> buffer, unsigned int size, unsigned int p_lod_index) {
static VoxelDataBlock *create(Vector3i bpos, std::shared_ptr<VoxelBufferInternal> &buffer, unsigned int size,
unsigned int p_lod_index) {
const int bs = size;
ERR_FAIL_COND_V(buffer.is_null(), nullptr);
ERR_FAIL_COND_V(buffer == nullptr, nullptr);
ERR_FAIL_COND_V(buffer->get_size() != Vector3i(bs, bs, bs), nullptr);
return memnew(VoxelDataBlock(bpos, buffer, p_lod_index));
}
Ref<VoxelBuffer> get_voxels() const {
VoxelBufferInternal &get_voxels() {
#ifdef DEBUG_ENABLED
CRASH_COND(_voxels.is_null());
CRASH_COND(_voxels == nullptr);
#endif
return *_voxels;
}
const VoxelBufferInternal &get_voxels_const() const {
#ifdef DEBUG_ENABLED
CRASH_COND(_voxels == nullptr);
#endif
return *_voxels;
}
std::shared_ptr<VoxelBufferInternal> get_voxels_shared() const {
#ifdef DEBUG_ENABLED
CRASH_COND(_voxels == nullptr);
#endif
return _voxels;
}
void set_voxels(Ref<VoxelBuffer> buffer) {
ERR_FAIL_COND(buffer.is_null());
void set_voxels(std::shared_ptr<VoxelBufferInternal> &buffer) {
ERR_FAIL_COND(buffer == nullptr);
_voxels = buffer;
}
@ -53,10 +68,10 @@ public:
}
private:
VoxelDataBlock(Vector3i bpos, Ref<VoxelBuffer> buffer, unsigned int p_lod_index) :
VoxelDataBlock(Vector3i bpos, std::shared_ptr<VoxelBufferInternal> &buffer, unsigned int p_lod_index) :
position(bpos), lod_index(p_lod_index), _voxels(buffer) {}
Ref<VoxelBuffer> _voxels;
std::shared_ptr<VoxelBufferInternal> _voxels;
// The block was edited, which requires its LOD counterparts to be recomputed
bool _needs_lodding = false;

View File

@ -1,6 +1,8 @@
#include "voxel_data_map.h"
#include "../constants/cube_tables.h"
#include "../util/godot/funcs.h"
#include "../util/macros.h"
#include <limits>
VoxelDataMap::VoxelDataMap() :
@ -9,7 +11,7 @@ VoxelDataMap::VoxelDataMap() :
set_block_size_pow2(VoxelConstants::DEFAULT_BLOCK_SIZE_PO2);
_default_voxel.fill(0);
_default_voxel[VoxelBuffer::CHANNEL_SDF] = 255;
_default_voxel[VoxelBufferInternal::CHANNEL_SDF] = 255;
}
VoxelDataMap::~VoxelDataMap() {
@ -48,12 +50,12 @@ int VoxelDataMap::get_voxel(Vector3i pos, unsigned int c) const {
if (block == nullptr) {
return _default_voxel[c];
}
RWLockRead lock(block->get_voxels()->get_lock());
return block->get_voxels()->get_voxel(to_local(pos), c);
RWLockRead lock(block->get_voxels_const().get_lock());
return block->get_voxels_const().get_voxel(to_local(pos), c);
}
VoxelDataBlock *VoxelDataMap::create_default_block(Vector3i bpos) {
Ref<VoxelBuffer> buffer(memnew(VoxelBuffer));
std::shared_ptr<VoxelBufferInternal> buffer = gd_make_shared<VoxelBufferInternal>();
buffer->create(_block_size, _block_size, _block_size);
buffer->set_default_values(_default_voxel);
VoxelDataBlock *block = VoxelDataBlock::create(bpos, buffer, _block_size, _lod_index);
@ -73,8 +75,9 @@ VoxelDataBlock *VoxelDataMap::get_or_create_block_at_voxel_pos(Vector3i pos) {
void VoxelDataMap::set_voxel(int value, Vector3i pos, unsigned int c) {
VoxelDataBlock *block = get_or_create_block_at_voxel_pos(pos);
// TODO If it turns out to be a problem, use CoW
RWLockWrite lock(block->get_voxels()->get_lock());
block->get_voxels()->set_voxel(value, to_local(pos), c);
VoxelBufferInternal &voxels = block->get_voxels();
RWLockWrite lock(voxels.get_lock());
voxels.set_voxel(value, to_local(pos), c);
}
float VoxelDataMap::get_voxel_f(Vector3i pos, unsigned int c) const {
@ -85,24 +88,25 @@ float VoxelDataMap::get_voxel_f(Vector3i pos, unsigned int c) const {
return _default_voxel[c];
}
Vector3i lpos = to_local(pos);
RWLockRead lock(block->get_voxels()->get_lock());
return block->get_voxels()->get_voxel_f(lpos.x, lpos.y, lpos.z, c);
RWLockRead lock(block->get_voxels_const().get_lock());
return block->get_voxels_const().get_voxel_f(lpos.x, lpos.y, lpos.z, c);
}
void VoxelDataMap::set_voxel_f(real_t value, Vector3i pos, unsigned int c) {
VoxelDataBlock *block = get_or_create_block_at_voxel_pos(pos);
Vector3i lpos = to_local(pos);
RWLockWrite lock(block->get_voxels()->get_lock());
block->get_voxels()->set_voxel_f(value, lpos.x, lpos.y, lpos.z, c);
VoxelBufferInternal &voxels = block->get_voxels();
RWLockWrite lock(voxels.get_lock());
voxels.set_voxel_f(value, lpos.x, lpos.y, lpos.z, c);
}
void VoxelDataMap::set_default_voxel(int value, unsigned int channel) {
ERR_FAIL_INDEX(channel, VoxelBuffer::MAX_CHANNELS);
ERR_FAIL_INDEX(channel, VoxelBufferInternal::MAX_CHANNELS);
_default_voxel[channel] = value;
}
int VoxelDataMap::get_default_voxel(unsigned int channel) {
ERR_FAIL_INDEX_V(channel, VoxelBuffer::MAX_CHANNELS, 0);
ERR_FAIL_INDEX_V(channel, VoxelBufferInternal::MAX_CHANNELS, 0);
return _default_voxel[channel];
}
@ -178,11 +182,11 @@ void VoxelDataMap::remove_block_internal(Vector3i bpos, unsigned int index) {
}
}
VoxelDataBlock *VoxelDataMap::set_block_buffer(Vector3i bpos, Ref<VoxelBuffer> buffer) {
ERR_FAIL_COND_V(buffer.is_null(), nullptr);
VoxelDataBlock *VoxelDataMap::set_block_buffer(Vector3i bpos, std::shared_ptr<VoxelBufferInternal> &buffer) {
ERR_FAIL_COND_V(buffer == nullptr, nullptr);
VoxelDataBlock *block = get_block(bpos);
if (block == nullptr) {
block = VoxelDataBlock::create(bpos, *buffer, _block_size, _lod_index);
block = VoxelDataBlock::create(bpos, buffer, _block_size, _lod_index);
set_block(bpos, block);
} else {
block->set_voxels(buffer);
@ -205,7 +209,7 @@ bool VoxelDataMap::is_block_surrounded(Vector3i pos) const {
return true;
}
void VoxelDataMap::copy(Vector3i min_pos, VoxelBuffer &dst_buffer, unsigned int channels_mask) const {
void VoxelDataMap::copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask) const {
const Vector3i max_pos = min_pos + dst_buffer.get_size();
const Vector3i min_block_pos = voxel_to_block(min_pos);
@ -217,7 +221,7 @@ void VoxelDataMap::copy(Vector3i min_pos, VoxelBuffer &dst_buffer, unsigned int
for (bpos.z = min_block_pos.z; bpos.z < max_block_pos.z; ++bpos.z) {
for (bpos.x = min_block_pos.x; bpos.x < max_block_pos.x; ++bpos.x) {
for (bpos.y = min_block_pos.y; bpos.y < max_block_pos.y; ++bpos.y) {
for (unsigned int channel = 0; channel < VoxelBuffer::MAX_CHANNELS; ++channel) {
for (unsigned int channel = 0; channel < VoxelBufferInternal::MAX_CHANNELS; ++channel) {
if (((1 << channel) & channels_mask) == 0) {
continue;
}
@ -225,7 +229,7 @@ void VoxelDataMap::copy(Vector3i min_pos, VoxelBuffer &dst_buffer, unsigned int
const Vector3i src_block_origin = block_to_voxel(bpos);
if (block != nullptr) {
const VoxelBuffer &src_buffer = **block->get_voxels();
const VoxelBufferInternal &src_buffer = block->get_voxels_const();
dst_buffer.set_channel_depth(channel, src_buffer.get_channel_depth(channel));
@ -253,7 +257,7 @@ void VoxelDataMap::copy(Vector3i min_pos, VoxelBuffer &dst_buffer, unsigned int
}
}
void VoxelDataMap::paste(Vector3i min_pos, VoxelBuffer &src_buffer, unsigned int channels_mask, bool use_mask,
void VoxelDataMap::paste(Vector3i min_pos, VoxelBufferInternal &src_buffer, unsigned int channels_mask, bool use_mask,
uint64_t mask_value, bool create_new_blocks) {
//
const Vector3i max_pos = min_pos + src_buffer.get_size();
@ -265,7 +269,7 @@ void VoxelDataMap::paste(Vector3i min_pos, VoxelBuffer &src_buffer, unsigned int
for (bpos.z = min_block_pos.z; bpos.z < max_block_pos.z; ++bpos.z) {
for (bpos.x = min_block_pos.x; bpos.x < max_block_pos.x; ++bpos.x) {
for (bpos.y = min_block_pos.y; bpos.y < max_block_pos.y; ++bpos.y) {
for (unsigned int channel = 0; channel < VoxelBuffer::MAX_CHANNELS; ++channel) {
for (unsigned int channel = 0; channel < VoxelBufferInternal::MAX_CHANNELS; ++channel) {
if (((1 << channel) & channels_mask) == 0) {
continue;
}
@ -281,7 +285,7 @@ void VoxelDataMap::paste(Vector3i min_pos, VoxelBuffer &src_buffer, unsigned int
const Vector3i dst_block_origin = block_to_voxel(bpos);
VoxelBuffer &dst_buffer = **block->get_voxels();
VoxelBufferInternal &dst_buffer = block->get_voxels();
RWLockWrite lock(dst_buffer.get_lock());
if (use_mask) {

View File

@ -50,20 +50,20 @@ public:
int get_voxel(Vector3i pos, unsigned int c = 0) const;
void set_voxel(int value, Vector3i pos, unsigned int c = 0);
float get_voxel_f(Vector3i pos, unsigned int c = VoxelBuffer::CHANNEL_SDF) const;
void set_voxel_f(real_t value, Vector3i pos, unsigned int c = VoxelBuffer::CHANNEL_SDF);
float get_voxel_f(Vector3i pos, unsigned int c = VoxelBufferInternal::CHANNEL_SDF) const;
void set_voxel_f(real_t value, Vector3i pos, unsigned int c = VoxelBufferInternal::CHANNEL_SDF);
void set_default_voxel(int value, unsigned int channel = 0);
int get_default_voxel(unsigned int channel = 0);
// Gets a copy of all voxels in the area starting at min_pos having the same size as dst_buffer.
void copy(Vector3i min_pos, VoxelBuffer &dst_buffer, unsigned int channels_mask) const;
void copy(Vector3i min_pos, VoxelBufferInternal &dst_buffer, unsigned int channels_mask) const;
void paste(Vector3i min_pos, VoxelBuffer &src_buffer, unsigned int channels_mask, bool use_mask, uint64_t mask_value,
bool create_new_blocks);
void paste(Vector3i min_pos, VoxelBufferInternal &src_buffer, unsigned int channels_mask, bool use_mask,
uint64_t mask_value, bool create_new_blocks);
// Moves the given buffer into a block of the map. The buffer is referenced, no copy is made.
VoxelDataBlock *set_block_buffer(Vector3i bpos, Ref<VoxelBuffer> buffer);
VoxelDataBlock *set_block_buffer(Vector3i bpos, std::shared_ptr<VoxelBufferInternal> &buffer);
struct NoAction {
inline void operator()(VoxelDataBlock *block) {}
@ -127,7 +127,7 @@ public:
const Vector3i block_origin = block_to_voxel(block_pos);
Box3i local_box(voxel_box.pos - block_origin, voxel_box.size);
local_box.clip(Box3i(Vector3i(), block_size));
block->get_voxels()->write_box(local_box, channel, action, block_origin);
block->get_voxels().write_box(local_box, channel, action, block_origin);
}
});
}
@ -143,7 +143,7 @@ public:
const Vector3i block_origin = block_to_voxel(block_pos);
Box3i local_box(voxel_box.pos - block_origin, voxel_box.size);
local_box.clip(Box3i(Vector3i(), block_size));
block->get_voxels()->write_box_2_template<F, uint16_t, uint16_t>(
block->get_voxels().write_box_2_template<F, uint16_t, uint16_t>(
local_box, channel0, channel1, action, block_origin);
}
});
@ -159,7 +159,7 @@ private:
private:
// Voxel values that will be returned if access is out of map bounds
FixedArray<uint64_t, VoxelBuffer::MAX_CHANNELS> _default_voxel;
FixedArray<uint64_t, VoxelBufferInternal::MAX_CHANNELS> _default_voxel;
// Blocks stored with a spatial hash in all 3D directions.
// RELATIONSHIP = 2 because it delivers better performance with this kind of key and hash (less collisions).

View File

@ -33,7 +33,7 @@ bool VoxelRegionFormat::validate() const {
// Test worst-case limits (this does not include arbitrary metadata, so it can't be 100% accurate...)
size_t bytes_per_block = 0;
for (unsigned int i = 0; i < channel_depths.size(); ++i) {
bytes_per_block += VoxelBuffer::get_depth_bit_count(channel_depths[i]) / 8;
bytes_per_block += VoxelBufferInternal::get_depth_bit_count(channel_depths[i]) / 8;
}
bytes_per_block *= Vector3i(1 << block_size_po2).volume();
const size_t sectors_per_block = (bytes_per_block - 1) / sector_size + 1;
@ -44,9 +44,9 @@ bool VoxelRegionFormat::validate() const {
return true;
}
bool VoxelRegionFormat::verify_block(const VoxelBuffer &block) const {
bool VoxelRegionFormat::verify_block(const VoxelBufferInternal &block) const {
ERR_FAIL_COND_V(block.get_size() != Vector3i(1 << block_size_po2), false);
for (unsigned int i = 0; i < VoxelBuffer::MAX_CHANNELS; ++i) {
for (unsigned int i = 0; i < VoxelBufferInternal::MAX_CHANNELS; ++i) {
ERR_FAIL_COND_V(block.get_channel_depth(i) != channel_depths[i], false);
}
return true;
@ -128,8 +128,8 @@ static bool load_header(FileAccess *f, uint8_t &out_version, VoxelRegionFormat &
for (unsigned int i = 0; i < out_format.channel_depths.size(); ++i) {
const uint8_t d = f->get_8();
ERR_FAIL_COND_V(d >= VoxelBuffer::DEPTH_COUNT, false);
out_format.channel_depths[i] = static_cast<VoxelBuffer::Depth>(d);
ERR_FAIL_COND_V(d >= VoxelBufferInternal::DEPTH_COUNT, false);
out_format.channel_depths[i] = static_cast<VoxelBufferInternal::Depth>(d);
}
out_format.sector_size = f->get_16();
@ -172,7 +172,7 @@ VoxelRegionFile::VoxelRegionFile() {
// Defaults
_header.format.block_size_po2 = 4;
_header.format.region_size = Vector3i(16, 16, 16);
_header.format.channel_depths.fill(VoxelBuffer::DEPTH_8_BIT);
_header.format.channel_depths.fill(VoxelBufferInternal::DEPTH_8_BIT);
_header.format.sector_size = 512;
}
@ -302,8 +302,8 @@ const VoxelRegionFormat &VoxelRegionFile::get_format() const {
}
Error VoxelRegionFile::load_block(
Vector3i position, Ref<VoxelBuffer> out_block, VoxelBlockSerializerInternal &serializer) {
ERR_FAIL_COND_V(out_block.is_null(), ERR_INVALID_PARAMETER);
Vector3i position, VoxelBufferInternal &out_block, VoxelBlockSerializerInternal &serializer) {
//
ERR_FAIL_COND_V(_file_access == nullptr, ERR_FILE_CANT_READ);
FileAccess *f = _file_access;
@ -315,10 +315,10 @@ Error VoxelRegionFile::load_block(
return ERR_DOES_NOT_EXIST;
}
ERR_FAIL_COND_V(out_block->get_size() != out_block->get_size(), ERR_INVALID_PARAMETER);
ERR_FAIL_COND_V(out_block.get_size() != Vector3i(1 << _header.format.block_size_po2), ERR_INVALID_PARAMETER);
// Configure block format
for (unsigned int channel_index = 0; channel_index < _header.format.channel_depths.size(); ++channel_index) {
out_block->set_channel_depth(channel_index, _header.format.channel_depths[channel_index]);
out_block.set_channel_depth(channel_index, _header.format.channel_depths[channel_index]);
}
const unsigned int sector_index = block_info.get_sector_index();
@ -329,15 +329,16 @@ Error VoxelRegionFile::load_block(
unsigned int block_data_size = f->get_32();
CRASH_COND(f->eof_reached());
ERR_FAIL_COND_V_MSG(!serializer.decompress_and_deserialize(f, block_data_size, **out_block), ERR_PARSE_ERROR,
ERR_FAIL_COND_V_MSG(!serializer.decompress_and_deserialize(f, block_data_size, out_block), ERR_PARSE_ERROR,
String("Failed to read block {0}").format(varray(position.to_vec3())));
return OK;
}
Error VoxelRegionFile::save_block(Vector3i position, Ref<VoxelBuffer> block, VoxelBlockSerializerInternal &serializer) {
ERR_FAIL_COND_V(block.is_null(), ERR_INVALID_PARAMETER);
ERR_FAIL_COND_V(_header.format.verify_block(**block) == false, ERR_INVALID_PARAMETER);
Error VoxelRegionFile::save_block(Vector3i position, VoxelBufferInternal &block,
VoxelBlockSerializerInternal &serializer) {
//
ERR_FAIL_COND_V(_header.format.verify_block(block) == false, ERR_INVALID_PARAMETER);
ERR_FAIL_COND_V(_file_access == nullptr, ERR_FILE_CANT_WRITE);
FileAccess *f = _file_access;
@ -360,7 +361,7 @@ Error VoxelRegionFile::save_block(Vector3i position, Ref<VoxelBuffer> block, Vox
// Check position matches the sectors rule
CRASH_COND((block_offset - _blocks_begin_offset) % _header.format.sector_size != 0);
VoxelBlockSerializerInternal::SerializeResult res = serializer.serialize_and_compress(**block);
VoxelBlockSerializerInternal::SerializeResult res = serializer.serialize_and_compress(block);
ERR_FAIL_COND_V(!res.success, ERR_INVALID_PARAMETER);
f->store_32(res.data.size());
const unsigned int written_size = sizeof(int) + res.data.size();
@ -388,7 +389,7 @@ Error VoxelRegionFile::save_block(Vector3i position, Ref<VoxelBuffer> block, Vox
const int old_sector_count = block_info.get_sector_count();
CRASH_COND(old_sector_count < 1);
VoxelBlockSerializerInternal::SerializeResult res = serializer.serialize_and_compress(**block);
VoxelBlockSerializerInternal::SerializeResult res = serializer.serialize_and_compress(block);
ERR_FAIL_COND_V(!res.success, ERR_INVALID_PARAMETER);
const std::vector<uint8_t> &data = res.data;
const int written_size = sizeof(int) + data.size();

View File

@ -1,7 +1,7 @@
#ifndef REGION_FILE_H
#define REGION_FILE_H
#include "../../storage/voxel_buffer.h"
#include "../../storage/voxel_buffer_internal.h"
#include "../../util/fixed_array.h"
#include "../../util/math/color8.h"
#include "../../util/math/vector3i.h"
@ -15,20 +15,21 @@ struct VoxelRegionFormat {
static const uint32_t MAX_BLOCKS_ACROSS = 255;
static const uint32_t CHANNEL_COUNT = 8;
static_assert(CHANNEL_COUNT == VoxelBuffer::MAX_CHANNELS, "This format doesn't support variable channel count");
static_assert(CHANNEL_COUNT == VoxelBufferInternal::MAX_CHANNELS,
"This format doesn't support variable channel count");
// How many voxels in a cubic block, as power of two
uint8_t block_size_po2 = 0;
// How many blocks across all dimensions (stored as 3 bytes)
Vector3i region_size;
FixedArray<VoxelBuffer::Depth, CHANNEL_COUNT> channel_depths;
FixedArray<VoxelBufferInternal::Depth, CHANNEL_COUNT> channel_depths;
// Blocks are stored at offsets multiple of that size
uint32_t sector_size = 0;
FixedArray<Color8, 256> palette;
bool has_palette = false;
bool validate() const;
bool verify_block(const VoxelBuffer &block) const;
bool verify_block(const VoxelBufferInternal &block) const;
};
struct VoxelRegionBlockInfo {
@ -80,8 +81,8 @@ public:
bool set_format(const VoxelRegionFormat &format);
const VoxelRegionFormat &get_format() const;
Error load_block(Vector3i position, Ref<VoxelBuffer> out_block, VoxelBlockSerializerInternal &serializer);
Error save_block(Vector3i position, Ref<VoxelBuffer> block, VoxelBlockSerializerInternal &serializer);
Error load_block(Vector3i position, VoxelBufferInternal &out_block, VoxelBlockSerializerInternal &serializer);
Error save_block(Vector3i position, VoxelBufferInternal &block, VoxelBlockSerializerInternal &serializer);
unsigned int get_header_block_count() const;
bool has_block(Vector3i position) const;

View File

@ -16,66 +16,76 @@ const uint8_t FORMAT_VERSION_LEGACY_2 = 2;
const uint8_t FORMAT_VERSION_LEGACY_1 = 1;
const char *META_FILE_NAME = "meta.vxrm";
} // namespace
thread_local VoxelBlockSerializerInternal VoxelStreamRegionFiles::_block_serializer;
// Sorts a sequence without modifying it, returning a sorted list of pointers
template <typename T, typename Comparer_T>
void get_sorted_pointers(Span<T> sequence, Comparer_T comparer, std::vector<T *> &out_sorted_sequence) {
struct PtrCompare {
Span<T> sequence;
Comparer_T comparer;
inline bool operator()(const T *a, const T *b) const {
return comparer(*a, *b);
}
};
out_sorted_sequence.resize(sequence.size());
for (unsigned int i = 0; i < sequence.size(); ++i) {
out_sorted_sequence[i] = &sequence[i];
}
SortArray<T *, PtrCompare> sort_array;
sort_array.compare.sequence = sequence;
sort_array.compare.comparer = comparer;
sort_array.sort(out_sorted_sequence.data(), out_sorted_sequence.size());
}
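// Why pointers: VoxelBlockRequest now holds a VoxelBufferInternal reference,
// which makes the struct non-copy-assignable, so the requests themselves cannot
// be shuffled by a sort. Illustrative call, mirroring the usage further down:
//
// std::vector<VoxelBlockRequest *> sorted;
// get_sorted_pointers(p_blocks, comparator, sorted);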
VoxelStreamRegionFiles::VoxelStreamRegionFiles() {
_meta.version = FORMAT_VERSION;
_meta.block_size_po2 = 4;
_meta.region_size_po2 = 4;
_meta.sector_size = 512; // next_power_of_2(_meta.block_size.volume() / 10) // based on compression ratios
_meta.lod_count = 1;
_meta.channel_depths.fill(VoxelBuffer::DEFAULT_CHANNEL_DEPTH);
_meta.channel_depths[VoxelBuffer::CHANNEL_TYPE] = VoxelBuffer::DEFAULT_TYPE_CHANNEL_DEPTH;
_meta.channel_depths[VoxelBuffer::CHANNEL_SDF] = VoxelBuffer::DEFAULT_SDF_CHANNEL_DEPTH;
_meta.channel_depths[VoxelBuffer::CHANNEL_INDICES] = VoxelBuffer::DEFAULT_INDICES_CHANNEL_DEPTH;
_meta.channel_depths[VoxelBuffer::CHANNEL_WEIGHTS] = VoxelBuffer::DEFAULT_WEIGHTS_CHANNEL_DEPTH;
_meta.channel_depths.fill(VoxelBufferInternal::DEFAULT_CHANNEL_DEPTH);
_meta.channel_depths[VoxelBufferInternal::CHANNEL_TYPE] = VoxelBufferInternal::DEFAULT_TYPE_CHANNEL_DEPTH;
_meta.channel_depths[VoxelBufferInternal::CHANNEL_SDF] = VoxelBufferInternal::DEFAULT_SDF_CHANNEL_DEPTH;
_meta.channel_depths[VoxelBufferInternal::CHANNEL_INDICES] = VoxelBufferInternal::DEFAULT_INDICES_CHANNEL_DEPTH;
_meta.channel_depths[VoxelBufferInternal::CHANNEL_WEIGHTS] = VoxelBufferInternal::DEFAULT_WEIGHTS_CHANNEL_DEPTH;
}
VoxelStreamRegionFiles::~VoxelStreamRegionFiles() {
close_all_regions();
}
VoxelStream::Result VoxelStreamRegionFiles::emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) {
VoxelBlockRequest r;
r.voxel_buffer = out_buffer;
r.origin_in_voxels = origin_in_voxels;
r.lod = lod;
Vector<VoxelBlockRequest> requests;
VoxelStream::Result VoxelStreamRegionFiles::emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels,
int lod) {
VoxelBlockRequest r{ out_buffer, origin_in_voxels, lod };
Vector<Result> results;
requests.push_back(r);
emerge_blocks(requests, results);
emerge_blocks(Span<VoxelBlockRequest>(&r, 1), results);
return results[0];
}
void VoxelStreamRegionFiles::immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) {
VoxelBlockRequest r;
r.voxel_buffer = buffer;
r.origin_in_voxels = origin_in_voxels;
r.lod = lod;
Vector<VoxelBlockRequest> requests;
requests.push_back(r);
immerge_blocks(requests);
void VoxelStreamRegionFiles::immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) {
VoxelBlockRequest r{ buffer, origin_in_voxels, lod };
immerge_blocks(Span<VoxelBlockRequest>(&r, 1));
}
void VoxelStreamRegionFiles::emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vector<Result> &out_results) {
void VoxelStreamRegionFiles::emerge_blocks(Span<VoxelBlockRequest> p_blocks, Vector<Result> &out_results) {
VOXEL_PROFILE_SCOPE();
// In order to minimize opening/closing files, requests are grouped according to their region.
// Had to copy the input to sort it, as some areas in the module break if they get responses in a different order
Vector<VoxelBlockRequest> sorted_blocks;
sorted_blocks.append_array(p_blocks);
SortArray<VoxelBlockRequest, BlockRequestComparator> sorter;
sorter.compare.self = this;
sorter.sort(sorted_blocks.ptrw(), sorted_blocks.size());
std::vector<VoxelBlockRequest *> sorted_blocks;
BlockRequestComparator comparator;
comparator.self = this;
get_sorted_pointers(p_blocks, comparator, sorted_blocks);
Vector<VoxelBlockRequest> fallback_requests;
for (int i = 0; i < sorted_blocks.size(); ++i) {
VoxelBlockRequest &r = sorted_blocks.write[i];
VoxelBlockRequest &r = *sorted_blocks[i];
const EmergeResult result = _emerge_block(r.voxel_buffer, r.origin_in_voxels, r.lod);
switch (result) {
case EMERGE_OK:
@ -94,32 +104,29 @@ void VoxelStreamRegionFiles::emerge_blocks(Vector<VoxelBlockRequest> &p_blocks,
}
}
void VoxelStreamRegionFiles::immerge_blocks(const Vector<VoxelBlockRequest> &p_blocks) {
void VoxelStreamRegionFiles::immerge_blocks(Span<VoxelBlockRequest> p_blocks) {
VOXEL_PROFILE_SCOPE();
// Had to copy the input to sort it, as some areas in the module break if they get responses in a different order
Vector<VoxelBlockRequest> sorted_blocks;
sorted_blocks.append_array(p_blocks);
SortArray<VoxelBlockRequest, BlockRequestComparator> sorter;
sorter.compare.self = this;
sorter.sort(sorted_blocks.ptrw(), sorted_blocks.size());
std::vector<VoxelBlockRequest *> sorted_blocks;
BlockRequestComparator comparator;
comparator.self = this;
get_sorted_pointers(p_blocks, comparator, sorted_blocks);
for (int i = 0; i < sorted_blocks.size(); ++i) {
VoxelBlockRequest &r = sorted_blocks.write[i];
VoxelBlockRequest &r = *sorted_blocks[i];
_immerge_block(r.voxel_buffer, r.origin_in_voxels, r.lod);
}
}
int VoxelStreamRegionFiles::get_used_channels_mask() const {
// Assuming all, since that stream can store anything.
return VoxelBuffer::ALL_CHANNELS_MASK;
return VoxelBufferInternal::ALL_CHANNELS_MASK;
}
VoxelStreamRegionFiles::EmergeResult VoxelStreamRegionFiles::_emerge_block(
Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) {
VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) {
VOXEL_PROFILE_SCOPE();
ERR_FAIL_COND_V(out_buffer.is_null(), EMERGE_FAILED);
MutexLock lock(_mutex);
@ -140,12 +147,12 @@ VoxelStreamRegionFiles::EmergeResult VoxelStreamRegionFiles::_emerge_block(
CRASH_COND(!_meta_loaded);
ERR_FAIL_COND_V(lod >= _meta.lod_count, EMERGE_FAILED);
ERR_FAIL_COND_V(block_size != out_buffer->get_size(), EMERGE_FAILED);
ERR_FAIL_COND_V(block_size != out_buffer.get_size(), EMERGE_FAILED);
// Configure depths, as they might not be specified in old block data.
// Region files are expected to store these depths, and the depths set on the buffer determine how much data to read.
for (unsigned int channel_index = 0; channel_index < _meta.channel_depths.size(); ++channel_index) {
out_buffer->set_channel_depth(channel_index, _meta.channel_depths[channel_index]);
out_buffer.set_channel_depth(channel_index, _meta.channel_depths[channel_index]);
}
const Vector3i block_pos = get_block_position_from_voxels(origin_in_voxels) >> lod;
@ -171,13 +178,12 @@ VoxelStreamRegionFiles::EmergeResult VoxelStreamRegionFiles::_emerge_block(
}
}
void VoxelStreamRegionFiles::_immerge_block(Ref<VoxelBuffer> voxel_buffer, Vector3i origin_in_voxels, int lod) {
void VoxelStreamRegionFiles::_immerge_block(VoxelBufferInternal &voxel_buffer, Vector3i origin_in_voxels, int lod) {
VOXEL_PROFILE_SCOPE();
MutexLock lock(_mutex);
ERR_FAIL_COND(_directory_path.empty());
ERR_FAIL_COND(voxel_buffer.is_null());
if (!_meta_loaded) {
// If it's not loaded, always try to load meta file first if it exists already,
@ -194,7 +200,7 @@ void VoxelStreamRegionFiles::_immerge_block(Ref<VoxelBuffer> voxel_buffer, Vecto
if (!_meta_saved) {
// First time we save the meta file, initialize it from the first block format
for (unsigned int i = 0; i < _meta.channel_depths.size(); ++i) {
_meta.channel_depths[i] = voxel_buffer->get_channel_depth(i);
_meta.channel_depths[i] = voxel_buffer.get_channel_depth(i);
}
VoxelFileResult err = save_meta();
ERR_FAIL_COND(err != VOXEL_FILE_OK);
@ -202,9 +208,9 @@ void VoxelStreamRegionFiles::_immerge_block(Ref<VoxelBuffer> voxel_buffer, Vecto
// Verify format
const Vector3i block_size = Vector3i(1 << _meta.block_size_po2);
ERR_FAIL_COND(voxel_buffer->get_size() != block_size);
for (unsigned int i = 0; i < VoxelBuffer::MAX_CHANNELS; ++i) {
ERR_FAIL_COND(voxel_buffer->get_channel_depth(i) != _meta.channel_depths[i]);
ERR_FAIL_COND(voxel_buffer.get_size() != block_size);
for (unsigned int i = 0; i < VoxelBufferInternal::MAX_CHANNELS; ++i) {
ERR_FAIL_COND(voxel_buffer.get_channel_depth(i) != _meta.channel_depths[i]);
}
const Vector3i region_size = Vector3i(1 << _meta.region_size_po2);
@ -249,11 +255,11 @@ static bool u32_from_json_variant(Variant v, uint32_t &i) {
return true;
}
static bool depth_from_json_variant(Variant &v, VoxelBuffer::Depth &d) {
static bool depth_from_json_variant(Variant &v, VoxelBufferInternal::Depth &d) {
uint8_t n;
ERR_FAIL_COND_V(!u8_from_json_variant(v, n), false);
ERR_FAIL_INDEX_V(n, VoxelBuffer::DEPTH_COUNT, false);
d = (VoxelBuffer::Depth)n;
ERR_FAIL_INDEX_V(n, VoxelBufferInternal::DEPTH_COUNT, false);
d = (VoxelBufferInternal::Depth)n;
return true;
}
@ -306,9 +312,9 @@ VoxelFileResult VoxelStreamRegionFiles::save_meta() {
static void migrate_region_meta_data(Dictionary &data) {
if (data["version"] == Variant(real_t(FORMAT_VERSION_LEGACY_1))) {
Array depths;
depths.resize(VoxelBuffer::MAX_CHANNELS);
depths.resize(VoxelBufferInternal::MAX_CHANNELS);
for (int i = 0; i < depths.size(); ++i) {
depths[i] = VoxelBuffer::DEFAULT_CHANNEL_DEPTH;
depths[i] = VoxelBufferInternal::DEFAULT_CHANNEL_DEPTH;
}
data["channel_depths"] = depths;
data["version"] = FORMAT_VERSION_LEGACY_2;
@ -669,13 +675,11 @@ void VoxelStreamRegionFiles::_convert_files(Meta new_meta) {
continue;
}
Ref<VoxelBuffer> old_block;
old_block.instance();
old_block->create(old_block_size.x, old_block_size.y, old_block_size.z);
VoxelBufferInternal old_block;
old_block.create(old_block_size.x, old_block_size.y, old_block_size.z);
Ref<VoxelBuffer> new_block;
new_block.instance();
new_block->create(new_block_size.x, new_block_size.y, new_block_size.z);
VoxelBufferInternal new_block;
new_block.create(new_block_size.x, new_block_size.y, new_block_size.z);
// Load block from old stream
Vector3i block_rpos = old_region->region.get_block_position_from_index(j);
@ -697,13 +701,13 @@ void VoxelStreamRegionFiles::_convert_files(Meta new_meta) {
// Copy to a sub-area of one block
emerge_block(new_block, new_block_pos * new_block_size << region_info.lod, region_info.lod);
Vector3i dst_pos = rel * old_block->get_size();
Vector3i dst_pos = rel * old_block.get_size();
for (unsigned int channel_index = 0; channel_index < VoxelBuffer::MAX_CHANNELS; ++channel_index) {
new_block->copy_from(**old_block, Vector3i(), old_block->get_size(), dst_pos, channel_index);
new_block.copy_from(old_block, Vector3i(), old_block.get_size(), dst_pos, channel_index);
}
new_block->compress_uniform_channels();
new_block.compress_uniform_channels();
immerge_block(new_block, new_block_pos * new_block_size << region_info.lod, region_info.lod);
} else {
@ -714,12 +718,12 @@ void VoxelStreamRegionFiles::_convert_files(Meta new_meta) {
for (rpos.z = 0; rpos.z < area.z; ++rpos.z) {
for (rpos.x = 0; rpos.x < area.x; ++rpos.x) {
for (rpos.y = 0; rpos.y < area.y; ++rpos.y) {
Vector3i src_min = rpos * new_block->get_size();
Vector3i src_max = src_min + new_block->get_size();
Vector3i src_min = rpos * new_block.get_size();
Vector3i src_max = src_min + new_block.get_size();
for (unsigned int channel_index = 0; channel_index < VoxelBuffer::MAX_CHANNELS;
++channel_index) {
new_block->copy_from(**old_block, src_min, src_max, Vector3i(), channel_index);
new_block.copy_from(old_block, src_min, src_max, Vector3i(), channel_index);
}
immerge_block(new_block,

View File

@ -24,11 +24,11 @@ public:
VoxelStreamRegionFiles();
~VoxelStreamRegionFiles();
Result emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) override;
Result emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) override;
void emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vector<Result> &out_results) override;
void immerge_blocks(const Vector<VoxelBlockRequest> &p_blocks) override;
void emerge_blocks(Span<VoxelBlockRequest> p_blocks, Vector<Result> &out_results) override;
void immerge_blocks(Span<VoxelBlockRequest> p_blocks) override;
int get_used_channels_mask() const override;
@ -65,8 +65,8 @@ private:
EMERGE_FAILED
};
EmergeResult _emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod);
void _immerge_block(Ref<VoxelBuffer> voxel_buffer, Vector3i origin_in_voxels, int lod);
EmergeResult _emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod);
void _immerge_block(VoxelBufferInternal &voxel_buffer, Vector3i origin_in_voxels, int lod);
VoxelFileResult save_meta();
VoxelFileResult load_meta();
@ -85,7 +85,7 @@ private:
uint8_t lod_count = 0;
uint8_t block_size_po2 = 0; // How many voxels in a cubic block
uint8_t region_size_po2 = 0; // How many blocks in one cubic region
FixedArray<VoxelBuffer::Depth, VoxelBuffer::MAX_CHANNELS> channel_depths;
FixedArray<VoxelBufferInternal::Depth, VoxelBufferInternal::MAX_CHANNELS> channel_depths;
uint32_t sector_size = 0; // Blocks are stored at offsets multiple of that size
};

View File

@ -569,29 +569,20 @@ String VoxelStreamSQLite::get_database_path() const {
return _connection_path;
}
VoxelStream::Result VoxelStreamSQLite::emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) {
VoxelBlockRequest r;
r.lod = lod;
r.origin_in_voxels = origin_in_voxels;
r.voxel_buffer = out_buffer;
Vector<VoxelBlockRequest> requests;
Vector<VoxelStream::Result> results;
requests.push_back(r);
emerge_blocks(requests, results);
VoxelStream::Result VoxelStreamSQLite::emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) {
VoxelBlockRequest r{ out_buffer, origin_in_voxels, lod };
Vector<Result> results;
emerge_blocks(Span<VoxelBlockRequest>(&r, 1), results);
CRASH_COND(results.size() != 1);
return results[0];
}
void VoxelStreamSQLite::immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) {
VoxelBlockRequest r;
r.voxel_buffer = buffer;
r.origin_in_voxels = origin_in_voxels;
r.lod = lod;
Vector<VoxelBlockRequest> requests;
requests.push_back(r);
immerge_blocks(requests);
void VoxelStreamSQLite::immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) {
VoxelBlockRequest r{ buffer, origin_in_voxels, lod };
immerge_blocks(Span<VoxelBlockRequest>(&r, 1));
}
void VoxelStreamSQLite::emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vector<Result> &out_results) {
void VoxelStreamSQLite::emerge_blocks(Span<VoxelBlockRequest> p_blocks, Vector<Result> &out_results) {
VOXEL_PROFILE_SCOPE();
// TODO Get block size from database
@ -602,7 +593,7 @@ void VoxelStreamSQLite::emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vecto
// Check the cache first
Vector<int> blocks_to_load;
for (int i = 0; i < p_blocks.size(); ++i) {
VoxelBlockRequest &wr = p_blocks.write[i];
VoxelBlockRequest &wr = p_blocks[i];
const Vector3i pos = wr.origin_in_voxels >> bs_po2;
Ref<VoxelBuffer> vb;
@ -638,10 +629,9 @@ void VoxelStreamSQLite::emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vecto
const Result res = con->load_block(loc, _temp_block_data, VoxelStreamSQLiteInternal::VOXELS);
if (res == RESULT_BLOCK_FOUND) {
VoxelBlockRequest &wr = p_blocks.write[ri];
VoxelBlockRequest &wr = p_blocks[ri];
// TODO Not sure if we should actually expect non-null. Blocks can legitimately be missing.
ERR_FAIL_COND(wr.voxel_buffer.is_null());
_voxel_block_serializer.decompress_and_deserialize(_temp_block_data, **wr.voxel_buffer);
_voxel_block_serializer.decompress_and_deserialize(_temp_block_data, wr.voxel_buffer);
}
out_results.write[i] = res;
@ -652,13 +642,13 @@ void VoxelStreamSQLite::emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vecto
recycle_connection(con);
}
void VoxelStreamSQLite::immerge_blocks(const Vector<VoxelBlockRequest> &p_blocks) {
void VoxelStreamSQLite::immerge_blocks(Span<VoxelBlockRequest> p_blocks) {
// TODO Get block size from database
const int bs_po2 = VoxelConstants::DEFAULT_BLOCK_SIZE_PO2;
// First put in cache
for (int i = 0; i < p_blocks.size(); ++i) {
const VoxelBlockRequest &r = p_blocks[i];
VoxelBlockRequest &r = p_blocks[i];
const Vector3i pos = r.origin_in_voxels >> bs_po2;
if (!BlockLocation::validate(pos, r.lod)) {
@ -763,7 +753,7 @@ void VoxelStreamSQLite::save_instance_blocks(Span<VoxelStreamInstanceDataRequest
int VoxelStreamSQLite::get_used_channels_mask() const {
// Assuming all, since that stream can store anything.
return VoxelBuffer::ALL_CHANNELS_MASK;
return VoxelBufferInternal::ALL_CHANNELS_MASK;
}
void VoxelStreamSQLite::flush_cache() {
@ -799,13 +789,13 @@ void VoxelStreamSQLite::flush_cache(VoxelStreamSQLiteInternal *con) {
// Save voxels
if (block.has_voxels) {
if (block.voxels.is_valid()) {
VoxelBlockSerializerInternal::SerializeResult res = serializer.serialize_and_compress(**block.voxels);
ERR_FAIL_COND(!res.success);
con->save_block(loc, res.data, VoxelStreamSQLiteInternal::VOXELS);
} else {
if (block.voxels_deleted) {
const std::vector<uint8_t> empty;
con->save_block(loc, empty, VoxelStreamSQLiteInternal::VOXELS);
} else {
VoxelBlockSerializerInternal::SerializeResult res = serializer.serialize_and_compress(block.voxels);
ERR_FAIL_COND(!res.success);
con->save_block(loc, res.data, VoxelStreamSQLiteInternal::VOXELS);
}
}

View File

@ -21,11 +21,11 @@ public:
void set_database_path(String path);
String get_database_path() const;
Result emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) override;
Result emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) override;
void emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vector<Result> &out_results) override;
void immerge_blocks(const Vector<VoxelBlockRequest> &p_blocks) override;
void emerge_blocks(Span<VoxelBlockRequest> p_blocks, Vector<Result> &out_results) override;
void immerge_blocks(Span<VoxelBlockRequest> p_blocks) override;
bool supports_instance_blocks() const override;
void load_instance_blocks(

View File

@ -3,8 +3,9 @@
#include "../storage/voxel_buffer.h"
#include "vox_data.h"
Error VoxelVoxLoader::load_from_file(String fpath, Ref<VoxelBuffer> voxels, Ref<VoxelColorPalette> palette) {
ERR_FAIL_COND_V(voxels.is_null(), ERR_INVALID_PARAMETER);
Error VoxelVoxLoader::load_from_file(String fpath, Ref<VoxelBuffer> p_voxels, Ref<VoxelColorPalette> palette) {
ERR_FAIL_COND_V(p_voxels.is_null(), ERR_INVALID_PARAMETER);
VoxelBufferInternal &voxels = p_voxels->get_buffer();
vox::Data data;
Error load_err = data.load_from_file(fpath);
@ -12,14 +13,14 @@ Error VoxelVoxLoader::load_from_file(String fpath, Ref<VoxelBuffer> voxels, Ref<
const vox::Model &model = data.get_model(0);
const VoxelBuffer::ChannelId channel = VoxelBuffer::CHANNEL_COLOR;
const VoxelBufferInternal::ChannelId channel = VoxelBufferInternal::CHANNEL_COLOR;
Span<const Color8> src_palette = to_span_const(data.get_palette());
const VoxelBuffer::Depth depth = voxels->get_channel_depth(VoxelBuffer::CHANNEL_COLOR);
const VoxelBufferInternal::Depth depth = voxels.get_channel_depth(VoxelBufferInternal::CHANNEL_COLOR);
Span<uint8_t> dst_raw;
voxels->create(model.size);
voxels->decompress_channel(channel);
CRASH_COND(!voxels->get_channel_raw(channel, dst_raw));
voxels.create(model.size);
voxels.decompress_channel(channel);
CRASH_COND(!voxels.get_channel_raw(channel, dst_raw));
if (palette.is_valid()) {
for (size_t i = 0; i < src_palette.size(); ++i) {
@ -27,11 +28,11 @@ Error VoxelVoxLoader::load_from_file(String fpath, Ref<VoxelBuffer> voxels, Ref<
}
switch (depth) {
case VoxelBuffer::DEPTH_8_BIT: {
case VoxelBufferInternal::DEPTH_8_BIT: {
memcpy(dst_raw.data(), model.color_indexes.data(), model.color_indexes.size());
} break;
case VoxelBuffer::DEPTH_16_BIT: {
case VoxelBufferInternal::DEPTH_16_BIT: {
Span<uint16_t> dst = dst_raw.reinterpret_cast_to<uint16_t>();
for (size_t i = 0; i < dst.size(); ++i) {
dst[i] = model.color_indexes[i];
@ -45,14 +46,14 @@ Error VoxelVoxLoader::load_from_file(String fpath, Ref<VoxelBuffer> voxels, Ref<
} else {
switch (depth) {
case VoxelBuffer::DEPTH_8_BIT: {
case VoxelBufferInternal::DEPTH_8_BIT: {
for (size_t i = 0; i < dst_raw.size(); ++i) {
const uint8_t ci = model.color_indexes[i];
dst_raw[i] = src_palette[ci].to_u8();
}
} break;
case VoxelBuffer::DEPTH_16_BIT: {
case VoxelBufferInternal::DEPTH_16_BIT: {
Span<uint16_t> dst = dst_raw.reinterpret_cast_to<uint16_t>();
for (size_t i = 0; i < dst.size(); ++i) {
const uint8_t ci = model.color_indexes[i];

View File

@ -11,7 +11,7 @@ class VoxelVoxLoader : public Reference {
GDCLASS(VoxelVoxLoader, Reference);
public:
Error load_from_file(String fpath, Ref<VoxelBuffer> voxels, Ref<VoxelColorPalette> palette);
Error load_from_file(String fpath, Ref<VoxelBuffer> p_voxels, Ref<VoxelColorPalette> palette);
// TODO Have chunked loading for better memory usage
// TODO Saving

View File

@ -8,7 +8,7 @@
// TODO Rename VoxelStreamBlockRequest
struct VoxelBlockRequest {
Ref<VoxelBuffer> voxel_buffer;
VoxelBufferInternal &voxel_buffer;
Vector3i origin_in_voxels;
int lod;
};
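// Note: with a reference member, the struct is neither default-constructible
// nor copy-assignable; requests are built in place with aggregate
// initialization. Illustrative construction (a sketch):
//
// VoxelBlockRequest r{ buffer, origin_in_voxels, lod };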

View File

@ -19,18 +19,18 @@ const unsigned int BLOCK_TRAILING_MAGIC_SIZE = 4;
const unsigned int BLOCK_METADATA_HEADER_SIZE = sizeof(uint32_t);
} // namespace
size_t get_metadata_size_in_bytes(const VoxelBuffer &buffer) {
size_t get_metadata_size_in_bytes(const VoxelBufferInternal &buffer) {
size_t size = 0;
const Map<Vector3i, Variant>::Element *elem = buffer.get_voxel_metadata().front();
while (elem != nullptr) {
const Vector3i pos = elem->key();
ERR_FAIL_COND_V_MSG(pos.x < 0 || static_cast<uint32_t>(pos.x) >= VoxelBuffer::MAX_SIZE, 0,
ERR_FAIL_COND_V_MSG(pos.x < 0 || static_cast<uint32_t>(pos.x) >= VoxelBufferInternal::MAX_SIZE, 0,
"Invalid voxel metadata X position");
ERR_FAIL_COND_V_MSG(pos.y < 0 || static_cast<uint32_t>(pos.y) >= VoxelBuffer::MAX_SIZE, 0,
ERR_FAIL_COND_V_MSG(pos.y < 0 || static_cast<uint32_t>(pos.y) >= VoxelBufferInternal::MAX_SIZE, 0,
"Invalid voxel metadata Y position");
ERR_FAIL_COND_V_MSG(pos.z < 0 || static_cast<uint32_t>(pos.z) >= VoxelBuffer::MAX_SIZE, 0,
ERR_FAIL_COND_V_MSG(pos.z < 0 || static_cast<uint32_t>(pos.z) >= VoxelBufferInternal::MAX_SIZE, 0,
"Invalid voxel metadata Z position");
size += 3 * sizeof(uint16_t); // Positions are stored as 3 unsigned shorts
@ -72,7 +72,7 @@ inline T read(uint8_t *&src) {
}
// The target buffer MUST have the correct size. Recoverable errors must have been checked beforehand.
void serialize_metadata(uint8_t *p_dst, const VoxelBuffer &buffer, const size_t metadata_size) {
void serialize_metadata(uint8_t *p_dst, const VoxelBufferInternal &buffer, const size_t metadata_size) {
uint8_t *dst = p_dst;
{
@ -88,7 +88,7 @@ void serialize_metadata(uint8_t *p_dst, const VoxelBuffer &buffer, const size_t
const Map<Vector3i, Variant>::Element *elem = buffer.get_voxel_metadata().front();
while (elem != nullptr) {
// Serializing key as ushort because it's more than enough for a 3D dense array
static_assert(VoxelBuffer::MAX_SIZE <= 65535, "Maximum size exceeds serialization support");
static_assert(VoxelBufferInternal::MAX_SIZE <= 65535, "Maximum size exceeds serialization support");
const Vector3i pos = elem->key();
write<uint16_t>(dst, pos.x);
write<uint16_t>(dst, pos.y);
@ -109,7 +109,7 @@ void serialize_metadata(uint8_t *p_dst, const VoxelBuffer &buffer, const size_t
.format(varray(SIZE_T_TO_VARIANT(metadata_size), (int)(dst - p_dst))));
}
bool deserialize_metadata(uint8_t *p_src, VoxelBuffer &buffer, const size_t metadata_size) {
bool deserialize_metadata(uint8_t *p_src, VoxelBufferInternal &buffer, const size_t metadata_size) {
uint8_t *src = p_src;
size_t remaining_length = metadata_size;
@ -146,26 +146,26 @@ bool deserialize_metadata(uint8_t *p_src, VoxelBuffer &buffer, const size_t meta
return true;
}
size_t get_size_in_bytes(const VoxelBuffer &buffer, size_t &metadata_size) {
size_t get_size_in_bytes(const VoxelBufferInternal &buffer, size_t &metadata_size) {
// Version and size
size_t size = 1 * sizeof(uint8_t) + 3 * sizeof(uint16_t);
const Vector3i size_in_voxels = buffer.get_size();
for (unsigned int channel_index = 0; channel_index < VoxelBuffer::MAX_CHANNELS; ++channel_index) {
const VoxelBuffer::Compression compression = buffer.get_channel_compression(channel_index);
const VoxelBuffer::Depth depth = buffer.get_channel_depth(channel_index);
for (unsigned int channel_index = 0; channel_index < VoxelBufferInternal::MAX_CHANNELS; ++channel_index) {
const VoxelBufferInternal::Compression compression = buffer.get_channel_compression(channel_index);
const VoxelBufferInternal::Depth depth = buffer.get_channel_depth(channel_index);
// For format value
size += 1;
switch (compression) {
case VoxelBuffer::COMPRESSION_NONE: {
size += VoxelBuffer::get_size_in_bytes_for_volume(size_in_voxels, depth);
case VoxelBufferInternal::COMPRESSION_NONE: {
size += VoxelBufferInternal::get_size_in_bytes_for_volume(size_in_voxels, depth);
} break;
case VoxelBuffer::COMPRESSION_UNIFORM: {
size += VoxelBuffer::get_depth_bit_count(depth) >> 3;
case VoxelBufferInternal::COMPRESSION_UNIFORM: {
size += VoxelBufferInternal::get_depth_bit_count(depth) >> 3;
} break;
default:
@ -184,7 +184,9 @@ size_t get_size_in_bytes(const VoxelBuffer &buffer, size_t &metadata_size) {
return size + metadata_size_with_header + BLOCK_TRAILING_MAGIC_SIZE;
}
VoxelBlockSerializerInternal::SerializeResult VoxelBlockSerializerInternal::serialize(const VoxelBuffer &voxel_buffer) {
VoxelBlockSerializerInternal::SerializeResult VoxelBlockSerializerInternal::serialize(
const VoxelBufferInternal &voxel_buffer) {
//
VOXEL_PROFILE_SCOPE();
// Cannot serialize an empty block
ERR_FAIL_COND_V(voxel_buffer.get_size().volume() == 0, SerializeResult(_data, false));
@ -207,34 +209,34 @@ VoxelBlockSerializerInternal::SerializeResult VoxelBlockSerializerInternal::seri
ERR_FAIL_COND_V(voxel_buffer.get_size().z > std::numeric_limits<uint16_t>::max(), SerializeResult(_data, false));
f->store_16(voxel_buffer.get_size().z);
for (unsigned int channel_index = 0; channel_index < VoxelBuffer::MAX_CHANNELS; ++channel_index) {
const VoxelBuffer::Compression compression = voxel_buffer.get_channel_compression(channel_index);
const VoxelBuffer::Depth depth = voxel_buffer.get_channel_depth(channel_index);
for (unsigned int channel_index = 0; channel_index < VoxelBufferInternal::MAX_CHANNELS; ++channel_index) {
const VoxelBufferInternal::Compression compression = voxel_buffer.get_channel_compression(channel_index);
const VoxelBufferInternal::Depth depth = voxel_buffer.get_channel_depth(channel_index);
// Low nibble: compression (up to 16 values allowed)
// High nibble: depth (up to 16 values allowed)
const uint8_t fmt = static_cast<uint8_t>(compression) | (static_cast<uint8_t>(depth) << 4);
f->store_8(fmt);
switch (compression) {
case VoxelBuffer::COMPRESSION_NONE: {
case VoxelBufferInternal::COMPRESSION_NONE: {
Span<uint8_t> data;
ERR_FAIL_COND_V(!voxel_buffer.get_channel_raw(channel_index, data), SerializeResult(_data, false));
f->store_buffer(data.data(), data.size());
} break;
case VoxelBuffer::COMPRESSION_UNIFORM: {
case VoxelBufferInternal::COMPRESSION_UNIFORM: {
const uint64_t v = voxel_buffer.get_voxel(Vector3i(), channel_index);
switch (depth) {
case VoxelBuffer::DEPTH_8_BIT:
case VoxelBufferInternal::DEPTH_8_BIT:
f->store_8(v);
break;
case VoxelBuffer::DEPTH_16_BIT:
case VoxelBufferInternal::DEPTH_16_BIT:
f->store_16(v);
break;
case VoxelBuffer::DEPTH_32_BIT:
case VoxelBufferInternal::DEPTH_32_BIT:
f->store_32(v);
break;
case VoxelBuffer::DEPTH_64_BIT:
case VoxelBufferInternal::DEPTH_64_BIT:
f->store_64(v);
break;
default:
@ -262,7 +264,9 @@ VoxelBlockSerializerInternal::SerializeResult VoxelBlockSerializerInternal::seri
return SerializeResult(_data, true);
}
bool VoxelBlockSerializerInternal::deserialize(const std::vector<uint8_t> &p_data, VoxelBuffer &out_voxel_buffer) {
bool VoxelBlockSerializerInternal::deserialize(const std::vector<uint8_t> &p_data,
VoxelBufferInternal &out_voxel_buffer) {
//
VOXEL_PROFILE_SCOPE();
ERR_FAIL_COND_V(p_data.size() < sizeof(uint32_t), false);
@ -295,21 +299,21 @@ bool VoxelBlockSerializerInternal::deserialize(const std::vector<uint8_t> &p_dat
out_voxel_buffer.create(Vector3i(size_x, size_y, size_z));
}
for (unsigned int channel_index = 0; channel_index < VoxelBuffer::MAX_CHANNELS; ++channel_index) {
for (unsigned int channel_index = 0; channel_index < VoxelBufferInternal::MAX_CHANNELS; ++channel_index) {
const uint8_t fmt = f->get_8();
const uint8_t compression_value = fmt & 0xf;
const uint8_t depth_value = (fmt >> 4) & 0xf;
ERR_FAIL_COND_V_MSG(compression_value >= VoxelBuffer::COMPRESSION_COUNT, false,
ERR_FAIL_COND_V_MSG(compression_value >= VoxelBufferInternal::COMPRESSION_COUNT, false,
"At offset 0x" + String::num_int64(f->get_position() - 1, 16));
ERR_FAIL_COND_V_MSG(depth_value >= VoxelBuffer::DEPTH_COUNT, false,
ERR_FAIL_COND_V_MSG(depth_value >= VoxelBufferInternal::DEPTH_COUNT, false,
"At offset 0x" + String::num_int64(f->get_position() - 1, 16));
VoxelBuffer::Compression compression = (VoxelBuffer::Compression)compression_value;
VoxelBuffer::Depth depth = (VoxelBuffer::Depth)depth_value;
VoxelBufferInternal::Compression compression = (VoxelBufferInternal::Compression)compression_value;
VoxelBufferInternal::Depth depth = (VoxelBufferInternal::Depth)depth_value;
out_voxel_buffer.set_channel_depth(channel_index, depth);
switch (compression) {
case VoxelBuffer::COMPRESSION_NONE: {
case VoxelBufferInternal::COMPRESSION_NONE: {
out_voxel_buffer.decompress_channel(channel_index);
Span<uint8_t> buffer;
@ -323,19 +327,19 @@ bool VoxelBlockSerializerInternal::deserialize(const std::vector<uint8_t> &p_dat
} break;
case VoxelBuffer::COMPRESSION_UNIFORM: {
case VoxelBufferInternal::COMPRESSION_UNIFORM: {
uint64_t v;
switch (out_voxel_buffer.get_channel_depth(channel_index)) {
case VoxelBuffer::DEPTH_8_BIT:
case VoxelBufferInternal::DEPTH_8_BIT:
v = f->get_8();
break;
case VoxelBuffer::DEPTH_16_BIT:
case VoxelBufferInternal::DEPTH_16_BIT:
v = f->get_16();
break;
case VoxelBuffer::DEPTH_32_BIT:
case VoxelBufferInternal::DEPTH_32_BIT:
v = f->get_32();
break;
case VoxelBuffer::DEPTH_64_BIT:
case VoxelBufferInternal::DEPTH_64_BIT:
v = f->get_64();
break;
default:
@ -364,7 +368,7 @@ bool VoxelBlockSerializerInternal::deserialize(const std::vector<uint8_t> &p_dat
}
VoxelBlockSerializerInternal::SerializeResult VoxelBlockSerializerInternal::serialize_and_compress(
const VoxelBuffer &voxel_buffer) {
const VoxelBufferInternal &voxel_buffer) {
VOXEL_PROFILE_SCOPE();
SerializeResult res = serialize(voxel_buffer);
@ -380,7 +384,7 @@ VoxelBlockSerializerInternal::SerializeResult VoxelBlockSerializerInternal::seri
}
bool VoxelBlockSerializerInternal::decompress_and_deserialize(
const std::vector<uint8_t> &p_data, VoxelBuffer &out_voxel_buffer) {
const std::vector<uint8_t> &p_data, VoxelBufferInternal &out_voxel_buffer) {
VOXEL_PROFILE_SCOPE();
const bool res = VoxelCompressedData::decompress(Span<const uint8_t>(p_data.data(), 0, p_data.size()), _data);
@ -390,7 +394,7 @@ bool VoxelBlockSerializerInternal::decompress_and_deserialize(
}
bool VoxelBlockSerializerInternal::decompress_and_deserialize(
FileAccess *f, unsigned int size_to_read, VoxelBuffer &out_voxel_buffer) {
FileAccess *f, unsigned int size_to_read, VoxelBufferInternal &out_voxel_buffer) {
VOXEL_PROFILE_SCOPE();
ERR_FAIL_COND_V(f == nullptr, false);
@ -407,15 +411,15 @@ bool VoxelBlockSerializerInternal::decompress_and_deserialize(
return decompress_and_deserialize(_compressed_data, out_voxel_buffer);
}
int VoxelBlockSerializerInternal::serialize(Ref<StreamPeer> peer, Ref<VoxelBuffer> voxel_buffer, bool compress) {
int VoxelBlockSerializerInternal::serialize(Ref<StreamPeer> peer, VoxelBufferInternal &voxel_buffer, bool compress) {
if (compress) {
SerializeResult res = serialize_and_compress(**voxel_buffer);
SerializeResult res = serialize_and_compress(voxel_buffer);
ERR_FAIL_COND_V(!res.success, -1);
peer->put_data(res.data.data(), res.data.size());
return res.data.size();
} else {
SerializeResult res = serialize(**voxel_buffer);
SerializeResult res = serialize(voxel_buffer);
ERR_FAIL_COND_V(!res.success, -1);
peer->put_data(res.data.data(), res.data.size());
return res.data.size();
@ -423,19 +427,19 @@ int VoxelBlockSerializerInternal::serialize(Ref<StreamPeer> peer, Ref<VoxelBuffe
}
void VoxelBlockSerializerInternal::deserialize(
Ref<StreamPeer> peer, Ref<VoxelBuffer> voxel_buffer, int size, bool decompress) {
Ref<StreamPeer> peer, VoxelBufferInternal &voxel_buffer, int size, bool decompress) {
if (decompress) {
_compressed_data.resize(size);
const Error err = peer->get_data(_compressed_data.data(), _compressed_data.size());
ERR_FAIL_COND(err != OK);
bool success = decompress_and_deserialize(_compressed_data, **voxel_buffer);
bool success = decompress_and_deserialize(_compressed_data, voxel_buffer);
ERR_FAIL_COND(!success);
} else {
_data.resize(size);
const Error err = peer->get_data(_data.data(), _data.size());
ERR_FAIL_COND(err != OK);
deserialize(_data, **voxel_buffer);
deserialize(_data, voxel_buffer);
}
}
@ -444,14 +448,14 @@ void VoxelBlockSerializerInternal::deserialize(
int VoxelBlockSerializer::serialize(Ref<StreamPeer> peer, Ref<VoxelBuffer> voxel_buffer, bool compress) {
ERR_FAIL_COND_V(voxel_buffer.is_null(), 0);
ERR_FAIL_COND_V(peer.is_null(), 0);
return _serializer.serialize(peer, voxel_buffer, compress);
return _serializer.serialize(peer, voxel_buffer->get_buffer(), compress);
}
void VoxelBlockSerializer::deserialize(Ref<StreamPeer> peer, Ref<VoxelBuffer> voxel_buffer, int size, bool decompress) {
ERR_FAIL_COND(voxel_buffer.is_null());
ERR_FAIL_COND(peer.is_null());
ERR_FAIL_COND(size <= 0);
_serializer.deserialize(peer, voxel_buffer, size, decompress);
_serializer.deserialize(peer, voxel_buffer->get_buffer(), size, decompress);
}
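Note: the pattern above (validate the Ref at the script boundary, then hand the unwrapped internal object to the worker) repeats throughout this commit. A minimal standalone sketch of the split, with hypothetical stand-in names:

#include <cstdint>

// The internal type does the work and is not reference-counted.
struct VoxelBufferInternalSketch {
	void fill(uint64_t value) {} // placeholder for real channel storage
};

// The script-facing type would inherit Reference in Godot; it only wraps.
class VoxelBufferSketch {
public:
	VoxelBufferInternalSketch &get_buffer() { return _buffer; }
	const VoxelBufferInternalSketch &get_buffer() const { return _buffer; }
private:
	VoxelBufferInternalSketch _buffer;
};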
void VoxelBlockSerializer::_bind_methods() {

View File

@ -5,8 +5,8 @@
#include <core/reference.h>
#include <vector>
class VoxelBuffer;
class StreamPeer;
class VoxelBufferInternal;
class VoxelBlockSerializerInternal {
// Had to be named differently so it does not conflict with the wrapper exposed to the Godot script API
@ -19,15 +19,15 @@ public:
data(p_data), success(p_success) {}
};
SerializeResult serialize(const VoxelBuffer &voxel_buffer);
bool deserialize(const std::vector<uint8_t> &p_data, VoxelBuffer &out_voxel_buffer);
SerializeResult serialize(const VoxelBufferInternal &voxel_buffer);
bool deserialize(const std::vector<uint8_t> &p_data, VoxelBufferInternal &out_voxel_buffer);
SerializeResult serialize_and_compress(const VoxelBuffer &voxel_buffer);
bool decompress_and_deserialize(const std::vector<uint8_t> &p_data, VoxelBuffer &out_voxel_buffer);
bool decompress_and_deserialize(FileAccess *f, unsigned int size_to_read, VoxelBuffer &out_voxel_buffer);
SerializeResult serialize_and_compress(const VoxelBufferInternal &voxel_buffer);
bool decompress_and_deserialize(const std::vector<uint8_t> &p_data, VoxelBufferInternal &out_voxel_buffer);
bool decompress_and_deserialize(FileAccess *f, unsigned int size_to_read, VoxelBufferInternal &out_voxel_buffer);
int serialize(Ref<StreamPeer> peer, Ref<VoxelBuffer> voxel_buffer, bool compress);
void deserialize(Ref<StreamPeer> peer, Ref<VoxelBuffer> voxel_buffer, int size, bool decompress);
int serialize(Ref<StreamPeer> peer, VoxelBufferInternal &voxel_buffer, bool compress);
void deserialize(Ref<StreamPeer> peer, VoxelBufferInternal &voxel_buffer, int size, bool decompress);
private:
// Make thread-locals?
@ -37,6 +37,8 @@ private:
FileAccessMemory _file_access_memory;
};
class VoxelBuffer;
class VoxelBlockSerializer : public Reference {
GDCLASS(VoxelBlockSerializer, Reference)
public:

View File

@ -7,27 +7,25 @@ VoxelStream::VoxelStream() {
VoxelStream::~VoxelStream() {
}
VoxelStream::Result VoxelStream::emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) {
ERR_FAIL_COND_V(out_buffer.is_null(), RESULT_ERROR);
VoxelStream::Result VoxelStream::emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) {
// Can be implemented in subclasses
return RESULT_BLOCK_NOT_FOUND;
}
void VoxelStream::immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) {
ERR_FAIL_COND(buffer.is_null());
void VoxelStream::immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) {
// Can be implemented in subclasses
}
void VoxelStream::emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vector<Result> &out_results) {
void VoxelStream::emerge_blocks(Span<VoxelBlockRequest> p_blocks, Vector<Result> &out_results) {
// Default implementation. Some stream types may override this to optimize batched loading.
for (int i = 0; i < p_blocks.size(); ++i) {
VoxelBlockRequest &r = p_blocks.write[i];
VoxelBlockRequest &r = p_blocks[i];
const Result res = emerge_block(r.voxel_buffer, r.origin_in_voxels, r.lod);
out_results.push_back(res);
}
}
void VoxelStream::immerge_blocks(const Vector<VoxelBlockRequest> &p_blocks) {
void VoxelStream::immerge_blocks(Span<VoxelBlockRequest> p_blocks) {
for (int i = 0; i < p_blocks.size(); ++i) {
const VoxelBlockRequest &r = p_blocks[i];
immerge_block(r.voxel_buffer, r.origin_in_voxels, r.lod);
@ -77,12 +75,14 @@ int VoxelStream::get_lod_count() const {
VoxelStream::Result VoxelStream::_b_emerge_block(Ref<VoxelBuffer> out_buffer, Vector3 origin_in_voxels, int lod) {
ERR_FAIL_COND_V(lod < 0, RESULT_ERROR);
return emerge_block(out_buffer, Vector3i(origin_in_voxels), lod);
ERR_FAIL_COND_V(out_buffer.is_null(), RESULT_ERROR);
return emerge_block(out_buffer->get_buffer(), Vector3i(origin_in_voxels), lod);
}
void VoxelStream::_b_immerge_block(Ref<VoxelBuffer> buffer, Vector3 origin_in_voxels, int lod) {
ERR_FAIL_COND(lod < 0);
immerge_block(buffer, Vector3i(origin_in_voxels), lod);
ERR_FAIL_COND(buffer.is_null());
immerge_block(buffer->get_buffer(), Vector3i(origin_in_voxels), lod);
}
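Note: with the new signatures, C++ streams override the internal-buffer variants directly. A hedged sketch under the signatures shown above; `VoxelStreamAirSketch` is hypothetical and Godot registration macros are omitted:

class VoxelStreamAirSketch : public VoxelStream {
public:
	Result emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) override {
		// The caller pre-sizes out_buffer; an all-air stream writes nothing.
		return RESULT_BLOCK_FOUND;
	}
	void immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) override {
		// Nothing to persist in this sketch.
	}
};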
int VoxelStream::_b_get_used_channels_mask() const {

View File

@ -1,7 +1,6 @@
#ifndef VOXEL_STREAM_H
#define VOXEL_STREAM_H
#include "../generators/voxel_generator.h"
#include "instance_data.h"
#include "voxel_block_request.h"
#include <core/resource.h>
@ -37,21 +36,19 @@ public:
// Queries a block of voxels beginning at the given world-space voxel position and LOD.
// If you use LOD, the result at a given coordinate must always remain the same regardless of the LOD index.
// In other words, voxel values must solely depend on their coordinates or fixed parameters.
virtual Result emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod);
virtual Result emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod);
// TODO Deprecate
virtual void immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod);
virtual void immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod);
// TODO Rename load_voxel_blocks
// TODO Pass with Span
// Note: vector is passed by ref for performance. Don't reorder it.
virtual void emerge_blocks(Vector<VoxelBlockRequest> &p_blocks, Vector<Result> &out_results);
// Note: Don't modify the order of `p_blocks`.
virtual void emerge_blocks(Span<VoxelBlockRequest> p_blocks, Vector<Result> &out_results);
// TODO Rename save_voxel_blocks
// TODO Pass with Span
// Saves multiple blocks of voxels back to the stream.
// This function is recommended if you save to files, because file accesses can be batched.
virtual void immerge_blocks(const Vector<VoxelBlockRequest> &p_blocks);
virtual void immerge_blocks(Span<VoxelBlockRequest> p_blocks);
virtual bool supports_instance_blocks() const;
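Note: the move from Vector<VoxelBlockRequest> to Span<VoxelBlockRequest> means these virtuals now take a non-owning view over contiguous storage, so no copy or refcount is involved. A minimal standalone sketch of what such a view boils down to (the module's real Span is assumed richer):

#include <cstddef>

template <typename T>
struct SpanSketch {
	T *ptr = nullptr;
	size_t count = 0;

	T &operator[](size_t i) { return ptr[i]; }
	const T &operator[](size_t i) const { return ptr[i]; }
	size_t size() const { return count; }
};

// Usage: any contiguous storage can be viewed without copying, e.g.
// std::vector<int> v(8); SpanSketch<int> s{ v.data(), v.size() };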

View File

@ -19,15 +19,14 @@ VoxelStreamBlockFiles::VoxelStreamBlockFiles() {
_meta.block_size_po2 = 4;
_meta.lod_count = 1;
_meta.version = FORMAT_VERSION;
_meta.channel_depths.fill(VoxelBuffer::DEFAULT_CHANNEL_DEPTH);
_meta.channel_depths.fill(VoxelBufferInternal::DEFAULT_CHANNEL_DEPTH);
}
// TODO Have configurable block size
VoxelStream::Result VoxelStreamBlockFiles::emerge_block(
Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) {
ERR_FAIL_COND_V(out_buffer.is_null(), RESULT_ERROR);
VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) {
//
if (_directory_path.empty()) {
return RESULT_BLOCK_NOT_FOUND;
}
@ -43,7 +42,7 @@ VoxelStream::Result VoxelStreamBlockFiles::emerge_block(
const Vector3i block_size(1 << _meta.block_size_po2);
ERR_FAIL_COND_V(lod >= _meta.lod_count, RESULT_ERROR);
ERR_FAIL_COND_V(block_size != out_buffer->get_size(), RESULT_ERROR);
ERR_FAIL_COND_V(block_size != out_buffer.get_size(), RESULT_ERROR);
Vector3i block_pos = get_block_position(origin_in_voxels) >> lod;
String file_path = get_block_file_path(block_pos, lod);
@ -75,11 +74,11 @@ VoxelStream::Result VoxelStreamBlockFiles::emerge_block(
// Configure depths, as they currently are only specified in the meta file.
// Files are expected to contain such depths, and use those in the buffer to know how much data to read.
for (unsigned int channel_index = 0; channel_index < _meta.channel_depths.size(); ++channel_index) {
out_buffer->set_channel_depth(channel_index, _meta.channel_depths[channel_index]);
out_buffer.set_channel_depth(channel_index, _meta.channel_depths[channel_index]);
}
uint32_t size_to_read = f->get_32();
if (!_block_serializer.decompress_and_deserialize(f, size_to_read, **out_buffer)) {
if (!_block_serializer.decompress_and_deserialize(f, size_to_read, out_buffer)) {
ERR_PRINT("Failed to decompress and deserialize");
}
}
@ -90,9 +89,8 @@ VoxelStream::Result VoxelStreamBlockFiles::emerge_block(
return RESULT_BLOCK_FOUND;
}
void VoxelStreamBlockFiles::immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) {
void VoxelStreamBlockFiles::immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) {
ERR_FAIL_COND(_directory_path.empty());
ERR_FAIL_COND(buffer.is_null());
if (!_meta_loaded) {
// If it's not loaded, always try to load meta file first if it exists already,
@ -109,7 +107,7 @@ void VoxelStreamBlockFiles::immerge_block(Ref<VoxelBuffer> buffer, Vector3i orig
if (!_meta_saved) {
// First time we save the meta file, initialize it from the first block format
for (unsigned int i = 0; i < _meta.channel_depths.size(); ++i) {
_meta.channel_depths[i] = buffer->get_channel_depth(i);
_meta.channel_depths[i] = buffer.get_channel_depth(i);
}
VoxelFileResult res = save_meta();
ERR_FAIL_COND(res != VOXEL_FILE_OK);
@ -117,9 +115,9 @@ void VoxelStreamBlockFiles::immerge_block(Ref<VoxelBuffer> buffer, Vector3i orig
// Check format
const Vector3i block_size = Vector3i(1 << _meta.block_size_po2);
ERR_FAIL_COND(buffer->get_size() != block_size);
ERR_FAIL_COND(buffer.get_size() != block_size);
for (unsigned int channel_index = 0; channel_index < _meta.channel_depths.size(); ++channel_index) {
ERR_FAIL_COND(buffer->get_channel_depth(channel_index) != _meta.channel_depths[channel_index]);
ERR_FAIL_COND(buffer.get_channel_depth(channel_index) != _meta.channel_depths[channel_index]);
}
Vector3i block_pos = get_block_position(origin_in_voxels) >> lod;
@ -143,7 +141,7 @@ void VoxelStreamBlockFiles::immerge_block(Ref<VoxelBuffer> buffer, Vector3i orig
f->store_buffer((uint8_t *)FORMAT_BLOCK_MAGIC, 4);
f->store_8(FORMAT_VERSION);
VoxelBlockSerializerInternal::SerializeResult res = _block_serializer.serialize_and_compress(**buffer);
VoxelBlockSerializerInternal::SerializeResult res = _block_serializer.serialize_and_compress(buffer);
if (!res.success) {
memdelete(f);
ERR_PRINT("Failed to save block");
@ -159,7 +157,7 @@ void VoxelStreamBlockFiles::immerge_block(Ref<VoxelBuffer> buffer, Vector3i orig
int VoxelStreamBlockFiles::get_used_channels_mask() const {
// Assuming all, since that stream can store anything.
return VoxelBuffer::ALL_CHANNELS_MASK;
return VoxelBufferInternal::ALL_CHANNELS_MASK;
}
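Note: the save path above frames each block file as magic + format version + a 32-bit size prefix + the compressed payload; the size prefix is what emerge_block reads back with f->get_32(). A standalone sketch of that framing, using placeholder values since FORMAT_BLOCK_MAGIC and FORMAT_VERSION are not shown in this diff:

#include <cstdint>
#include <cstdio>
#include <vector>

bool write_block_file_sketch(const char *path, const std::vector<uint8_t> &compressed_data) {
	FILE *f = std::fopen(path, "wb");
	if (f == nullptr) {
		return false;
	}
	std::fwrite("VXBF", 1, 4, f); // placeholder for FORMAT_BLOCK_MAGIC
	const uint8_t version = 1; // placeholder for FORMAT_VERSION
	std::fwrite(&version, 1, 1, f);
	const uint32_t size = static_cast<uint32_t>(compressed_data.size());
	std::fwrite(&size, sizeof(size), 1, f); // size prefix, read back via get_32()
	std::fwrite(compressed_data.data(), 1, compressed_data.size(), f);
	std::fclose(f);
	return true;
}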
String VoxelStreamBlockFiles::get_directory() const {
@ -252,8 +250,8 @@ VoxelFileResult VoxelStreamBlockFiles::load_meta() {
for (unsigned int i = 0; i < meta.channel_depths.size(); ++i) {
uint8_t depth = f->get_8();
ERR_FAIL_COND_V(depth >= VoxelBuffer::DEPTH_COUNT, VOXEL_FILE_INVALID_DATA);
meta.channel_depths[i] = (VoxelBuffer::Depth)depth;
ERR_FAIL_COND_V(depth >= VoxelBufferInternal::DEPTH_COUNT, VOXEL_FILE_INVALID_DATA);
meta.channel_depths[i] = (VoxelBufferInternal::Depth)depth;
}
ERR_FAIL_COND_V(meta.lod_count < 1 || meta.lod_count > 32, VOXEL_FILE_INVALID_DATA);

View File

@ -15,8 +15,8 @@ class VoxelStreamBlockFiles : public VoxelStream {
public:
VoxelStreamBlockFiles();
Result emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) override;
Result emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) override;
int get_used_channels_mask() const override;
@ -43,7 +43,7 @@ private:
uint8_t version = -1;
uint8_t lod_count = 0;
uint8_t block_size_po2 = 0; // How many voxels in a block
FixedArray<VoxelBuffer::Depth, VoxelBuffer::MAX_CHANNELS> channel_depths;
FixedArray<VoxelBufferInternal::Depth, VoxelBufferInternal::MAX_CHANNELS> channel_depths;
};
Meta _meta;

View File

@ -1,8 +1,6 @@
#include "voxel_stream_cache.h"
bool VoxelStreamCache::load_voxel_block(Vector3i position, uint8_t lod_index, Ref<VoxelBuffer> &out_voxels) {
ERR_FAIL_COND_V(out_voxels.is_null(), false);
bool VoxelStreamCache::load_voxel_block(Vector3i position, uint8_t lod_index, VoxelBufferInternal &out_voxels) {
const Lod &lod = _cache[lod_index];
lod.rw_lock.read_lock();
auto it = lod.blocks.find(position);
@ -15,20 +13,18 @@ bool VoxelStreamCache::load_voxel_block(Vector3i position, uint8_t lod_index, Re
} else {
// In cache, serve it
Ref<VoxelBuffer> vb = it->second.voxels;
const VoxelBufferInternal &vb = it->second.voxels;
// Copying is required since the cache has ownership of its data,
// and the request wants us to populate the buffer it provides
out_voxels->copy_format(**vb);
out_voxels->copy_from(**vb);
out_voxels->copy_voxel_metadata(**vb);
vb.duplicate_to(out_voxels, true);
lod.rw_lock.read_unlock();
return true;
}
}
void VoxelStreamCache::save_voxel_block(Vector3i position, uint8_t lod_index, Ref<VoxelBuffer> voxels) {
void VoxelStreamCache::save_voxel_block(Vector3i position, uint8_t lod_index, VoxelBufferInternal &voxels) {
Lod &lod = _cache[lod_index];
RWLockWrite wlock(lod.rw_lock);
auto it = lod.blocks.find(position);
@ -38,21 +34,21 @@ void VoxelStreamCache::save_voxel_block(Vector3i position, uint8_t lod_index, Re
Block b;
b.position = position;
b.lod = lod_index;
b.voxels = voxels;
// TODO Optimization: if we know the buffer is not shared, we could use move instead
voxels.duplicate_to(b.voxels, true);
b.has_voxels = true;
lod.blocks.insert(std::make_pair(position, std::move(b)));
++_count;
} else {
// Cached already, overwrite
it->second.voxels = voxels;
voxels.move_to(it->second.voxels);
it->second.has_voxels = true;
}
}
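Note: the cache now stores VoxelBufferInternal by value, copying on load (the cache keeps ownership) and moving on save (the caller gives it up). A standalone sketch of the same locking and ownership discipline, with std::shared_mutex (C++17) standing in for Godot's RWLock:

#include <mutex>
#include <shared_mutex>
#include <unordered_map>
#include <utility>

struct BlockSketch {
	int value = 0;
};

class CacheSketch {
public:
	bool load(int key, BlockSketch &out_block) {
		std::shared_lock<std::shared_mutex> rlock(_mutex); // many readers allowed
		auto it = _blocks.find(key);
		if (it == _blocks.end()) {
			return false;
		}
		out_block = it->second; // copy out: the cache keeps its data
		return true;
	}

	void save(int key, BlockSketch &&block) {
		std::unique_lock<std::shared_mutex> wlock(_mutex); // exclusive writer
		_blocks[key] = std::move(block); // move in: caller relinquishes ownership
	}

private:
	std::unordered_map<int, BlockSketch> _blocks;
	std::shared_mutex _mutex;
};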
bool VoxelStreamCache::load_instance_block(
Vector3i position, uint8_t lod_index, std::unique_ptr<VoxelInstanceBlockData> &out_instances) {
const Lod &lod = _cache[lod_index];
lod.rw_lock.read_lock();
auto it = lod.blocks.find(position);
@ -81,7 +77,6 @@ bool VoxelStreamCache::load_instance_block(
void VoxelStreamCache::save_instance_block(
Vector3i position, uint8_t lod_index, std::unique_ptr<VoxelInstanceBlockData> instances) {
Lod &lod = _cache[lod_index];
RWLockWrite wlock(lod.rw_lock);
auto it = lod.blocks.find(position);

View File

@ -7,27 +7,28 @@
#include <unordered_map>
// In-memory database for voxel streams.
// It allows to cache blocks so we can save to the filesystem less frequently, or quickly reload recent blocks.
// It allows blocks to be cached, so we can save them to the filesystem later and less frequently, or quickly reload recent blocks.
class VoxelStreamCache {
public:
struct Block {
Vector3i position;
int lod;
// Because `voxels` being null has two possible meanings:
// - true: Voxel data has been erased
// - false: Voxel data should be left untouched
// Absence of voxel data can mean two things:
// - Voxel data has been erased (use case not really implemented yet, but may happen in the future)
// - Voxel data has never been saved over, so should be left untouched
bool has_voxels = false;
bool voxels_deleted = false;
Ref<VoxelBuffer> voxels;
VoxelBufferInternal voxels;
std::unique_ptr<VoxelInstanceBlockData> instances;
};
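Note: a hedged sketch of how a consumer of this cache might branch on the two flags above when flushing; the erase case is described as not implemented yet, so this is one plausible reading rather than the module's actual save path:

enum class SaveActionSketch { ERASE, LEAVE_UNTOUCHED, WRITE };

SaveActionSketch interpret_cached_block_sketch(bool has_voxels, bool voxels_deleted) {
	if (voxels_deleted) {
		return SaveActionSketch::ERASE; // voxel data was explicitly erased
	}
	if (!has_voxels) {
		return SaveActionSketch::LEAVE_UNTOUCHED; // nothing was ever saved over
	}
	return SaveActionSketch::WRITE; // cached voxels need flushing
}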
// Copies cached block into provided buffer
bool load_voxel_block(Vector3i position, uint8_t lod_index, Ref<VoxelBuffer> &out_voxels);
bool load_voxel_block(Vector3i position, uint8_t lod_index, VoxelBufferInternal &out_voxels);
// Stores provided block into the cache. The cache will take ownership of the provided data.
void save_voxel_block(Vector3i position, uint8_t lod_index, Ref<VoxelBuffer> voxels);
void save_voxel_block(Vector3i position, uint8_t lod_index, VoxelBufferInternal &voxels);
// Copies cached data into the provided pointer. If data is found, a new instance is allocated and assigned to it.
bool load_instance_block(
@ -54,6 +55,7 @@ public:
private:
struct Lod {
// Blocks are stored by value: unordered_map does not invalidate pointers to its values when it grows, so no extra indirection is needed
std::unordered_map<Vector3i, Block> blocks;
RWLock rw_lock;
};

View File

@ -2,22 +2,30 @@
#include "../constants/voxel_string_names.h"
#include "../util/godot/funcs.h"
VoxelStream::Result VoxelStreamScript::emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) {
ERR_FAIL_COND_V(out_buffer.is_null(), RESULT_ERROR);
VoxelStream::Result VoxelStreamScript::emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) {
Variant output;
// Create a temporary wrapper so Godot can pass it to scripts
Ref<VoxelBuffer> buffer_wrapper;
buffer_wrapper.instance();
buffer_wrapper->get_buffer().copy_format(out_buffer);
buffer_wrapper->get_buffer().create(out_buffer.get_size());
if (try_call_script(this, VoxelStringNames::get_singleton()->_emerge_block,
out_buffer, origin_in_voxels.to_vec3(), lod, &output)) {
buffer_wrapper, origin_in_voxels.to_vec3(), lod, &output)) {
int res = output;
ERR_FAIL_INDEX_V(res, _RESULT_COUNT, RESULT_ERROR);
// Move what the script wrote into the wrapper back into the caller's buffer.
// The wrapper itself is discarded.
buffer_wrapper->get_buffer().move_to(out_buffer);
return static_cast<Result>(res);
}
return RESULT_ERROR;
}
void VoxelStreamScript::immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) {
ERR_FAIL_COND(buffer.is_null());
void VoxelStreamScript::immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) {
Ref<VoxelBuffer> buffer_wrapper;
buffer_wrapper.instance();
buffer.duplicate_to(buffer_wrapper->get_buffer(), true);
try_call_script(this, VoxelStringNames::get_singleton()->_immerge_block,
buffer, origin_in_voxels.to_vec3(), lod, nullptr);
buffer_wrapper, origin_in_voxels.to_vec3(), lod, nullptr);
}
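Note: these wrappers rely on two transfer primitives with different costs: duplicate_to deep-copies (the source keeps its data, used before handing data to scripts that may hold on to it), while move_to steals the storage (the source is left empty, used when the temporary is about to be discarded). A standalone sketch of the distinction with stand-in types:

#include <cstdint>
#include <utility>
#include <vector>

struct BufferSketch {
	std::vector<uint8_t> data;

	// Deep copy: `dst` gets its own storage, `*this` is unchanged.
	void duplicate_to(BufferSketch &dst) const {
		dst.data = data;
	}

	// Transfer: `dst` takes the storage, `*this` is left empty.
	void move_to(BufferSketch &dst) {
		dst.data = std::move(data);
	}
};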
int VoxelStreamScript::get_used_channels_mask() const {

View File

@ -9,8 +9,8 @@
class VoxelStreamScript : public VoxelStream {
GDCLASS(VoxelStreamScript, VoxelStream)
public:
Result emerge_block(Ref<VoxelBuffer> out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(Ref<VoxelBuffer> buffer, Vector3i origin_in_voxels, int lod) override;
Result emerge_block(VoxelBufferInternal &out_buffer, Vector3i origin_in_voxels, int lod) override;
void immerge_block(VoxelBufferInternal &buffer, Vector3i origin_in_voxels, int lod) override;
int get_used_channels_mask() const override;

View File

@ -89,7 +89,7 @@ struct BeforeUnloadDataAction {
//print_line(String("Scheduling save for block {0}").format(varray(block->position.to_vec3())));
VoxelLodTerrain::BlockToSave b;
// We don't copy since the block will be unloaded anyways
b.voxels = block->get_voxels();
b.voxels = block->get_voxels_shared();
b.position = block->position;
b.lod = block->lod_index;
blocks_to_save.push_back(b);
@ -121,8 +121,11 @@ struct ScheduleSaveAction {
//print_line(String("Scheduling save for block {0}").format(varray(block->position.to_vec3())));
VoxelLodTerrain::BlockToSave b;
RWLockRead lock(block->get_voxels()->get_lock());
b.voxels = block->get_voxels()->duplicate(true);
b.voxels = gd_make_shared<VoxelBufferInternal>();
{
RWLockRead lock(block->get_voxels().get_lock());
block->get_voxels_const().duplicate_to(*b.voxels, true);
}
b.position = block->position;
b.lod = block->lod_index;
@ -480,7 +483,7 @@ bool VoxelLodTerrain::try_set_voxel_without_update(Vector3i pos, unsigned int ch
}
}
void VoxelLodTerrain::copy(Vector3i p_origin_voxels, VoxelBuffer &dst_buffer, uint8_t channels_mask) const {
void VoxelLodTerrain::copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask) const {
const Lod &lod0 = _lods[0];
lod0.data_map.copy(p_origin_voxels, dst_buffer, channels_mask);
}
@ -522,7 +525,7 @@ void VoxelLodTerrain::post_edit_area(Box3i p_box) {
Ref<VoxelTool> VoxelLodTerrain::get_voxel_tool() {
VoxelToolLodTerrain *vt = memnew(VoxelToolLodTerrain(this));
// Set to most commonly used channel on this kind of terrain
vt->set_channel(VoxelBuffer::CHANNEL_SDF);
vt->set_channel(VoxelBufferInternal::CHANNEL_SDF);
return Ref<VoxelTool>(vt);
}
@ -1512,7 +1515,7 @@ void VoxelLodTerrain::_process(float delta) {
for (size_t reception_index = 0; reception_index < _reception_buffers.data_output.size(); ++reception_index) {
VOXEL_PROFILE_SCOPE();
const VoxelServer::BlockDataOutput &ob = _reception_buffers.data_output[reception_index];
VoxelServer::BlockDataOutput &ob = _reception_buffers.data_output[reception_index];
if (ob.type == VoxelServer::BlockDataOutput::TYPE_SAVE) {
// That's a save confirmation event.
@ -1614,7 +1617,7 @@ void VoxelLodTerrain::_process(float delta) {
// The block can actually be null on some occasions. Not sure yet if it's that bad
//CRASH_COND(nblock == nullptr);
if (nblock != nullptr) {
mesh_request.data_blocks[mesh_request.data_blocks_count] = nblock->get_voxels();
mesh_request.data_blocks[mesh_request.data_blocks_count] = nblock->get_voxels_shared();
}
++mesh_request.data_blocks_count;
});
@ -1892,8 +1895,6 @@ void VoxelLodTerrain::flush_pending_lod_edits() {
// Otherwise it means the function was called too late
CRASH_COND(src_block == nullptr);
//CRASH_COND(dst_block == nullptr);
CRASH_COND(src_block->get_voxels().is_null());
CRASH_COND(dst_block->get_voxels().is_null());
{
const Vector3i mesh_block_pos = dst_bpos.floordiv(data_to_mesh_factor);
@ -1917,9 +1918,9 @@ void VoxelLodTerrain::flush_pending_lod_edits() {
// This must always be done after an edit before it gets saved, otherwise LODs won't match and it will look ugly.
// TODO Optimization: try to narrow to edited region instead of taking whole block
{
RWLockWrite lock(src_block->get_voxels()->get_lock());
src_block->get_voxels()->downscale_to(
**dst_block->get_voxels(), Vector3i(), src_block->get_voxels()->get_size(), rel * half_bs);
RWLockWrite lock(src_block->get_voxels().get_lock());
src_block->get_voxels().downscale_to(
dst_block->get_voxels(), Vector3i(), src_block->get_voxels_const().get_size(), rel * half_bs);
}
}
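Note: downscale_to writes a half-resolution copy of an edited block into its parent LOD block, at an octant offset (rel * half_bs above). A standalone nearest-sample sketch of that operation on flat arrays, with hypothetical names:

#include <cstdint>
#include <vector>

// Copies every other voxel of `src` (src_size^3) into `dst` (dst_size^3),
// starting at destination offset (dst_x0, dst_y0, dst_z0).
void downscale_2x_sketch(const std::vector<uint8_t> &src, int src_size,
		std::vector<uint8_t> &dst, int dst_size, int dst_x0, int dst_y0, int dst_z0) {
	for (int z = 0; z < src_size; z += 2) {
		for (int y = 0; y < src_size; y += 2) {
			for (int x = 0; x < src_size; x += 2) {
				const uint8_t v = src[(z * src_size + y) * src_size + x]; // nearest sample
				const int dx = dst_x0 + x / 2;
				const int dy = dst_y0 + y / 2;
				const int dz = dst_z0 + z / 2;
				dst[(dz * dst_size + dy) * dst_size + dx] = v;
			}
		}
	}
}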
@ -2507,9 +2508,7 @@ Array VoxelLodTerrain::_b_debug_print_sdf_top_down(Vector3 center, Vector3 exten
continue;
}
Ref<VoxelBuffer> buffer_ref;
buffer_ref.instance();
VoxelBuffer &buffer = **buffer_ref;
VoxelBufferInternal buffer;
buffer.create(world_box.size);
const Lod &lod = _lods[lod_index];

View File

@ -82,7 +82,7 @@ public:
bool is_area_editable(Box3i p_box) const;
uint64_t get_voxel(Vector3i pos, unsigned int channel, uint64_t defval) const;
bool try_set_voxel_without_update(Vector3i pos, unsigned int channel, uint64_t value);
void copy(Vector3i p_origin_voxels, VoxelBuffer &dst_buffer, uint8_t channels_mask) const;
void copy(Vector3i p_origin_voxels, VoxelBufferInternal &dst_buffer, uint8_t channels_mask) const;
template <typename F>
void write_box(const Box3i &p_voxel_box, unsigned int channel, F action) {
@ -151,7 +151,7 @@ public:
void remesh_all_blocks() override;
struct BlockToSave {
Ref<VoxelBuffer> voxels;
std::shared_ptr<VoxelBufferInternal> voxels;
Vector3i position;
uint8_t lod;
};

View File

@ -507,10 +507,11 @@ struct ScheduleSaveAction {
//print_line(String("Scheduling save for block {0}").format(varray(block->position.to_vec3())));
VoxelTerrain::BlockToSave b;
if (with_copy) {
RWLockRead lock(block->get_voxels()->get_lock());
b.voxels = block->get_voxels()->duplicate(true);
RWLockRead lock(block->get_voxels().get_lock());
b.voxels = gd_make_shared<VoxelBufferInternal>();
block->get_voxels_const().duplicate_to(*b.voxels, true);
} else {
b.voxels = block->get_voxels();
b.voxels = block->get_voxels_shared();
}
b.position = block->position;
blocks_to_save.push_back(b);
@ -1045,7 +1046,7 @@ void VoxelTerrain::process_received_data_blocks() {
//print_line(String("Receiving {0} blocks").format(varray(output.emerged_blocks.size())));
for (size_t i = 0; i < _reception_buffers.data_output.size(); ++i) {
const VoxelServer::BlockDataOutput &ob = _reception_buffers.data_output[i];
VoxelServer::BlockDataOutput &ob = _reception_buffers.data_output[i];
if (ob.type == VoxelServer::BlockDataOutput::TYPE_SAVE) {
if (ob.dropped) {
@ -1086,7 +1087,7 @@ void VoxelTerrain::process_received_data_blocks() {
// Now we got the block. If we still have to drop it, the cause will be an error.
_loading_blocks.erase(block_pos);
CRASH_COND(ob.voxels.is_null());
CRASH_COND(ob.voxels == nullptr);
const Vector3i expected_block_size(_data_map.get_block_size());
if (ob.voxels->get_size() != expected_block_size) {
@ -1172,7 +1173,7 @@ void VoxelTerrain::process_meshing() {
data_box.for_each_cell_zxy([this, &mesh_request](Vector3i data_block_pos) {
VoxelDataBlock *data_block = _data_map.get_block(data_block_pos);
if (data_block != nullptr) {
mesh_request.data_blocks[mesh_request.data_blocks_count] = data_block->get_voxels();
mesh_request.data_blocks[mesh_request.data_blocks_count] = data_block->get_voxels_shared();
}
++mesh_request.data_blocks_count;
});
@ -1181,7 +1182,7 @@ void VoxelTerrain::process_meshing() {
{
unsigned int count = 0;
for (unsigned int i = 0; i < mesh_request.data_blocks_count; ++i) {
if (mesh_request.data_blocks[i].is_valid()) {
if (mesh_request.data_blocks[i] != nullptr) {
++count;
}
}
@ -1272,7 +1273,7 @@ Ref<VoxelTool> VoxelTerrain::get_voxel_tool() {
Ref<VoxelTool> vt = memnew(VoxelToolTerrain(this));
const int used_channels_mask = get_used_channels_mask();
// Auto-pick first used channel
for (int channel = 0; channel < VoxelBuffer::MAX_CHANNELS; ++channel) {
for (int channel = 0; channel < VoxelBufferInternal::MAX_CHANNELS; ++channel) {
if ((used_channels_mask & (1 << channel)) != 0) {
vt->set_channel(channel);
break;

View File

@ -93,7 +93,7 @@ public:
const Stats &get_stats() const;
struct BlockToSave {
Ref<VoxelBuffer> voxels;
std::shared_ptr<VoxelBufferInternal> voxels;
Vector3i position;
};

View File

@ -71,19 +71,18 @@ void test_box3i_for_inner_outline() {
void test_voxel_data_map_paste_fill() {
static const int voxel_value = 1;
static const int default_value = 0;
static const int channel = VoxelBuffer::CHANNEL_TYPE;
static const int channel = VoxelBufferInternal::CHANNEL_TYPE;
Ref<VoxelBuffer> buffer;
buffer.instance();
buffer->create(32, 16, 32);
buffer->fill(voxel_value, channel);
VoxelBufferInternal buffer;
buffer.create(32, 16, 32);
buffer.fill(voxel_value, channel);
VoxelDataMap map;
map.create(4, 0);
const Box3i box(Vector3i(10, 10, 10), buffer->get_size());
const Box3i box(Vector3i(10, 10, 10), buffer.get_size());
map.paste(box.pos, **buffer, (1 << channel), false, 0, true);
map.paste(box.pos, buffer, (1 << channel), false, 0, true);
// All voxels in the area must be as pasted
const bool is_match = box.all_cells_match([&map](const Vector3i &pos) {
@ -108,17 +107,16 @@ void test_voxel_data_map_paste_mask() {
static const int voxel_value = 1;
static const int masked_value = 2;
static const int default_value = 0;
static const int channel = VoxelBuffer::CHANNEL_TYPE;
static const int channel = VoxelBufferInternal::CHANNEL_TYPE;
Ref<VoxelBuffer> buffer;
buffer.instance();
buffer->create(32, 16, 32);
VoxelBufferInternal buffer;
buffer.create(32, 16, 32);
// Fill the inside of the buffer with a value, and outline it with another value, which we'll use as mask
buffer->fill(masked_value, channel);
for (int z = 1; z < buffer->get_size().z - 1; ++z) {
for (int x = 1; x < buffer->get_size().x - 1; ++x) {
for (int y = 1; y < buffer->get_size().y - 1; ++y) {
buffer->set_voxel(voxel_value, x, y, z, channel);
buffer.fill(masked_value, channel);
for (int z = 1; z < buffer.get_size().z - 1; ++z) {
for (int x = 1; x < buffer.get_size().x - 1; ++x) {
for (int y = 1; y < buffer.get_size().y - 1; ++y) {
buffer.set_voxel(voxel_value, x, y, z, channel);
}
}
}
@ -126,9 +124,9 @@ void test_voxel_data_map_paste_mask() {
VoxelDataMap map;
map.create(4, 0);
const Box3i box(Vector3i(10, 10, 10), buffer->get_size());
const Box3i box(Vector3i(10, 10, 10), buffer.get_size());
map.paste(box.pos, **buffer, (1 << channel), true, masked_value, true);
map.paste(box.pos, buffer, (1 << channel), true, masked_value, true);
// All voxels in the area must be as pasted. Ignoring the outline.
const bool is_match = box.padded(-1).all_cells_match([&map](const Vector3i &pos) {
@ -181,33 +179,31 @@ void test_voxel_data_map_paste_mask() {
void test_voxel_data_map_copy() {
static const int voxel_value = 1;
static const int default_value = 0;
static const int channel = VoxelBuffer::CHANNEL_TYPE;
static const int channel = VoxelBufferInternal::CHANNEL_TYPE;
VoxelDataMap map;
map.create(4, 0);
Box3i box(10, 10, 10, 32, 16, 32);
Ref<VoxelBuffer> buffer;
buffer.instance();
buffer->create(box.size);
VoxelBufferInternal buffer;
buffer.create(box.size);
// Fill the inside of the buffer with a value, and leave the outline at zero,
// so our buffer isn't just uniform
for (int z = 1; z < buffer->get_size().z - 1; ++z) {
for (int x = 1; x < buffer->get_size().x - 1; ++x) {
for (int y = 1; y < buffer->get_size().y - 1; ++y) {
buffer->set_voxel(voxel_value, x, y, z, channel);
for (int z = 1; z < buffer.get_size().z - 1; ++z) {
for (int x = 1; x < buffer.get_size().x - 1; ++x) {
for (int y = 1; y < buffer.get_size().y - 1; ++y) {
buffer.set_voxel(voxel_value, x, y, z, channel);
}
}
}
map.paste(box.pos, **buffer, (1 << channel), true, default_value, true);
map.paste(box.pos, buffer, (1 << channel), true, default_value, true);
Ref<VoxelBuffer> buffer2;
buffer2.instance();
buffer2->create(box.size);
VoxelBufferInternal buffer2;
buffer2.create(box.size);
map.copy(box.pos, **buffer2, (1 << channel));
map.copy(box.pos, buffer2, (1 << channel));
// for (int y = 0; y < buffer2->get_size().y; ++y) {
// String line = String("y={0} | ").format(varray(y));
@ -224,7 +220,7 @@ void test_voxel_data_map_copy() {
// print_line(line);
// }
ERR_FAIL_COND(!buffer->equals(**buffer2));
ERR_FAIL_COND(!buffer.equals(buffer2));
}
void test_encode_weights_packed_u16() {
@ -415,11 +411,10 @@ void test_voxel_graph_generator_texturing() {
const uint8_t WEIGHT_MAX = 240;
struct L {
static void check_weights(Ref<VoxelBuffer> buffer, Vector3i pos,
static void check_weights(VoxelBufferInternal &buffer, Vector3i pos,
bool weight0_must_be_1, bool weight1_must_be_1) {
ERR_FAIL_COND(buffer.is_null());
const uint16_t encoded_indices = buffer->get_voxel(pos, VoxelBuffer::CHANNEL_INDICES);
const uint16_t encoded_weights = buffer->get_voxel(pos, VoxelBuffer::CHANNEL_WEIGHTS);
const uint16_t encoded_indices = buffer.get_voxel(pos, VoxelBufferInternal::CHANNEL_INDICES);
const uint16_t encoded_weights = buffer.get_voxel(pos, VoxelBufferInternal::CHANNEL_WEIGHTS);
const FixedArray<uint8_t, 4> indices = decode_indices_from_packed_u16(encoded_indices);
const FixedArray<uint8_t, 4> weights = decode_weights_from_packed_u16(encoded_weights);
for (unsigned int i = 0; i < indices.size(); ++i) {
@ -448,14 +443,10 @@ void test_voxel_graph_generator_texturing() {
ERR_FAIL_COND(generator.is_null());
{
// Block centered on origin
Ref<VoxelBuffer> buffer;
buffer.instance();
buffer->create(Vector3i(16, 16, 16));
VoxelBufferInternal buffer;
buffer.create(Vector3i(16, 16, 16));
VoxelBlockRequest request;
request.lod = 0;
request.origin_in_voxels = -buffer->get_size() / 2;
request.voxel_buffer = buffer;
VoxelBlockRequest request{ buffer, -buffer.get_size() / 2, 0 };
generator->generate_block(request);
L::check_weights(buffer, Vector3i(4, 3, 8), true, false);
@ -466,26 +457,18 @@ void test_voxel_graph_generator_texturing() {
// The point is to check possible bugs due to optimizations.
// Below 0
Ref<VoxelBuffer> buffer0;
VoxelBufferInternal buffer0;
{
buffer0.instance();
buffer0->create(Vector3i(16, 16, 16));
VoxelBlockRequest request;
request.lod = 0;
request.origin_in_voxels = Vector3(0, -16, 0);
request.voxel_buffer = buffer0;
buffer0.create(Vector3i(16, 16, 16));
VoxelBlockRequest request{ buffer0, Vector3(0, -16, 0), 0 };
generator->generate_block(request);
}
// Above 0
Ref<VoxelBuffer> buffer1;
VoxelBufferInternal buffer1;
{
buffer1.instance();
buffer1->create(Vector3i(16, 16, 16));
VoxelBlockRequest request;
request.lod = 0;
request.origin_in_voxels = Vector3(0, 0, 0);
request.voxel_buffer = buffer1;
buffer1.create(Vector3i(16, 16, 16));
VoxelBlockRequest request{ buffer1, Vector3(0, 0, 0), 0 };
generator->generate_block(request);
}

View File

@ -20,6 +20,8 @@ public:
}
}
// TODO Optimization: move semantics
inline T &operator[](unsigned int i) {
#ifdef DEBUG_ENABLED
CRASH_COND(i >= N);

View File

@ -3,6 +3,7 @@
#include <core/reference.h>
#include <core/variant.h>
#include <memory>
class Mesh;
class ConcavePolygonShape;
@ -36,4 +37,10 @@ inline bool try_get_as(Ref<From_T> from, Ref<To_T> &to) {
return to.is_valid();
}
template <typename T>
inline std::shared_ptr<T> gd_make_shared() {
// std::make_shared() apparently won't allow us to specify custom new and delete
return std::shared_ptr<T>(memnew(T), memdelete<T>);
}
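Note: a hedged usage sketch of the helper above, as the terrain save paths use it for BlockToSave::voxels; create(Vector3i) is taken from its uses elsewhere in this diff:

// Allocated with memnew, freed with memdelete, shareable across threads.
std::shared_ptr<VoxelBufferInternal> voxels = gd_make_shared<VoxelBufferInternal>();
voxels->create(Vector3i(16, 16, 16));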
#endif // VOXEL_UTILITY_GODOT_FUNCS_H