Removed legacy marching cubes mesher

Marc Gilleron 2020-01-26 20:14:56 +00:00
parent 4626b4d7a3
commit 20dc0008f4
3 changed files with 0 additions and 515 deletions


@@ -1,458 +0,0 @@
#include "voxel_mesher_mc.h"
#include "../transvoxel/transvoxel_tables.cpp"
#include <core/os/os.h>
namespace {
inline float tof(int8_t v) {
return static_cast<float>(v) / 256.f;
}
inline int8_t tos(uint8_t v) {
return v - 128;
}
// Values considered negative have a sign bit of 1
inline uint8_t sign(int8_t v) {
return (v >> 7) & 1;
}
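// e.g. sign(-64) == 1 and sign(64) == 0; eight of these bits get packed into the cell's case code below.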
//
// 6-------7
// /| /|
// / | / | Corners
// 4-------5 |
// | 2----|--3
// | / | / z y
// |/ |/ |/
// 0-------1 o--x
//
// The fact it follows a binary pattern is important
const Vector3i g_corner_dirs[8] = {
Vector3i(0, 0, 0),
Vector3i(1, 0, 0),
Vector3i(0, 1, 0),
Vector3i(1, 1, 0),
Vector3i(0, 0, 1),
Vector3i(1, 0, 1),
Vector3i(0, 1, 1),
Vector3i(1, 1, 1)
};
inline Vector3i dir_to_prev_vec(uint8_t dir) {
//return g_corner_dirs[mask] - Vector3(1,1,1);
return Vector3i(
-(dir & 1),
-((dir >> 1) & 1),
-((dir >> 2) & 1));
}
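// e.g. a reuse direction of 3 (binary 011) maps to (-1, -1, 0): the preceding cell is one step back along both X and Y.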
// Wrapper that inverts SDF data; Transvoxel apparently works backwards?
inline uint8_t get_voxel(const VoxelBuffer &vb, int x, int y, int z, int channel) {
return 255 - vb.get_voxel(x, y, z, channel);
}
inline uint8_t get_voxel(const VoxelBuffer &vb, Vector3i pos, int channel) {
return get_voxel(vb, pos.x, pos.y, pos.z, channel);
}
inline int8_t increase(int8_t v, int8_t a) {
// Actually decreasing...
int8_t res = v - a;
if (res > v) {
// Underflowed, clamp to min
return -128;
}
return res;
}
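// e.g. increase(-120, 32) would go past -128, so it clamps; on typical two's-complement targets the wrapped value 104 trips the `res > v` check.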
} // namespace
VoxelMesherMC::VoxelMesherMC() {
set_padding(MIN_PADDING, MAX_PADDING);
}
void VoxelMesherMC::build(VoxelMesher::Output &output, const VoxelMesher::Input &input) {
int channel = VoxelBuffer::CHANNEL_SDF;
// Initialize dynamic memory:
// These vectors are re-used.
// We don't know in advance how much geometry we are going to produce.
// Once capacity is big enough, no more memory should be allocated
_output_vertices.clear();
_output_normals.clear();
_output_indices.clear();
const VoxelBuffer &voxels = input.voxels;
build_internal(voxels, channel);
// OS::get_singleton()->print("vertices: %i, normals: %i, indices: %i\n",
// m_output_vertices.size(),
// m_output_normals.size(),
// m_output_indices.size());
if (_output_vertices.size() == 0) {
// The mesh can be empty
return;
}
PoolVector<Vector3> vertices;
PoolVector<Vector3> normals;
PoolVector<int> indices;
raw_copy_to(vertices, _output_vertices);
raw_copy_to(normals, _output_normals);
raw_copy_to(indices, _output_indices);
Array arrays;
arrays.resize(Mesh::ARRAY_MAX);
arrays[Mesh::ARRAY_VERTEX] = vertices;
if (_output_normals.size() != 0) {
arrays[Mesh::ARRAY_NORMAL] = normals;
}
arrays[Mesh::ARRAY_INDEX] = indices;
output.surfaces.push_back(arrays);
output.primitive_type = Mesh::PRIMITIVE_TRIANGLES;
}
void VoxelMesherMC::set_seam_mode(SeamMode mode) {
_seam_mode = mode;
}
VoxelMesherMC::SeamMode VoxelMesherMC::get_seam_mode() const {
return _seam_mode;
}
void VoxelMesherMC::build_internal(const VoxelBuffer &voxels, unsigned int channel) {
// Each 2x2x2 voxel group is a "cell"
if (voxels.is_uniform(channel)) {
// Nothing to extract: a uniform field never crosses the isolevel, so it describes no surface
return;
}
Vector3i block_size = voxels.get_size();
// Iterate all cells, with expected padding.
// The algorithm works with a 2x2x2 kernel and needs extra neighbors for normals,
// so it looks 1 voxel away in negative axes, and 2 voxels away in positive axes.
Vector3i pos;
Vector3i min_pos(get_minimum_padding());
Vector3i max_pos(block_size - Vector3i(get_maximum_padding()));
if (_seam_mode == SEAM_OVERLAP) {
// When this is enabled, the algorithm may detect if it's on a border,
// and will avoid looking at an extra neighbor for normals, while polygonizing an extra cell.
min_pos -= Vector3i(1);
max_pos += Vector3i(1);
block_size += Vector3i(2);
}
// Prepare vertex reuse cache:
// We'll iterate deck by deck in deterministic order, so we can link vertices together from the previous deck.
_block_size = block_size;
unsigned int deck_area = block_size.x * block_size.y;
for (int i = 0; i < 2; ++i) {
_cache[i].clear();
_cache[i].resize(deck_area);
}
bool overpoly_bx[2] = { false };
bool overpoly_by[2] = { false };
bool overpoly_bz[2] = { false };
for (pos.z = min_pos.z; pos.z < max_pos.z; ++pos.z) {
for (pos.y = min_pos.y; pos.y < max_pos.y; ++pos.y) {
for (pos.x = min_pos.x; pos.x < max_pos.x; ++pos.x) {
// Get the values at the cell's 8 corners.
// Negative values are "solid" and positive are "air".
// Raw voxels are unsigned 8-bit, so they get converted to signed.
int8_t cell_samples[8] = {
tos(get_voxel(voxels, pos.x, pos.y, pos.z, channel)),
tos(get_voxel(voxels, pos.x + 1, pos.y, pos.z, channel)),
tos(get_voxel(voxels, pos.x, pos.y + 1, pos.z, channel)),
tos(get_voxel(voxels, pos.x + 1, pos.y + 1, pos.z, channel)),
tos(get_voxel(voxels, pos.x, pos.y, pos.z + 1, channel)),
tos(get_voxel(voxels, pos.x + 1, pos.y, pos.z + 1, channel)),
tos(get_voxel(voxels, pos.x, pos.y + 1, pos.z + 1, channel)),
tos(get_voxel(voxels, pos.x + 1, pos.y + 1, pos.z + 1, channel))
};
if (_seam_mode == SEAM_OVERLAP) {
// In overpoly, we extend the polygonized area, but the extended vertices will have increased distance samples.
// The intended effect is that the isosurface will be slightly contracted,
// so if we stitch two chunks of different LOD with overpolys,
// they will overlap and will fill cracks by crossing over each other.
overpoly_bx[0] = pos.x == min_pos.x;
overpoly_bx[1] = pos.x == max_pos.x - 1;
overpoly_by[0] = pos.y == min_pos.y;
overpoly_by[1] = pos.y == max_pos.y - 1;
overpoly_bz[0] = pos.z == min_pos.z;
overpoly_bz[1] = pos.z == max_pos.z - 1;
const int8_t inc = 32;
for (unsigned int i = 0; i < 8; ++i) {
if (overpoly_bx[i & 1] || overpoly_by[(i >> 1) & 1] || overpoly_bz[(i >> 2) & 1]) {
cell_samples[i] = increase(cell_samples[i], inc);
}
}
}
// Concatenate the sign of cell values to obtain the case code.
// Index 0 is the least significant bit, and index 7 is the most significant bit.
uint8_t case_code = sign(cell_samples[0]);
case_code |= (sign(cell_samples[1]) << 1);
case_code |= (sign(cell_samples[2]) << 2);
case_code |= (sign(cell_samples[3]) << 3);
case_code |= (sign(cell_samples[4]) << 4);
case_code |= (sign(cell_samples[5]) << 5);
case_code |= (sign(cell_samples[6]) << 6);
case_code |= (sign(cell_samples[7]) << 7);
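// e.g. if only corner 0 is solid (negative), case_code == 0b00000001; if all corners are solid, case_code == 255.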
{
ReuseCell &rc = get_reuse_cell(pos);
rc.case_index = case_code;
}
if (case_code == 0 || case_code == 255) {
// If the case_code is 0 or 255, there is no triangulation to do
continue;
}
// TODO We might not always need all of them
// Compute normals
Vector3 corner_normals[8];
for (unsigned int i = 0; i < 8; ++i) {
Vector3i p = pos + g_corner_dirs[i];
if (_seam_mode == SEAM_OVERLAP) {
// In case of overpoly, we keep the same normals as the connected vertex within the "normal" area
if (overpoly_bx[0]) {
++p.x;
}
if (overpoly_bx[1]) {
--p.x;
}
if (overpoly_by[0]) {
++p.y;
}
if (overpoly_by[1]) {
--p.y;
}
if (overpoly_bz[0]) {
++p.z;
}
if (overpoly_bz[1]) {
--p.z;
}
}
float nx = tof(tos(get_voxel(voxels, p - Vector3i(1, 0, 0), channel))) - tof(tos(get_voxel(voxels, p + Vector3i(1, 0, 0), channel)));
float ny = tof(tos(get_voxel(voxels, p - Vector3i(0, 1, 0), channel))) - tof(tos(get_voxel(voxels, p + Vector3i(0, 1, 0), channel)));
float nz = tof(tos(get_voxel(voxels, p - Vector3i(0, 0, 1), channel))) - tof(tos(get_voxel(voxels, p + Vector3i(0, 0, 1), channel)));
corner_normals[i] = Vector3(nx, ny, nz);
corner_normals[i].normalize();
}
// For cells occurring along the minimal boundaries of a block,
// the preceding cells needed for vertex reuse may not exist.
// In these cases, we allow new vertex creation on additional edges of a cell.
// While iterating through the cells in a block, a 3-bit mask is maintained whose bits indicate
// whether corresponding bits in a direction code are valid
uint8_t direction_validity_mask =
(pos.x > min_pos.x ? 1 : 0) |
((pos.y > min_pos.y ? 1 : 0) << 1) |
((pos.z > min_pos.z ? 1 : 0) << 2);
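// e.g. a cell sitting on the block's minimum X but interior in Y and Z gets mask 0b110: reuse toward -X is not allowed there.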
uint8_t regular_cell_class_index = Transvoxel::regularCellClass[case_code];
Transvoxel::RegularCellData regular_cell_class = Transvoxel::regularCellData[regular_cell_class_index];
uint8_t triangle_count = regular_cell_class.geometryCounts & 0x0f;
uint8_t vertex_count = (regular_cell_class.geometryCounts & 0xf0) >> 4;
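// e.g. geometryCounts == 0x31 means this cell class produces 3 vertices and 1 triangle.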
int cell_mesh_indices[12];
// For each vertex in the case
for (unsigned int i = 0; i < vertex_count; ++i) {
// The case index maps to a list of 16-bit codes providing information about the edges on which the vertices lie.
// The low byte of each 16-bit code contains the corner indexes of the edge's endpoints, one nibble each,
// and the high byte contains the mapping code shown in Figure 3.8(b)
unsigned short rvd = Transvoxel::regularVertexData[case_code][i];
unsigned short edge_code_low = rvd & 0xff;
unsigned short edge_code_high = (rvd >> 8) & 0xff;
// Get corner indexes in the low nibble (always ordered so the higher comes last)
uint8_t v0 = (edge_code_low >> 4) & 0xf;
uint8_t v1 = edge_code_low & 0xf;
ERR_FAIL_COND(v1 <= v0);
// Get voxel values at the corners
int sample0 = cell_samples[v0]; // called d0 in the paper
int sample1 = cell_samples[v1]; // called d1 in the paper
// TODO Zero-division is not mentioned in the paper??
ERR_FAIL_COND(sample1 == sample0);
ERR_FAIL_COND(sample1 == 0 && sample0 == 0);
// Get interpolation position
// We use an 8-bit fraction, allowing the new vertex to be located at one of 257 possible
// positions along the edge when both endpoints are included.
int t = (sample1 << 8) / (sample1 - sample0);
float t0 = static_cast<float>(t) / 256.f;
float t1 = static_cast<float>(0x0100 - t) / 256.f;
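// t == 256 puts the vertex exactly on corner v0, and t == 0 exactly on corner v1.
// e.g. sample0 == -64 and sample1 == 64 give t == (64 << 8) / 128 == 128, so t0 == t1 == 0.5 and the vertex sits halfway along the edge.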
Vector3i p0 = pos + g_corner_dirs[v0];
Vector3i p1 = pos + g_corner_dirs[v1];
if (t & 0xff) {
// Vertex lies in the interior of the edge.
// Each edge of a cell is assigned an 8-bit code, as shown in Figure 3.8(b),
// that provides a mapping to a preceding cell and the coincident edge on that preceding cell
// for which new vertex creation was allowed.
// The high nibble of this code indicates which direction to go in order to reach the correct preceding cell.
// The bit values 1, 2, and 4 in this nibble indicate that we must subtract one
// from the x, y, and/or z coordinate, respectively.
uint8_t reuse_dir = (edge_code_high >> 4) & 0xf;
uint8_t reuse_vertex_index = edge_code_high & 0xf;
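// e.g. a high byte of 0x62 decodes to: step back along -Y and -Z (bit values 2 and 4) and take that cell's vertex slot 2.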
bool can_reuse = (reuse_dir & direction_validity_mask) == reuse_dir;
if (can_reuse) {
Vector3i cache_pos = pos + dir_to_prev_vec(reuse_dir);
ReuseCell &prev_cell = get_reuse_cell(cache_pos);
if (prev_cell.case_index == 0 || prev_cell.case_index == 255) {
// TODO I don't think this can happen for non-corner vertices.
cell_mesh_indices[i] = -1;
} else {
// Will reuse a previous vertex
cell_mesh_indices[i] = prev_cell.vertices[reuse_vertex_index];
}
}
if (!can_reuse || cell_mesh_indices[i] == -1) {
// Going to create a new vertex
cell_mesh_indices[i] = _output_vertices.size();
Vector3 pi = p0.to_vec3() * t0 + p1.to_vec3() * t1;
Vector3 primary = pi; //pos.to_vec3() + pi;
Vector3 normal = corner_normals[v0] * t0 + corner_normals[v1] * t1;
emit_vertex(primary, normal);
if (reuse_dir & 8) {
// Store the generated vertex so that other cells can reuse it.
ReuseCell &rc = get_reuse_cell(pos);
rc.vertices[reuse_vertex_index] = cell_mesh_indices[i];
}
}
} else if (t == 0 && v1 == 7) {
// This cell owns the vertex, so it should be created.
cell_mesh_indices[i] = _output_vertices.size();
Vector3 pi = p0.to_vec3() * t0 + p1.to_vec3() * t1;
Vector3 primary = pi; //pos.to_vec3() + pi;
Vector3 normal = corner_normals[v0] * t0 + corner_normals[v1] * t1;
emit_vertex(primary, normal);
ReuseCell &rc = get_reuse_cell(pos);
rc.vertices[0] = cell_mesh_indices[i];
} else {
// Always try to reuse previous vertices in these cases
// A 3-bit direction code leading to the proper cell can easily be obtained by
// inverting the 3-bit corner index (bitwise, by exclusive ORing with the number 7).
// The corner index depends on the value of t, t = 0 means that we're at the higher
// numbered endpoint.
uint8_t reuse_dir = (t == 0 ? v1 ^ 7 : v0 ^ 7);
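// e.g. corner 6 (binary 110) gives 6 ^ 7 == 1, i.e. the cell owning that corner vertex is one step back along -X.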
bool can_reuse = (reuse_dir & direction_validity_mask) == reuse_dir;
// Note: the only difference with similar code above is that we take vertex 0 in the `else`
if (can_reuse) {
Vector3i cache_pos = pos + dir_to_prev_vec(reuse_dir);
const ReuseCell &prev_cell = get_reuse_cell(cache_pos);
// The previous cell might not have any geometry, and we
// might therefore have to create a new vertex anyway.
if (prev_cell.case_index == 0 || prev_cell.case_index == 255) {
cell_mesh_indices[i] = -1;
} else {
cell_mesh_indices[i] = prev_cell.vertices[0];
}
}
if (!can_reuse || cell_mesh_indices[i] < 0) {
cell_mesh_indices[i] = _output_vertices.size();
Vector3 pi = p0.to_vec3() * t0 + p1.to_vec3() * t1;
Vector3 primary = pi; //pos.to_vec3() + pi;
Vector3 normal = corner_normals[v0] * t0 + corner_normals[v1] * t1;
emit_vertex(primary, normal);
}
}
} // for each cell vertex
for (int t = 0; t < triangle_count; ++t) {
for (int i = 0; i < 3; ++i) {
int index = cell_mesh_indices[regular_cell_class.vertexIndex[t * 3 + i]];
_output_indices.push_back(index);
}
}
} // x
} // y
} // z
}
VoxelMesherMC::ReuseCell &VoxelMesherMC::get_reuse_cell(Vector3i pos) {
// CRASH_COND(pos.x < 0);
// CRASH_COND(pos.y < 0);
// CRASH_COND(pos.z < 0);
// CRASH_COND(pos.x >= _block_size.x);
// CRASH_COND(pos.y >= _block_size.y);
// CRASH_COND(pos.z >= _block_size.z);
// Two decks are cached; the Z parity selects the current one.
int j = pos.z & 1;
// Row-major index within the deck; the stride is the block size along X.
int i = pos.y * _block_size.x + pos.x;
return _cache[j][i];
}
void VoxelMesherMC::emit_vertex(Vector3 primary, Vector3 normal) {
_output_vertices.push_back(primary - Vector3(MIN_PADDING, MIN_PADDING, MIN_PADDING));
_output_normals.push_back(normal);
}
VoxelMesher *VoxelMesherMC::clone() {
VoxelMesherMC *c = memnew(VoxelMesherMC);
c->_seam_mode = _seam_mode;
return c;
}
void VoxelMesherMC::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_seam_mode", "mode"), &VoxelMesherMC::set_seam_mode);
ClassDB::bind_method(D_METHOD("get_seam_mode"), &VoxelMesherMC::get_seam_mode);
BIND_ENUM_CONSTANT(SEAM_NONE);
BIND_ENUM_CONSTANT(SEAM_OVERLAP);
}


@@ -1,55 +0,0 @@
#ifndef VOXEL_MESHER_MC_H
#define VOXEL_MESHER_MC_H
#include "../voxel_mesher.h"
// TODO Remove it.
// Simple marching cubes.
// Implementation is simplified from old Transvoxel code.
class VoxelMesherMC : public VoxelMesher {
GDCLASS(VoxelMesherMC, VoxelMesher)
public:
static const int MIN_PADDING = 1;
static const int MAX_PADDING = 2;
enum SeamMode {
SEAM_NONE,
SEAM_OVERLAP
};
VoxelMesherMC();
void build(VoxelMesher::Output &output, const VoxelMesher::Input &input) override;
void set_seam_mode(SeamMode mode);
SeamMode get_seam_mode() const;
VoxelMesher *clone() override;
protected:
static void _bind_methods();
private:
struct ReuseCell {
// All reuse slots start unset; -1 is the "no vertex" sentinel.
int vertices[4] = { -1, -1, -1, -1 };
int case_index = 0;
};
void build_internal(const VoxelBuffer &voxels, unsigned int channel);
ReuseCell &get_reuse_cell(Vector3i pos);
void emit_vertex(Vector3 primary, Vector3 normal);
private:
std::vector<ReuseCell> _cache[2];
Vector3i _block_size;
SeamMode _seam_mode = SEAM_NONE;
std::vector<Vector3> _output_vertices;
std::vector<Vector3> _output_normals;
std::vector<int> _output_indices;
};
VARIANT_ENUM_CAST(VoxelMesherMC::SeamMode)
#endif // VOXEL_MESHER_MC_H


@@ -1,7 +1,6 @@
#include "register_types.h"
#include "meshers/blocky/voxel_mesher_blocky.h"
#include "meshers/dmc/voxel_mesher_dmc.h"
#include "meshers/mc/voxel_mesher_mc.h"
#include "meshers/transvoxel/voxel_mesher_transvoxel.h"
#include "streams/voxel_stream_block_files.h"
#include "streams/voxel_stream_file.h"
@@ -56,7 +55,6 @@ void register_voxel_types() {
ClassDB::register_class<VoxelMesherBlocky>();
ClassDB::register_class<VoxelMesherTransvoxel>();
ClassDB::register_class<VoxelMesherDMC>();
ClassDB::register_class<VoxelMesherMC>();
VoxelMemoryPool::create_singleton();
VoxelStringNames::create_singleton();