Optimized map saving and sending (server-side)

parent 42fb1ba676
commit 2830095366
@@ -62,8 +62,8 @@ void set_default_settings()
     g_settings.setDefault("max_simultaneous_block_sends_per_client", "1");
     //g_settings.setDefault("max_simultaneous_block_sends_per_client", "2");
     g_settings.setDefault("max_simultaneous_block_sends_server_total", "4");
-    g_settings.setDefault("max_block_send_distance", "7");
-    g_settings.setDefault("max_block_generate_distance", "7");
+    g_settings.setDefault("max_block_send_distance", "8");
+    g_settings.setDefault("max_block_generate_distance", "8");
     g_settings.setDefault("time_send_interval", "20");
     g_settings.setDefault("time_speed", "96");
     g_settings.setDefault("server_unload_unused_sectors_timeout", "60");
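Note: the two distance defaults above are counted in MapBlocks (16 nodes per block in this codebase), so raising them from 7 to 8 extends the send/generate radius by one block. A quick standalone arithmetic check, not part of the patch:

// Standalone sketch: distances are counted in MapBlocks of 16 nodes,
// so a value of 8 reaches 8*16 = 128 nodes out from the player.
#include <cstdio>

int main()
{
    const int MAP_BLOCKSIZE = 16;           // node edge length of a MapBlock
    const int max_block_send_distance = 8;  // new default
    std::printf("send radius: %d nodes\n",
            max_block_send_distance * MAP_BLOCKSIZE);
    return 0;
}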
@@ -219,6 +219,9 @@ SUGG: MovingObject::move and Player::move are basically the same.
 - NOTE: Player::move is more up-to-date.
 - NOTE: There is a simple move implementation now in collision.{h,cpp}
 
+SUGG: Server-side objects could be moved based on nodes to enable very
+      lightweight operation and simple AI
+
 Map:
 ----
 
src/map.cpp | 14
@@ -1792,7 +1792,8 @@ void Map::nodeMetadataStep(float dtime,
 
 ServerMap::ServerMap(std::string savedir):
     Map(dout_server),
-    m_seed(0)
+    m_seed(0),
+    m_map_metadata_changed(true)
 {
     dstream<<__FUNCTION_NAME<<std::endl;
 
@@ -4797,12 +4798,17 @@ void ServerMap::save(bool only_changed)
         dstream<<DTIME<<"ServerMap: Saving whole map, this can take time."
                 <<std::endl;
 
-    saveMapMeta();
+    if(only_changed == false || m_map_metadata_changed)
+    {
+        saveMapMeta();
+        m_map_metadata_changed = false;
+    }
 
-    // Disable saving chunk metadata file if chunks are disabled
+    // Disable saving chunk metadata if chunks are disabled
     if(m_chunksize != 0)
     {
-        saveChunkMeta();
+        if(only_changed == false || anyChunkModified())
+            saveChunkMeta();
     }
 
     u32 sector_meta_count = 0;
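Note: the save() change above is a plain dirty-flag optimization: map metadata is rewritten only when something has marked it changed since the last write. A minimal standalone sketch of the pattern; the names (MetaSaver, setSeed, saveMeta path) are illustrative and not from the Minetest code:

#include <fstream>
#include <string>

class MetaSaver
{
public:
    // Mark metadata as changed whenever a setter touches it
    void setSeed(long seed) { m_seed = seed; m_meta_changed = true; }

    // save(only_changed=true) skips the disk write if nothing changed
    void save(bool only_changed, const std::string &path)
    {
        if(only_changed && !m_meta_changed)
            return; // nothing to do; avoids disk I/O on periodic saves
        std::ofstream os(path.c_str());
        os << "seed = " << m_seed << "\n";
        m_meta_changed = false; // reset after a successful write
    }

private:
    long m_seed = 0;
    bool m_meta_changed = true; // true initially so the first save always writes
};

int main()
{
    MetaSaver s;
    s.save(true, "map_meta.txt"); // writes (flag starts true)
    s.save(true, "map_meta.txt"); // skipped: nothing changed since last write
    s.setSeed(42);
    s.save(true, "map_meta.txt"); // writes again
}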
src/map.h | 35
@@ -402,6 +402,35 @@ public:
         return true;
     }
 
+    /*
+        Returns true if any chunk is marked as modified
+    */
+    bool anyChunkModified()
+    {
+        for(core::map<v2s16, MapChunk*>::Iterator
+                i = m_chunks.getIterator();
+                i.atEnd()==false; i++)
+        {
+            v2s16 p = i.getNode()->getKey();
+            MapChunk *chunk = i.getNode()->getValue();
+            if(chunk->isModified())
+                return true;
+        }
+        return false;
+    }
+
+    void setChunksNonModified()
+    {
+        for(core::map<v2s16, MapChunk*>::Iterator
+                i = m_chunks.getIterator();
+                i.atEnd()==false; i++)
+        {
+            v2s16 p = i.getNode()->getKey();
+            MapChunk *chunk = i.getNode()->getValue();
+            chunk->setModified(false);
+        }
+    }
+
     /*
         Chunks are generated by using these and makeChunk().
     */
@@ -573,6 +602,12 @@ private:
     s16 m_chunksize;
     // Chunks
     core::map<v2s16, MapChunk*> m_chunks;
+
+    /*
+        Metadata is re-written on disk only if this is true.
+        This is reset to false when written on disk.
+    */
+    bool m_map_metadata_changed;
 };
 
 /*
@@ -36,7 +36,8 @@ class MapChunk
 {
 public:
     MapChunk():
-        m_generation_level(GENERATED_NOT)
+        m_generation_level(GENERATED_NOT),
+        m_modified(true)
     {
     }
 
@@ -58,8 +59,12 @@ public:
         is.read((char*)&m_generation_level, 1);
     }
 
+    bool isModified(){ return m_modified; }
+    void setModified(bool modified){ m_modified = modified; }
+
 private:
     u8 m_generation_level;
+    bool m_modified;
 };
 
 #endif
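Note: together with the map.h hunk above, MapChunk now carries a per-chunk modified flag that ServerMap::save() scans (via anyChunkModified()) before rewriting chunk metadata. A sketch of the same idea using std::map instead of Irrlicht's core::map; Chunk and ChunkStore are illustrative names, not the real types:

#include <map>
#include <utility>

struct Chunk
{
    bool modified = true; // new chunks start out dirty
};

struct ChunkStore
{
    // Keyed by 2D chunk position, standing in for core::map<v2s16, MapChunk*>
    std::map<std::pair<short, short>, Chunk> chunks;

    bool anyChunkModified() const
    {
        for(const auto &it : chunks)
            if(it.second.modified)
                return true;
        return false;
    }

    void setChunksNonModified()
    {
        for(auto &it : chunks)
            it.second.modified = false;
    }
};

In the patch, the anyChunkModified() result gates saveChunkMeta() so unchanged chunk metadata is not rewritten on every periodic save.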
src/server.cpp | 162
@@ -306,8 +306,15 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
 {
     DSTACK(__FUNCTION_NAME);
 
+    /*u32 timer_result;
+    TimeTaker timer("RemoteClient::GetNextBlocks", &timer_result);*/
+
     // Increment timers
     m_nearest_unsent_reset_timer += dtime;
+    m_nothing_to_send_pause_timer -= dtime;
+
+    if(m_nothing_to_send_pause_timer >= 0)
+        return;
 
     // Won't send anything if already sending
     if(m_blocks_sending.size() >= g_settings.getU16
@@ -338,8 +345,6 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
     /*
         Get the starting value of the block finder radius.
     */
-    s16 last_nearest_unsent_d;
-    s16 d_start;
 
     if(m_last_center != center)
     {
@@ -356,14 +361,14 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
         //dstream<<"Resetting m_nearest_unsent_d"<<std::endl;
     }
 
-    last_nearest_unsent_d = m_nearest_unsent_d;
-
-    d_start = m_nearest_unsent_d;
+    //s16 last_nearest_unsent_d = m_nearest_unsent_d;
+    s16 d_start = m_nearest_unsent_d;
+
+    //dstream<<"d_start="<<d_start<<std::endl;
 
-    u16 maximum_simultaneous_block_sends_setting = g_settings.getU16
+    u16 max_simul_sends_setting = g_settings.getU16
             ("max_simultaneous_block_sends_per_client");
-    u16 maximum_simultaneous_block_sends =
-            maximum_simultaneous_block_sends_setting;
+    u16 max_simul_sends_usually = max_simul_sends_setting;
 
     /*
         Check the time from last addNode/removeNode.
@@ -374,10 +379,13 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
     if(m_time_from_building < g_settings.getFloat(
             "full_block_send_enable_min_time_from_building"))
     {
-        maximum_simultaneous_block_sends
+        max_simul_sends_usually
             = LIMITED_MAX_SIMULTANEOUS_BLOCK_SENDS;
     }
 
+    /*
+        Number of blocks sending + number of blocks selected for sending
+    */
     u32 num_blocks_selected = m_blocks_sending.size();
 
     /*
@@ -394,6 +402,8 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
 
     //dstream<<"Starting from "<<d_start<<std::endl;
 
+    bool sending_something = false;
+
     for(s16 d = d_start; d <= d_max; d++)
     {
         //dstream<<"RemoteClient::SendBlocks(): d="<<d<<std::endl;
@@ -404,11 +414,11 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
             update our d to it.
             Else update m_nearest_unsent_d
         */
-        if(m_nearest_unsent_d != last_nearest_unsent_d)
+        /*if(m_nearest_unsent_d != last_nearest_unsent_d)
         {
             d = m_nearest_unsent_d;
             last_nearest_unsent_d = m_nearest_unsent_d;
-        }
+        }*/
 
         /*
             Get the border/face dot coordinates of a "d-radiused"
@@ -430,25 +440,18 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
             Also, don't send blocks that are already flying.
             */
 
-            u16 maximum_simultaneous_block_sends_now =
-                    maximum_simultaneous_block_sends;
+            // Start with the usual maximum
+            u16 max_simul_dynamic = max_simul_sends_usually;
 
+            // If block is very close, allow full maximum
             if(d <= BLOCK_SEND_DISABLE_LIMITS_MAX_D)
-            {
-                maximum_simultaneous_block_sends_now =
-                        maximum_simultaneous_block_sends_setting;
-            }
+                max_simul_dynamic = max_simul_sends_setting;
 
-            // Limit is dynamically lowered when building
-            if(num_blocks_selected
-                    >= maximum_simultaneous_block_sends_now)
-            {
-                /*dstream<<"Not sending more blocks. Queue full. "
-                        <<m_blocks_sending.size()
-                        <<std::endl;*/
+            // Don't select too many blocks for sending
+            if(num_blocks_selected >= max_simul_dynamic)
                 goto queue_full;
-            }
 
+            // Don't send blocks that are currently being transferred
             if(m_blocks_sending.find(p) != NULL)
                 continue;
 
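Note: the hunk above folds the old block-scoped maximum into a single max_simul_dynamic value: the usual cap (lowered while the client is building) applies at larger distances, while very close blocks always get the full configured cap. A standalone sketch of that decision; the constants stand in for LIMITED_MAX_SIMULTANEOUS_BLOCK_SENDS and BLOCK_SEND_DISABLE_LIMITS_MAX_D and are illustrative:

const int LIMITED_SENDS_WHILE_BUILDING = 1; // stand-in for LIMITED_MAX_SIMULTANEOUS_BLOCK_SENDS
const int DISABLE_LIMITS_MAX_D = 1;         // stand-in for BLOCK_SEND_DISABLE_LIMITS_MAX_D

int maxSimultaneousSends(int d, int configured_max, bool recently_built)
{
    // Usual cap: the configured value, lowered while the client is building
    int usual = recently_built ? LIMITED_SENDS_WHILE_BUILDING : configured_max;
    // Very close blocks always get the full configured cap
    if(d <= DISABLE_LIMITS_MAX_D)
        return configured_max;
    return usual;
}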
@@ -476,37 +479,35 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
                 continue;
             }
 
-#if 0
+#if 1
             /*
                 If block is far away, don't generate it unless it is
-                near ground level
-
-                NOTE: We can't know the ground level this way with the
-                new generator.
+                near ground level.
             */
-            if(d > 4)
+            if(d >= 4)
             {
-                v2s16 p2d(p.X, p.Z);
-                MapSector *sector = NULL;
-                try
-                {
-                    sector = server->m_env.getMap().getSectorNoGenerate(p2d);
-                }
-                catch(InvalidPositionException &e)
-                {
-                }
-
-                if(sector != NULL)
-                {
-                    // Get center ground height in nodes
-                    f32 gh = sector->getGroundHeight(
-                            v2s16(MAP_BLOCKSIZE/2, MAP_BLOCKSIZE/2));
-                    // Block center y in nodes
-                    f32 y = (f32)(p.Y * MAP_BLOCKSIZE + MAP_BLOCKSIZE/2);
-                    // If differs a lot, don't generate
-                    if(fabs(gh - y) > MAP_BLOCKSIZE*2)
-                        generate = false;
-                }
+#if 1
+                // Block center y in nodes
+                f32 y = (f32)(p.Y * MAP_BLOCKSIZE + MAP_BLOCKSIZE/2);
+                // Don't generate if it's very high or very low
+                if(y < -64 || y > 64)
+                    generate = false;
+#endif
+#if 0
+                v2s16 p2d_nodes_center(
+                        MAP_BLOCKSIZE*p.X,
+                        MAP_BLOCKSIZE*p.Z);
+
+                // Get ground height in nodes
+                s16 gh = server->m_env.getServerMap().findGroundLevel(
+                        p2d_nodes_center);
+                // If differs a lot, don't generate
+                if(fabs(gh - y) > MAP_BLOCKSIZE*2)
+                    generate = false;
+                    // Actually, don't even send it
+                    //continue;
+#endif
             }
 #endif
 
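Note: the newly enabled branch above stops generating far-away blocks unless their center lies in a fixed band around y=0 (the ground-height variant based on findGroundLevel stays behind #if 0). A sketch of the predicate, assuming MAP_BLOCKSIZE = 16 as in this codebase; the helper name is illustrative:

const int MAP_BLOCKSIZE = 16;

bool shouldGenerateFarBlock(int d, int block_y)
{
    if(d < 4)
        return true; // close blocks are always eligible
    // Block center height in nodes
    float y = (float)(block_y * MAP_BLOCKSIZE + MAP_BLOCKSIZE / 2);
    // Far blocks are generated only in a narrow band around ground level
    return (y >= -64 && y <= 64);
}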
@@ -556,6 +557,20 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
                 v2s16 chunkpos = map->sector_to_chunk(p2d);
                 if(map->chunkNonVolatile(chunkpos) == false)
                     block_is_invalid = true;
+#if 1
+                /*
+                    If block is not close, don't send it unless it is near
+                    ground level.
+
+                    Block is not near ground level if night-time mesh
+                    doesn't differ from day-time mesh.
+                */
+                if(d >= 3)
+                {
+                    if(block->dayNightDiffed() == false)
+                        continue;
+                }
+#endif
             }
 
             /*
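Note: the added #if 1 block uses MapBlock::dayNightDiffed() as a cheap "near the surface" test: beyond a small distance, blocks whose day-time and night-time lighting are identical are skipped for sending. A standalone sketch of the filter; the Block struct is a stand-in, not the real MapBlock:

struct Block
{
    bool day_night_differs;
    bool dayNightDiffed() const { return day_night_differs; }
};

bool shouldSendBlock(int d, const Block &block)
{
    // Deep underground or high in the air: lighting is uniform, skip for now
    if(d >= 3 && block.dayNightDiffed() == false)
        return false;
    return true;
}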
@@ -574,7 +589,8 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
             */
             if(new_nearest_unsent_d == -1 || d < new_nearest_unsent_d)
             {
-                new_nearest_unsent_d = d;
+                if(generate == true)
+                    new_nearest_unsent_d = d;
             }
 
             /*
@@ -612,6 +628,7 @@ void RemoteClient::GetNextBlocks(Server *server, float dtime,
             dest.push_back(q);
 
             num_blocks_selected += 1;
+            sending_something = true;
         }
     }
 queue_full:
@@ -620,6 +637,24 @@ queue_full:
     {
         m_nearest_unsent_d = new_nearest_unsent_d;
     }
+
+    if(sending_something == false)
+    {
+        m_nothing_to_send_counter++;
+        if(m_nothing_to_send_counter >= 3)
+        {
+            // Pause time in seconds
+            m_nothing_to_send_pause_timer = 2.0;
+        }
+    }
+    else
+    {
+        m_nothing_to_send_counter = 0;
+    }
+
+    /*timer_result = timer.stop(true);
+    if(timer_result != 0)
+        dstream<<"GetNextBlocks duration: "<<timer_result<<" (!=0)"<<std::endl;*/
 }
 
 void RemoteClient::SendObjectData(
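Note: combined with the early return added at the top of GetNextBlocks(), this gives a simple CPU-usage throttle: after three consecutive calls that select nothing to send, the block search pauses for two seconds. A standalone sketch of the same throttle; the class name is illustrative:

class SendThrottle
{
public:
    // Returns true if the expensive block search should be skipped this call
    bool shouldSkip(float dtime)
    {
        m_pause_timer -= dtime;
        return m_pause_timer >= 0;
    }

    // Call after the search, passing whether anything was queued for sending
    void report(bool sent_something)
    {
        if(sent_something)
        {
            m_nothing_counter = 0;
            return;
        }
        if(++m_nothing_counter >= 3)
            m_pause_timer = 2.0f; // pause time in seconds
    }

private:
    int m_nothing_counter = 0;
    float m_pause_timer = 0.0f;
};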
@@ -3402,6 +3437,30 @@ void Server::sendAddNode(v3s16 p, MapNode n, u16 ignore_id,
 void Server::SendBlockNoLock(u16 peer_id, MapBlock *block, u8 ver)
 {
     DSTACK(__FUNCTION_NAME);
+
+    v3s16 p = block->getPos();
+
+#if 0
+    // Analyze it a bit
+    bool completely_air = true;
+    for(s16 z0=0; z0<MAP_BLOCKSIZE; z0++)
+    for(s16 x0=0; x0<MAP_BLOCKSIZE; x0++)
+    for(s16 y0=0; y0<MAP_BLOCKSIZE; y0++)
+    {
+        if(block->getNodeNoEx(v3s16(x0,y0,z0)).d != CONTENT_AIR)
+        {
+            completely_air = false;
+            x0 = y0 = z0 = MAP_BLOCKSIZE; // Break out
+        }
+    }
+
+    // Print result
+    dstream<<"Server: Sending block ("<<p.X<<","<<p.Y<<","<<p.Z<<"): ";
+    if(completely_air)
+        dstream<<"[completely air] ";
+    dstream<<std::endl;
+#endif
+
     /*
         Create a packet with the block in the right format
     */
@@ -3413,7 +3472,6 @@ void Server::SendBlockNoLock(u16 peer_id, MapBlock *block, u8 ver)
 
     u32 replysize = 8 + blockdata.getSize();
     SharedBuffer<u8> reply(replysize);
-    v3s16 p = block->getPos();
     writeU16(&reply[0], TOCLIENT_BLOCKDATA);
     writeS16(&reply[2], p.X);
     writeS16(&reply[4], p.Y);
@@ -250,6 +250,8 @@ public:
         pending_serialization_version = SER_FMT_VER_INVALID;
         m_nearest_unsent_d = 0;
         m_nearest_unsent_reset_timer = 0.0;
+        m_nothing_to_send_counter = 0;
+        m_nothing_to_send_pause_timer = 0;
     }
     ~RemoteClient()
     {
@@ -350,6 +352,10 @@ private:
         This is resetted by PrintInfo()
     */
     u32 m_excess_gotblocks;
+
+    // CPU usage optimization
+    u32 m_nothing_to_send_counter;
+    float m_nothing_to_send_pause_timer;
 };
 
 class Server : public con::PeerHandler, public MapEventReceiver,