Merge branch 'master' of github.com:pandorabox-io/pandorabox.io

master
Thomas Rudin 2019-07-21 21:57:30 +02:00
commit 3f44ca2e69
10 changed files with 88 additions and 70 deletions

View File

@ -1,23 +0,0 @@
{
"layers": [{
"id": 0,
"name": "Earth",
"from": -16,
"to": 160
},{
"id": 1,
"name": "Moon",
"from": 5000,
"to": 5500
},{
"id": 2,
"name": "Space",
"from": 6000,
"to": 10000
},{
"id": 3,
"name": "Mars",
"from": 15850,
"to": 17000
}]
}

View File

@ -1,18 +0,0 @@
minetest.db.url=jdbc:postgresql://postgres:5432/postgres
tile.db.url=jdbc:postgresql://postgres-tiles:5432/postgres
tile.cache.impl=DATABASE
#tracker.matomo.id=1
#tracker.matomo.url=https://analytics.rudin.io/piwik.js
#tilerenderer.initialrendering.enable=true
#log.tile.updatetimings=true
tilerenderer.updateinterval=5
#log.query.performance=true
tilerenderer.maxupdateblocks=1000
tile.rendering.strategy=ASAP
prometheus.enable=true

37
doc/backup.md Normal file
View File

@ -0,0 +1,37 @@
# Server backup replication
## Methodology
Backups are taken daily with the help of lvm snapshots.
Here is the backup script from a host on the private cluster:
```sh
#!/bin/sh
cd $(dirname $0)
# prepare variables
TIMESTAMP=$(date +%Y-%m-%d_%H-%M-%S)
REMOTE=root@pandorabox.io
# create the LVM-snapshot on the main server
ssh $REMOTE "/sbin/lvcreate -l 90%FREE -s -n backup /dev/mapper/vg0-data"
# mount the snapshot data read-only (remote)
ssh $REMOTE "mount -o ro /dev/mapper/vg0-backup /mnt/snapshot/"
# rsync the snapshotted directory tree
rsync -avz --bwlimit 8000 --delete ${REMOTE}:/mnt/snapshot/pandorabox.io/ /backup/pandorabox.io/
# cleanup
ssh $REMOTE "umount /mnt/snapshot"
ssh $REMOTE "lvremove /dev/mapper/vg0-backup -f"
# create incremental backups with borg
borg create --ignore-inode .::${TIMESTAMP} /backup/pandorabox.io
# prune old backups
borg prune . --keep-daily 5 --keep-monthly 3 --keep-yearly 10
```
**NOTE**: The snapshot volume fills up with copy-on-write data during the backup process.

48
doc/folder_structure.md Normal file
View File

@ -0,0 +1,48 @@
# folder structure of the pandorabox server
Server/Application root directory `/data/pandorabox.io`
```
/data
+ /minetest
+ /world
+ /nginx
+ /postgres-minetest
+ /wiki
/doc
/docker
/scripts
docker-compose.yml
psql.sh
psql-mapserver.sh
psql-wiki.sh
create-export.sh
convert-test.sh
restart.sh
```
## restart.sh
As the name suggests, restarts the minetest-server
## convert-test.sh
Converts the server to the test instance (URLs, announce name, /news)
**WARNING** Don't do this on the main server! This is intended to execute on a restored backup only!
## psql.sh
Starts a sql-shell on the main database (blocks, players)
## /data/minetest/world
The main directory for all things minetesty...
## /data/minetest/world/worldmods
All installed mods
## create-export.sh
Creates a server-extract with the main parts (blocks,mods,essential mod-data) to distribute
**NOTE** This will create 2 files that amount to ca. 30 GB in the folder `/export`
**WARNING** Don't do this on the main server, the daily backup _will_ replicate this!

View File

@ -12,7 +12,7 @@ services:
- "postgres"
volumes:
- "./data/minetest:/data"
- "./data/minetest/debug.txt:/usr/local/debug.txt"
- "./data/minetest/debug.txt:/root/.minetest/debug.txt"
working_dir: /data
command: minetestserver --config /data/minetest.conf --world /data/world/ --quiet
logging:

View File

@ -1 +0,0 @@
docker-compose exec postgres pg_dump -U postgres postgres

View File

@ -1,7 +0,0 @@
#!/bin/sh
# Emergency recovery: clear the persisted entity/actionqueue state that can
# crash or lag the server on startup, then restart and tail the logs.
docker-compose stop minetest
# Move (not delete) the state aside so it can be restored if needed.
# NOTE(review): a pre-existing .bak is silently overwritten by mv -- confirm
# that losing the previous backup is acceptable before re-running.
mv data/minetest/world/luaentities data/minetest/world/luaentities.bak
mv data/minetest/world/mesecon_actionqueue data/minetest/world/mesecon_actionqueue.bak
docker-compose start minetest && docker-compose logs -f --tail 200 minetest

2
scripts/check_inv_size.sh Executable file
View File

@ -0,0 +1,2 @@
#!/bin/sh
# Report the 30 largest serialized inventory items belonging to the 20 most
# recently active players (by player.modification_date) -- used to spot
# oversized inventories bloating the database. size = length of the
# serialized item string in player_inventory_items.
docker-compose exec postgres psql -U postgres -c "select player, inv_id, length(item) as size from player_inventory_items where player in (select name from player order by modification_date desc limit 20) order by size desc limit 30"

View File

@ -1,18 +0,0 @@
-- Migration: add a per-row change counter to blocks so consumers (e.g. the
-- tile renderer) can cheaply detect modified map blocks.
--
-- NOTE: CREATE INDEX CONCURRENTLY cannot run inside a transaction block;
-- feed this file to psql in autocommit mode (do not wrap in BEGIN/COMMIT).
-- IF NOT EXISTS / DROP ... IF EXISTS make the migration safe to re-run.

ALTER TABLE blocks ADD COLUMN IF NOT EXISTS changecount bigint NOT NULL DEFAULT 0;

CREATE INDEX CONCURRENTLY IF NOT EXISTS blocks_changecount_index ON blocks (changecount);

-- Trigger function: bump the counter on every UPDATE of a blocks row.
CREATE OR REPLACE FUNCTION on_blocks_change_count() RETURNS trigger AS
$BODY$
BEGIN
    -- := is the documented plpgsql assignment operator
    NEW.changecount := OLD.changecount + 1;
    RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql;

-- Recreate idempotently: CREATE OR REPLACE TRIGGER requires PostgreSQL 14+,
-- so drop-if-exists first for compatibility with older servers.
DROP TRIGGER IF EXISTS blocks_update_changecount ON blocks;
CREATE TRIGGER blocks_update_changecount
    BEFORE UPDATE
    ON blocks
    FOR EACH ROW
    EXECUTE PROCEDURE on_blocks_change_count();

View File

@ -1,2 +0,0 @@
#!/bin/sh
# Apply the changecount migration to the dockerized postgres instance.
# -T disables pseudo-TTY allocation, which is required for stdin redirection
# to reach the command inside the container via docker-compose exec.
# (Input redirection replaces the original useless use of cat.)
sudo docker-compose exec -T postgres psql -U postgres < changecount_column.sql