Merge pull request #7 from HimbeerserverDE/def_multiplex

Def multiplex
Merged into master by HimbeerserverDE on 2021-02-13 19:58:03 +01:00, committed by GitHub
commit e6ae16c4ef
11 changed files with 664 additions and 120 deletions


@@ -6,40 +6,34 @@ import (
func processAoRmAdd(p *Peer, data []byte) []byte {
countRm := binary.BigEndian.Uint16(data[2:4])
aoRm := make([]uint16, countRm)
aoRmI := 0
var aoRm []uint16
for i := uint16(0); i < countRm; i += 2 {
aoRm[aoRmI] = binary.BigEndian.Uint16(data[4+i : 6+i])
aoRmI++
aoRm = append(aoRm, binary.BigEndian.Uint16(data[4+i:6+i]))
}
countAdd := binary.BigEndian.Uint16(data[4+countRm*2 : 6+countRm*2])
aoAdd := make([]uint16, countAdd)
aoAddI := 0
var aoAdd []uint16
si := 6 + uint32(countRm)*2
for i := uint32(0); i < uint32(countAdd); i++ {
initDataLen := binary.BigEndian.Uint32(data[3+si : 7+si])
namelen := binary.BigEndian.Uint16(data[8+si : 10+si])
if uint16(len(data)) < 10+namelen {
name := data[10+si : 10+si+uint32(namelen)]
if string(name) == p.Username() {
if p.initAoReceived {
binary.BigEndian.PutUint16(data[4+countRm*2:6+countRm*2], countAdd-1)
data = append(data[:si], data[7+si+initDataLen:]...)
} else {
p.initAoReceived = true
}
si += 7 + initDataLen
continue
name := data[10+si : 10+si+uint32(namelen)]
if string(name) == p.Username() {
if p.initAoReceived {
binary.BigEndian.PutUint16(data[4+countRm*2:6+countRm*2], countAdd-1)
data = append(data[:si], data[7+si+initDataLen:]...)
si -= 7 + initDataLen
} else {
p.initAoReceived = true
}
aoAdd[aoAddI] = binary.BigEndian.Uint16(data[si : 2+si])
aoAddI++
si += 7 + initDataLen
continue
}
aoAdd = append(aoAdd, binary.BigEndian.Uint16(data[si:2+si]))
si += 7 + initDataLen
}
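For reference, the wire layout assumed by this handler is a big-endian uint16 count of removed object IDs followed by that many uint16 IDs, then the added-object entries. A minimal sketch of the append-based parsing pattern the new code switches to (parseAoRemovals is a hypothetical helper, not part of this change):

package main

import "encoding/binary"

// parseAoRemovals reads the removed-object ID list of an
// ActiveObjectRemoveAdd packet: data[0:2] is the command, data[2:4] the
// big-endian count, followed by count big-endian uint16 IDs.
func parseAoRemovals(data []byte) []uint16 {
	countRm := binary.BigEndian.Uint16(data[2:4])
	var aoRm []uint16
	for i := uint32(0); i < uint32(countRm); i++ {
		aoRm = append(aoRm, binary.BigEndian.Uint16(data[4+2*i:6+2*i]))
	}
	return aoRm
}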

blockdata.go (new file, 72 lines)

@@ -0,0 +1,72 @@
package main
import (
"bytes"
"compress/zlib"
"encoding/binary"
"io"
"github.com/anon55555/mt/rudp"
)
const NodeCount = 16 * 16 * 16
func processBlockdata(p *Peer, pkt *rudp.Pkt) bool {
srv := p.ServerName()
si := 14
// Check for zlib header
for ; !(pkt.Data[si] == 120 && (pkt.Data[1+si] == 0x01 || pkt.Data[1+si] == 0x9C || pkt.Data[1+si] == 0xDA)); si++ {
}
compressedNodes := pkt.Data[13:si]
zr, err := zlib.NewReader(bytes.NewReader(compressedNodes))
if err != nil {
return true
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, zr)
if err != nil {
return true
}
zr.Close()
nodes := buf.Bytes()
for i := uint32(0); i < NodeCount; i++ {
contentID := binary.BigEndian.Uint16(nodes[2*i : 2+2*i])
if contentID >= ContentUnknown && contentID <= ContentIgnore {
continue
}
newID := nodeDefs[srv][contentID].ID()
binary.BigEndian.PutUint16(nodes[2*i:2+2*i], newID)
}
var recompBuf bytes.Buffer
zw := zlib.NewWriter(&recompBuf)
zw.Write(nodes)
zw.Close()
recompNodes := recompBuf.Bytes()
data := make([]byte, 13+len(recompNodes)+len(pkt.Data[si:]))
copy(data[:13], pkt.Data[:13])
copy(data[13:13+len(recompNodes)], recompNodes)
copy(data[13+len(recompNodes):], pkt.Data[si:])
pkt.Data = data
return false
}
func processAddnode(p *Peer, pkt *rudp.Pkt) bool {
srv := p.ServerName()
contentID := binary.BigEndian.Uint16(pkt.Data[8:10])
newID := nodeDefs[srv][contentID].ID()
binary.BigEndian.PutUint16(pkt.Data[8:10], newID)
return false
}
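The heart of processBlockdata is a zlib round trip over the serialized node data: inflate, rewrite the first NodeCount big-endian content IDs through the per-server mapping, deflate again. A self-contained sketch of that round trip, with an illustrative function name and a plain map standing in for nodeDefs[srv]:

package main

import (
	"bytes"
	"compress/zlib"
	"encoding/binary"
	"io"
)

// remapContentIDs inflates a zlib-compressed node array, rewrites the first
// nodeCount 16-bit content IDs through mapping, and deflates the result.
func remapContentIDs(compressed []byte, mapping map[uint16]uint16, nodeCount int) ([]byte, error) {
	zr, err := zlib.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return nil, err
	}
	defer zr.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, zr); err != nil {
		return nil, err
	}
	nodes := buf.Bytes()

	for i := 0; i < nodeCount && 2*i+1 < len(nodes); i++ {
		id := binary.BigEndian.Uint16(nodes[2*i : 2*i+2])
		if newID, ok := mapping[id]; ok {
			binary.BigEndian.PutUint16(nodes[2*i:2*i+2], newID)
		}
	}

	var out bytes.Buffer
	zw := zlib.NewWriter(&out)
	if _, err := zw.Write(nodes); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}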


@@ -135,11 +135,11 @@ func processPktCommand(src, dst *Peer, pkt *rudp.Pkt) bool {
if ch == rpcCh {
switch sig := pkt.Data[2]; sig {
case ModChSigJoinOk:
src.useRpc = true
src.SetUseRpc(true)
case ModChSigSetState:
state := pkt.Data[5+chlen]
if state == ModChStateRO {
src.useRpc = false
src.SetUseRpc(false)
}
}
return true
@@ -147,6 +147,18 @@ func processPktCommand(src, dst *Peer, pkt *rudp.Pkt) bool {
return false
case ToClientModChannelMsg:
return processRpc(src, *pkt)
case ToClientBlockdata:
return processBlockdata(dst, pkt)
case ToClientAddNode:
return processAddnode(dst, pkt)
case ToClientHudAdd:
id := binary.BigEndian.Uint32(pkt.Data[2:6])
dst.huds[id] = true
return false
case ToClientHudRm:
id := binary.BigEndian.Uint32(pkt.Data[2:6])
dst.huds[id] = false
return false
default:
return false
}

itemdef.go (new file, 289 lines)

@@ -0,0 +1,289 @@
package main
import (
"bytes"
"compress/zlib"
"encoding/binary"
"io"
"math"
)
var itemdef []byte
type ItemDef struct {
name string
data []byte
}
// Name returns the name of an ItemDef
func (i *ItemDef) Name() string { return i.name }
// Data returns the actual definition
func (i *ItemDef) Data() []byte { return i.data }
type GroupCap struct {
name string
uses int16
maxLevel int16
times map[int16]float32
}
// NewGroupCap returns a partially initialised GroupCap
func NewGroupCap(name string, uses, maxLevel int16) *GroupCap {
return &GroupCap{
name: name,
uses: uses,
maxLevel: maxLevel,
times: make(map[int16]float32),
}
}
// Name returns the name of the group
func (g *GroupCap) Name() string { return g.name }
// Uses returns the number of uses
func (g *GroupCap) Uses() int16 { return g.uses }
// MaxLevel returns the maximum level
func (g *GroupCap) MaxLevel() int16 { return g.maxLevel }
// Times returns the digging times
func (g *GroupCap) Times() map[int16]float32 { return g.times }
// SetTimes sets the digging time for a given level
func (g *GroupCap) SetTimes(level int16, time float32) {
g.times[level] = time
}
type ToolCapabs struct {
fullPunchInterval float32
maxDropLevel int16
groupCaps map[string]*GroupCap
damageGroups map[string]int16
punchAttackUses uint16
}
func NewToolCapabs(fullPunchInterval float32, maxDropLevel int16) *ToolCapabs {
return &ToolCapabs{
fullPunchInterval: fullPunchInterval,
maxDropLevel: maxDropLevel,
groupCaps: make(map[string]*GroupCap),
damageGroups: make(map[string]int16),
}
}
// PunchInt returns the full punch interval
func (t *ToolCapabs) PunchInt() float32 { return t.fullPunchInterval }
// MaxDropLevel returns the maximum drop level
func (t *ToolCapabs) MaxDropLevel() int16 { return t.maxDropLevel }
// GroupCaps returns the group capabilities
func (t *ToolCapabs) GroupCaps() map[string]*GroupCap { return t.groupCaps }
// AddGroupCap adds a GroupCap
func (t *ToolCapabs) AddGroupCap(g *GroupCap) {
t.groupCaps[g.Name()] = g
}
// DamageGroups returns the damage groups
func (t *ToolCapabs) DamageGroups() map[string]int16 { return t.damageGroups }
// AddDamageGroup adds a damage group
func (t *ToolCapabs) AddDamageGroup(name string, rating int16) {
t.damageGroups[name] = rating
}
// PunchAttackUses returns the punch attack uses
func (t *ToolCapabs) PunchAttackUses() uint16 { return t.punchAttackUses }
// SetPunchAttackUses sets the punch attack uses
func (t *ToolCapabs) SetPunchAttackUses(uses uint16) {
t.punchAttackUses = uses
}
func bestCap(defs [][]byte, capabs []*ToolCapabs) *ItemDef {
var bestK, bestLen int
for k, cap := range capabs {
var grpLen int
for _, gcap := range cap.GroupCaps() {
grpLen += len(gcap.Times())
}
if grpLen > bestLen {
bestLen = grpLen
bestK = k
}
}
return &ItemDef{data: defs[bestK]}
}
func mergeItemdefs(mgrs [][]byte) error {
var itemDefs []*ItemDef
aliases := make(map[string]string)
var handDefs [][]byte
var handCapabs []*ToolCapabs
// Extract definitions from CItemDefManager
for _, compressedMgr := range mgrs {
zr, err := zlib.NewReader(bytes.NewReader(compressedMgr))
if err != nil {
return err
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, zr)
if err != nil {
return err
}
zr.Close()
mgr := buf.Bytes()
count := binary.BigEndian.Uint16(mgr[1:3])
si := uint32(3)
ItemLoop:
for i := uint16(0); i < count; i++ {
deflen := binary.BigEndian.Uint16(mgr[si : 2+si])
def := mgr[2+si : 2+si+uint32(deflen)]
itemNameLen := binary.BigEndian.Uint16(def[2:4])
itemName := string(def[4 : 4+itemNameLen])
desclen := binary.BigEndian.Uint16(def[4+itemNameLen : 6+itemNameLen])
invImgLen := binary.BigEndian.Uint16(def[6+itemNameLen+desclen : 8+itemNameLen+desclen])
wieldImgLen := binary.BigEndian.Uint16(def[8+itemNameLen+desclen+invImgLen : 10+itemNameLen+desclen+invImgLen])
capablen := binary.BigEndian.Uint16(def[26+itemNameLen+desclen+invImgLen+wieldImgLen : 28+itemNameLen+desclen+invImgLen+wieldImgLen])
capab := def[28+itemNameLen+desclen+invImgLen+wieldImgLen : 28+itemNameLen+desclen+invImgLen+wieldImgLen+capablen]
if capablen > 0 && itemName == "" {
fpi := math.Float32frombits(binary.BigEndian.Uint32(capab[1:5]))
mdl := int16(binary.BigEndian.Uint16(capab[5:7]))
tcaps := NewToolCapabs(fpi, mdl)
grpCapsLen := binary.BigEndian.Uint32(capab[7:11])
sj := uint32(11)
for j := uint32(0); j < grpCapsLen; j++ {
capNameLen := binary.BigEndian.Uint16(capab[sj : 2+sj])
capName := string(capab[2+sj : 2+sj+uint32(capNameLen)])
uses := int16(binary.BigEndian.Uint16(capab[2+sj+uint32(capNameLen) : 4+sj+uint32(capNameLen)]))
maxlevel := int16(binary.BigEndian.Uint16(capab[4+sj+uint32(capNameLen) : 6+sj+uint32(capNameLen)]))
gcap := NewGroupCap(capName, uses, maxlevel)
times := binary.BigEndian.Uint32(capab[6+sj+uint32(capNameLen) : 10+sj+uint32(capNameLen)])
sk := uint32(10 + sj + uint32(capNameLen))
for k := uint32(0); k < times; k++ {
level := int16(binary.BigEndian.Uint16(capab[sk : 2+sk]))
times_v := math.Float32frombits(binary.BigEndian.Uint32(capab[2+sk : 6+sk]))
gcap.SetTimes(level, times_v)
sk += 6
}
tcaps.AddGroupCap(gcap)
sj += uint32(capNameLen) + 10 + times*6
}
dmgGrpCapsLen := binary.BigEndian.Uint32(capab[sj : 4+sj])
sj += 4
for j := uint32(0); j < dmgGrpCapsLen; j++ {
dmgNameLen := binary.BigEndian.Uint16(capab[sj : 2+sj])
dmgName := string(capab[2+sj : 2+sj+uint32(dmgNameLen)])
rating := int16(binary.BigEndian.Uint16(capab[2+sj+uint32(dmgNameLen) : 4+sj+uint32(dmgNameLen)]))
tcaps.AddDamageGroup(dmgName, rating)
sj += 4 + uint32(dmgNameLen)
}
tcaps.SetPunchAttackUses(binary.BigEndian.Uint16(capab[sj : 2+sj]))
handDefs = append(handDefs, def)
handCapabs = append(handCapabs, tcaps)
si += 2 + uint32(deflen)
continue ItemLoop
}
for _, idef := range itemDefs {
if idef.Name() == itemName {
si += 2 + uint32(deflen)
continue ItemLoop
}
}
itemDefs = append(itemDefs, &ItemDef{name: itemName, data: def})
si += 2 + uint32(deflen)
}
aliasCount := binary.BigEndian.Uint16(mgr[si : 2+si])
si += 2
for i := uint16(0); i < aliasCount; i++ {
namelen := binary.BigEndian.Uint16(mgr[si : 2+si])
name := string(mgr[2+si : 2+si+uint32(namelen)])
convertlen := binary.BigEndian.Uint16(mgr[2+si+uint32(namelen) : 4+si+uint32(namelen)])
convert := string(mgr[4+si+uint32(namelen) : 4+si+uint32(namelen)+uint32(convertlen)])
if aliases[name] == "" {
aliases[name] = convert
}
si += 4 + uint32(namelen) + uint32(convertlen)
}
}
hand := bestCap(handDefs, handCapabs)
itemDefs = append(itemDefs, hand)
// Merge definitions into new CItemDefManager
mgr := make([]byte, 3)
mgr[0] = uint8(0x00)
binary.BigEndian.PutUint16(mgr[1:3], uint16(len(itemDefs)))
var allDefs []byte
for _, def := range itemDefs {
defData := make([]byte, 2+len(def.Data()))
binary.BigEndian.PutUint16(defData[0:2], uint16(len(def.Data())))
copy(defData[2:], def.Data())
allDefs = append(allDefs, defData...)
}
mgr = append(mgr, allDefs...)
aliasCount := make([]byte, 2)
binary.BigEndian.PutUint16(aliasCount, uint16(len(aliases)))
mgr = append(mgr, aliasCount...)
for name, convert := range aliases {
namelen := make([]byte, 2)
binary.BigEndian.PutUint16(namelen, uint16(len(name)))
convertlen := make([]byte, 2)
binary.BigEndian.PutUint16(convertlen, uint16(len(convert)))
mgr = append(mgr, namelen...)
mgr = append(mgr, []byte(name)...)
mgr = append(mgr, convertlen...)
mgr = append(mgr, []byte(convert)...)
}
var compressedMgr bytes.Buffer
zw := zlib.NewWriter(&compressedMgr)
zw.Write(mgr)
zw.Close()
itemdef = compressedMgr.Bytes()
return nil
}
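mergeItemdefs walks the decompressed CItemDefManager blob by hand: nearly every field is a big-endian uint16 length prefix followed by that many bytes, and the offsets above (itemNameLen, desclen, invImgLen, ...) chain those prefixes together. A small cursor sketch of that pattern (the cursor type and its methods are illustrative, not part of this change):

package main

import "encoding/binary"

// cursor walks a byte slice the way mergeItemdefs does with explicit
// offsets: read a length prefix, then slice out that many bytes.
type cursor struct {
	data []byte
	off  uint32
}

// u16 reads a big-endian uint16 and advances past it.
func (c *cursor) u16() uint16 {
	v := binary.BigEndian.Uint16(c.data[c.off : c.off+2])
	c.off += 2
	return v
}

// str16 reads a uint16 length prefix followed by that many bytes.
func (c *cursor) str16() string {
	n := uint32(c.u16())
	s := string(c.data[c.off : c.off+n])
	c.off += n
	return s
}

// itemName extracts the name from one serialized definition, mirroring the
// def[2:4] offset used above (two leading bytes precede the name field).
func itemName(def []byte) string {
	c := &cursor{data: def, off: 2}
	return c.str16()
}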


@@ -49,6 +49,7 @@ func (l *Listener) Accept() (*Peer, error) {
clt.aoIDs = make(map[uint16]bool)
clt.modChs = make(map[string]bool)
clt.huds = make(map[uint32]bool)
maxPeers, ok := GetConfKey("player_limit").(int)
if !ok {

media.go (103 changes)

@@ -17,9 +17,7 @@ import (
const MediaRefetchInterval = 10 * time.Minute
var media map[string]*mediaFile
var tooldefs [][]byte
var nodedefs [][]byte
var craftitemdefs [][]byte
var nodedefs map[string][]byte
var itemdefs [][]byte
var detachedinvs map[string][][]byte
var movement []byte
@@ -47,14 +45,17 @@ func (p *Peer) fetchMedia() {
}
switch cmd := binary.BigEndian.Uint16(pkt.Data[0:2]); cmd {
case ToClientTooldef:
tooldefs = append(tooldefs, pkt.Data[2:])
case ToClientNodedef:
nodedefs = append(nodedefs, pkt.Data[2:])
case ToClientCraftitemdef:
craftitemdefs = append(craftitemdefs, pkt.Data[2:])
servers := GetConfKey("servers").(map[interface{}]interface{})
var srvname string
for server := range servers {
if GetConfKey("servers:"+server.(string)+":address") == p.Addr().String() {
srvname = server.(string)
}
}
nodedefs[srvname] = pkt.Data[6:]
case ToClientItemdef:
itemdefs = append(itemdefs, pkt.Data[2:])
itemdefs = append(itemdefs, pkt.Data[6:])
case ToClientMovement:
movement = pkt.Data[2:]
case ToClientDetachedInventory:
@@ -158,70 +159,38 @@ func (p *Peer) announceMedia() {
return
}
for _, def := range tooldefs {
data := make([]byte, 2+len(def))
data[0] = uint8(0x00)
data[1] = uint8(ToClientTooldef)
copy(data[2:], def)
data := make([]byte, 6+len(nodedef))
data[0] = uint8(0x00)
data[1] = uint8(ToClientNodedef)
binary.BigEndian.PutUint32(data[2:6], uint32(len(nodedef)))
copy(data[6:], nodedef)
ack, err := p.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
continue
}
<-ack
ack, err := p.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
}
<-ack
for _, def := range nodedefs {
data := make([]byte, 2+len(def))
data[0] = uint8(0x00)
data[1] = uint8(ToClientNodedef)
copy(data[2:], def)
data = make([]byte, 6+len(itemdef))
data[0] = uint8(0x00)
data[1] = uint8(ToClientItemdef)
binary.BigEndian.PutUint32(data[2:6], uint32(len(itemdef)))
copy(data[6:], itemdef)
ack, err := p.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
continue
}
<-ack
}
for _, def := range craftitemdefs {
data := make([]byte, 2+len(def))
data[0] = uint8(0x00)
data[1] = uint8(ToClientCraftitemdef)
copy(data[2:], def)
ack, err := p.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
continue
}
<-ack
}
for _, def := range itemdefs {
data := make([]byte, 2+len(def))
data[0] = uint8(0x00)
data[1] = uint8(ToClientItemdef)
copy(data[2:], def)
ack, err := p.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
continue
}
<-ack
ack, err = p.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
}
<-ack
p.updateDetachedInvs(srvname)
data := make([]byte, 2+len(movement))
data = make([]byte, 2+len(movement))
data[0] = uint8(0x00)
data[1] = uint8(ToClientMovement)
copy(data[2:], movement)
ack, err := p.Send(rudp.Pkt{Data: data})
ack, err = p.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
}
@@ -393,6 +362,8 @@ func loadMedia() {
log.Print("Fetching media")
media = make(map[string]*mediaFile)
nodedefs = make(map[string][]byte)
itemdefs = [][]byte{}
detachedinvs = make(map[string][][]byte)
loadMediaCache()
@@ -426,6 +397,16 @@ func loadMedia() {
srv.fetchMedia()
}
if nodedef == nil {
if err := mergeNodedefs(nodedefs); err != nil {
log.Fatal(err)
}
}
if err := mergeItemdefs(itemdefs); err != nil {
log.Fatal(err)
}
updateMediaCache()
}
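The merged managers are now announced with a 4-byte length field between the command and the zlib blob: data[0:2] is the command, data[2:6] the big-endian payload length, data[6:] the payload. A minimal sketch of that framing (makeDefPkt is a hypothetical helper):

package main

import "encoding/binary"

// makeDefPkt frames a compressed definition manager the way announceMedia
// does above: [u16 command][u32 payload length][payload].
func makeDefPkt(cmd uint16, payload []byte) []byte {
	data := make([]byte, 6+len(payload))
	binary.BigEndian.PutUint16(data[0:2], cmd)
	binary.BigEndian.PutUint32(data[2:6], uint32(len(payload)))
	copy(data[6:], payload)
	return data
}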


@@ -1,6 +1,6 @@
/*
Multiserver is a multi-server minetest reverse proxy capable of
media multiplexing
media and definition multiplexing
*/
package main

nodedef.go (new file, 127 lines)

@@ -0,0 +1,127 @@
package main
import (
"bytes"
"compress/zlib"
"encoding/binary"
"io"
)
var nodedef []byte
const (
ContentUnknown = 125
ContentAir = 126
ContentIgnore = 127
)
type NodeDef struct {
id uint16
name string
data []byte
}
var nodeDefs map[string]map[uint16]*NodeDef
// ID returns the content ID of a NodeDef
func (n *NodeDef) ID() uint16 { return n.id }
// Name returns the name of a NodeDef
func (n *NodeDef) Name() string { return n.name }
// Data returns the actual definition
func (n *NodeDef) Data() []byte { return n.data }
func mergeNodedefs(mgrs map[string][]byte) error {
var total uint16
nodeDefs = make(map[string]map[uint16]*NodeDef)
var nextID uint16
// Extract definitions from NodeDefManagers
for srv, compressedMgr := range mgrs {
if nodeDefs[srv] == nil {
nodeDefs[srv] = make(map[uint16]*NodeDef)
}
zr, err := zlib.NewReader(bytes.NewReader(compressedMgr))
if err != nil {
return err
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, zr)
if err != nil {
return err
}
zr.Close()
mgr := buf.Bytes()
count := binary.BigEndian.Uint16(mgr[1:3])
si := uint32(7)
NodeLoop:
for i := uint16(0); i < count; i++ {
id := binary.BigEndian.Uint16(mgr[si : 2+si])
deflen := binary.BigEndian.Uint16(mgr[2+si : 4+si])
nodeNameLen := binary.BigEndian.Uint16(mgr[5+si : 7+si])
nodeName := string(mgr[7+si : 7+si+uint32(nodeNameLen)])
for _, srvdefs := range nodeDefs {
for _, def := range srvdefs {
if def.Name() == nodeName {
nodeDefs[srv][id] = &NodeDef{id: def.ID()}
si += 4 + uint32(deflen)
continue NodeLoop
}
}
}
nodeDefs[srv][id] = &NodeDef{
id: nextID,
name: nodeName,
data: mgr[2+si : 4+si+uint32(deflen)],
}
total++
nextID++
if nextID == ContentUnknown {
nextID = ContentIgnore + 1
}
si += 4 + uint32(deflen)
}
}
// Merge definitions into new NodeDefManager
mgr := make([]byte, 7)
mgr[0] = uint8(1)
binary.BigEndian.PutUint16(mgr[1:3], total)
var allDefs []byte
for _, srvdefs := range nodeDefs {
for _, def := range srvdefs {
if len(def.Data()) > 0 {
defData := make([]byte, 2+len(def.Data()))
binary.BigEndian.PutUint16(defData[0:2], def.ID())
copy(defData[2:], def.Data())
allDefs = append(allDefs, defData...)
}
}
}
binary.BigEndian.PutUint32(mgr[3:7], uint32(len(allDefs)))
mgr = append(mgr, allDefs...)
var compressedMgr bytes.Buffer
zw := zlib.NewWriter(&compressedMgr)
zw.Write(mgr)
zw.Close()
nodedef = compressedMgr.Bytes()
return nil
}
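mergeNodedefs hands out fresh content IDs sequentially and skips the reserved range 125-127 (unknown, air, ignore), so remapped IDs never collide with those special values. The allocation rule in isolation (nextContentID is an illustrative helper):

package main

// Reserved content IDs, mirroring the constants above.
const (
	contentUnknown = 125
	contentIgnore  = 127
)

// nextContentID returns the ID that follows cur, jumping over the
// reserved 125..127 range exactly like the nextID logic in mergeNodedefs.
func nextContentID(cur uint16) uint16 {
	next := cur + 1
	if next == contentUnknown {
		next = contentIgnore + 1
	}
	return next
}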

peer.go (23 changes)

@@ -37,8 +37,11 @@ type Peer struct {
initAoReceived bool
aoIDs map[uint16]bool
useRpc bool
modChs map[string]bool
useRpcMu sync.RWMutex
useRpc bool
modChs map[string]bool
huds map[uint32]bool
}
// Username returns the username of the Peer
@@ -92,6 +95,22 @@ func (p *Peer) SetServer(s *Peer) {
p.srv = s
}
// UseRpc reports whether RPC messages can be sent to the Peer
func (p *Peer) UseRpc() bool {
p.useRpcMu.RLock()
defer p.useRpcMu.RUnlock()
return p.useRpc
}
// SetUseRpc sets the value returned by UseRpc
func (p *Peer) SetUseRpc(useRpc bool) {
p.useRpcMu.Lock()
defer p.useRpcMu.Unlock()
p.useRpc = useRpc
}
// Connect connects to the server on conn
// and closes conn when the Peer disconnects
func Connect(conn net.PacketConn, addr net.Addr) (*Peer, error) {


@@ -70,36 +70,27 @@ func (p *Peer) Redirect(newsrv string) error {
return err
}
// Remove active objects
data := make([]byte, 6+len(p.aoIDs)*2)
data[0] = uint8(0x00)
data[1] = uint8(ToClientActiveObjectRemoveAdd)
binary.BigEndian.PutUint16(data[2:4], uint16(len(p.aoIDs)))
i := 4
for ao := range p.aoIDs {
binary.BigEndian.PutUint16(data[i:2+i], ao)
i += 2
}
binary.BigEndian.PutUint16(data[i:2+i], uint16(0))
// Update detached inventories
if len(detachedinvs[newsrv]) > 0 {
for i := range detachedinvs[newsrv] {
data := make([]byte, 2+len(detachedinvs[newsrv][i]))
data[0] = uint8(0x00)
data[1] = uint8(ToClientDetachedInventory)
copy(data[2:], detachedinvs[newsrv][i])
ack, err := p.Send(rudp.Pkt{Data: data})
if err != nil {
return err
}
<-ack
}
// Reset formspec style
data := []byte{
0x00, ToClientFormspecPrepend,
0x00, 0x00,
}
ack, err := p.Send(rudp.Pkt{Data: data})
// Remove active objects
data = make([]byte, 6+len(p.aoIDs)*2)
data[0] = uint8(0x00)
data[1] = uint8(ToClientActiveObjectRemoveAdd)
binary.BigEndian.PutUint16(data[2:4], uint16(len(p.aoIDs)))
si := 4
for ao := range p.aoIDs {
binary.BigEndian.PutUint16(data[si:2+si], ao)
si += 2
}
binary.BigEndian.PutUint16(data[si:2+si], uint16(0))
ack, err = p.Send(rudp.Pkt{Data: data})
if err != nil {
return err
}
@@ -107,6 +98,60 @@ func (p *Peer) Redirect(newsrv string) error {
p.aoIDs = make(map[uint16]bool)
// Remove HUDs
data = []byte{0, ToClientHudSetParam, 0, 1, 0, 4, 0, 0, 0, 8}
ack, err = p.Send(rudp.Pkt{ChNo: 1, Data: data})
if err != nil {
return err
}
<-ack
data = []byte{0, ToClientHudSetParam, 0, 2, 0, 0}
ack, err = p.Send(rudp.Pkt{ChNo: 1, Data: data})
if err != nil {
return err
}
<-ack
data = []byte{0, ToClientHudSetParam, 0, 3, 0, 0}
ack, err = p.Send(rudp.Pkt{ChNo: 1, Data: data})
if err != nil {
return err
}
<-ack
for hud := range p.huds {
data = make([]byte, 6)
data[0] = uint8(0x00)
data[1] = uint8(ToClientHudRm)
binary.BigEndian.PutUint32(data[2:6], hud)
ack, err = p.Send(rudp.Pkt{ChNo: 1, Data: data})
if err != nil {
return err
}
<-ack
}
// Update detached inventories
if len(detachedinvs[newsrv]) > 0 {
for i := range detachedinvs[newsrv] {
data = make([]byte, 2+len(detachedinvs[newsrv][i]))
data[0] = uint8(0x00)
data[1] = uint8(ToClientDetachedInventory)
copy(data[2:], detachedinvs[newsrv][i])
ack, err = p.Send(rudp.Pkt{Data: data})
if err != nil {
return err
}
<-ack
}
}
p.Server().stopForwarding()
fin := make(chan *Peer) // close-only
@@ -126,7 +171,7 @@ func (p *Peer) Redirect(newsrv string) error {
binary.BigEndian.PutUint16(data[2:4], uint16(len(ch)))
copy(data[4:], []byte(ch))
ack, err := srv.Send(rudp.Pkt{Data: data})
ack, err = srv.Send(rudp.Pkt{Data: data})
if err != nil {
log.Print(err)
}
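The three ToClientHudSetParam packets in the Redirect hunk above appear to reset the hotbar: param 1 back to an item count of 8, params 2 and 3 (the hotbar images) to empty strings. The layout assumed here is [u16 command][u16 param][u16 value length][value]; a hypothetical builder for such packets:

package main

import "encoding/binary"

// hudSetParam builds a HUD parameter packet with the layout used above:
// [u16 command][u16 param][u16 len(value)][value].
func hudSetParam(cmd, param uint16, value []byte) []byte {
	data := make([]byte, 6+len(value))
	binary.BigEndian.PutUint16(data[0:2], cmd)
	binary.BigEndian.PutUint16(data[2:4], param)
	binary.BigEndian.PutUint16(data[4:6], uint16(len(value)))
	copy(data[6:], value)
	return data
}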

rpc.go (14 changes)

@@ -125,7 +125,7 @@ func processRpc(p *Peer, pkt rudp.Pkt) bool {
}
func (p *Peer) doRpc(rpc, rq string) {
if !p.useRpc {
if !p.UseRpc() {
return
}
@@ -147,6 +147,8 @@ func (p *Peer) doRpc(rpc, rq string) {
}
func connectRpc() {
log.Print("Establishing RPC connections")
servers := GetConfKey("servers").(map[interface{}]interface{})
for server := range servers {
clt := &Peer{username: "rpc"}
@@ -203,11 +205,11 @@ func connectRpc() {
if ch == rpcCh {
switch sig := pkt.Data[2]; sig {
case ModChSigJoinOk:
srv.useRpc = true
srv.SetUseRpc(true)
case ModChSigSetState:
state := pkt.Data[5+chlen]
if state == ModChStateRO {
srv.useRpc = false
srv.SetUseRpc(false)
}
}
}
@@ -298,11 +300,11 @@ func startRpc() {
if ch == rpcCh {
switch sig := pkt.Data[2]; sig {
case ModChSigJoinOk:
srv.useRpc = true
srv.SetUseRpc(true)
case ModChSigSetState:
state := pkt.Data[5+chlen]
if state == ModChStateRO {
srv.useRpc = false
srv.SetUseRpc(false)
}
}
}
@@ -319,5 +321,7 @@ func startRpc() {
}
func init() {
rpcSrvMu.Lock()
rpcSrvs = make(map[*Peer]struct{})
rpcSrvMu.Unlock()
}