Update tests

parent 6226842691
commit 6add4ce2b9
@@ -5,7 +5,6 @@ const mapSeries = require('p-each-series')
const path = require('path')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
config,
@@ -55,9 +54,9 @@ Object.keys(testAPIs).forEach(API => {
await stopIpfs(ipfsd2)
})

beforeEach(() => {
orbitdb1 = new OrbitDB(ipfs1, './orbitdb/1')
orbitdb2 = new OrbitDB(ipfs2, './orbitdb/2')
beforeEach(async () => {
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: './orbitdb/1' })
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: './orbitdb/2' })
})

afterEach(async () => {
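Note: this hunk captures the migration that recurs throughout these tests, from the synchronous `new OrbitDB(ipfs, path)` constructor to the asynchronous `OrbitDB.createInstance(ipfs, options)` factory. A minimal sketch of the pattern, assuming an already-started IPFS instance (the helper name is illustrative, not taken from the diff):

// Sketch only: assumes an orbit-db version that exposes the async createInstance
// factory and an `ipfs` node that is already online.
const OrbitDB = require('orbit-db')

async function openOrbitDB (ipfs) {
  // Old style, as removed from these tests:
  //   const orbitdb = new OrbitDB(ipfs, './orbitdb/1')
  // New style: async factory taking an options object
  const orbitdb = await OrbitDB.createInstance(ipfs, { directory: './orbitdb/1' })
  return orbitdb
}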
@@ -100,11 +99,13 @@ Object.keys(testAPIs).forEach(API => {

it('syncs counters', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
// Set write access for both clients
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
}
}

const numbers = [[13, 10], [2, 5]]
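Note: across the suite the flat `write: [orbitdb.key.getPublic('hex'), ...]` option becomes a nested `accessController.write` list of `identity.publicKey` values. A hedged sketch of the new option shape (the database name is illustrative):

// Sketch: the option shape mirrors what the updated test passes to the counter store.
async function createSharedCounter (orbitdb1, orbitdb2) {
  const options = {
    accessController: {
      // Grant write access to both identities
      write: [
        orbitdb1.identity.publicKey,
        orbitdb2.identity.publicKey
      ]
    }
  }
  return orbitdb1.counter('shared-counter', options)   // 'shared-counter' is a made-up name
}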
@@ -9,7 +9,7 @@ const levelup = require('levelup')
const leveldown = require('leveldown')
const OrbitDB = require('../src/OrbitDB')
const OrbitDBAddress = require('../src/orbit-db-address')

const io = require('orbit-db-io')
// Include test utilities
const {
config,
@@ -34,11 +34,11 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb = new OrbitDB(ipfs, dbPath)
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
})

after(async () => {
if(orbitdb)
if(orbitdb)
await orbitdb.stop()

if (ipfsd)
@@ -104,8 +104,8 @@ Object.keys(testAPIs).forEach(API => {

it('database has the correct address', async () => {
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
assert.equal(db.address.toString().indexOf('Qm'), 9)
assert.equal(db.address.toString().indexOf('second'), 56)
assert.equal(db.address.toString().indexOf('zd'), 9)
assert.equal(db.address.toString().indexOf('second'), 59)
})

it('saves the database locally', async () => {
@@ -132,8 +132,7 @@ Object.keys(testAPIs).forEach(API => {
})

it('saves database manifest file locally', async () => {
const dag = await ipfs.object.get(db.address.root)
const manifest = JSON.parse(dag.toJSON().data)
const manifest = await io.read(ipfs, db.address.root)
assert.notEqual(manifest, )
assert.equal(manifest.name, 'second')
assert.equal(manifest.type, 'feed')
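Note: the manifest is now fetched through orbit-db-io instead of `ipfs.object.get`, and database roots become CIDv1 strings starting with 'zd' (49 characters) rather than 'Qm' multihashes (46 characters), which is why the index assertions shift from 56 to 59. A small sketch of the new lookup (error handling omitted):

// Sketch: reading a database manifest via orbit-db-io, assuming an address of the
// form /orbitdb/<zd...CID>/<name> as used in the updated assertions.
const io = require('orbit-db-io')

async function readManifest (ipfs, dbAddress) {
  // dbAddress.root is the CID portion of the OrbitDB address
  const manifest = await io.read(ipfs, dbAddress.root)
  return manifest   // the test expects at least { name, type } fields
}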
@ -165,22 +164,21 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
it('creates an access controller and adds ourselves as writer by default', async () => {
|
||||
db = await orbitdb.create('fourth', 'feed')
|
||||
assert.deepEqual(db.access.write, [orbitdb.key.getPublic('hex')])
|
||||
assert.deepEqual(db.access.write, [orbitdb.identity.publicKey])
|
||||
})
|
||||
|
||||
it('creates an access controller and adds writers', async () => {
|
||||
db = await orbitdb.create('fourth', 'feed', { write: ['another-key', 'yet-another-key', orbitdb.key.getPublic('hex')] })
|
||||
assert.deepEqual(db.access.write, ['another-key', 'yet-another-key', orbitdb.key.getPublic('hex')])
|
||||
})
|
||||
|
||||
it('creates an access controller and doesn\'t add an admin', async () => {
|
||||
db = await orbitdb.create('sixth', 'feed')
|
||||
assert.deepEqual(db.access.admin, [])
|
||||
db = await orbitdb.create('fourth', 'feed', {
|
||||
accessController: {
|
||||
write: ['another-key', 'yet-another-key', orbitdb.identity.publicKey]
|
||||
}
|
||||
})
|
||||
assert.deepEqual(db.access.write, ['another-key', 'yet-another-key', orbitdb.identity.publicKey])
|
||||
})
|
||||
|
||||
it('creates an access controller and doesn\'t add read access keys', async () => {
|
||||
db = await orbitdb.create('seventh', 'feed', { read: ['one', 'two'] })
|
||||
assert.deepEqual(db.access.read, [])
|
||||
assert.deepEqual(db.access.write, [orbitdb.identity.publicKey])
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -222,7 +220,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns the address that would have been created', async () => {
|
||||
db = await orbitdb.create('third', 'feed', { replicate: false })
|
||||
assert.equal(address.toString().indexOf('/orbitdb'), 0)
|
||||
assert.equal(address.toString().indexOf('Qm'), 9)
|
||||
assert.equal(address.toString().indexOf('zd'), 9)
|
||||
assert.equal(address.toString(), db.address.toString())
|
||||
})
|
||||
})
|
||||
@ -256,21 +254,21 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('opens a database - name only', async () => {
|
||||
db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true })
|
||||
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
|
||||
assert.equal(db.address.toString().indexOf('Qm'), 9)
|
||||
assert.equal(db.address.toString().indexOf('abc'), 56)
|
||||
assert.equal(db.address.toString().indexOf('zd'), 9)
|
||||
assert.equal(db.address.toString().indexOf('abc'), 59)
|
||||
})
|
||||
|
||||
it('opens the same database - from an address', async () => {
|
||||
db = await orbitdb.open(db.address)
|
||||
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
|
||||
assert.equal(db.address.toString().indexOf('Qm'), 9)
|
||||
assert.equal(db.address.toString().indexOf('abc'), 56)
|
||||
assert.equal(db.address.toString().indexOf('zd'), 9)
|
||||
assert.equal(db.address.toString().indexOf('abc'), 59)
|
||||
})
|
||||
|
||||
it('opens a database and adds the creator as the only writer', async () => {
|
||||
db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true, write: [] })
|
||||
db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true })
|
||||
assert.equal(db.access.write.length, 1)
|
||||
assert.equal(db.access.write[0], db.key.getPublic('hex'))
|
||||
assert.equal(db.access.write[0], db.identity.publicKey)
|
||||
})
|
||||
|
||||
it('doesn\'t open a database if we don\'t have it locally', async () => {
|
||||
@ -298,6 +296,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
await db.add('hello2')
|
||||
|
||||
db = await orbitdb.open(db.address)
|
||||
|
||||
await db.load()
|
||||
const res = db.iterator({ limit: -1 }).collect()
|
||||
|
||||
@ -307,5 +306,4 @@ Object.keys(testAPIs).forEach(API => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
@ -39,7 +39,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb = new OrbitDB(ipfs, dbPath)
|
||||
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
const assert = require('assert')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
const CustomCache = require('orbit-db-cache')
|
||||
// Include test utilities
|
||||
@ -29,7 +30,8 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1', {
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, {
|
||||
directory: path.join(dbPath, '1'),
|
||||
cache: CustomTestCache
|
||||
})
|
||||
})
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
const assert = require('assert')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
// Include test utilities
|
||||
const {
|
||||
@ -28,8 +29,9 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1', {
|
||||
keystore: CustomTestKeystore
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, {
|
||||
directory: path.join(dbPath, '1'),
|
||||
keystore: CustomTestKeystore().create()
|
||||
})
|
||||
})
|
||||
|
||||
@ -58,10 +60,10 @@ Object.keys(testAPIs).forEach(API => {
|
||||
databases.forEach(async (database) => {
|
||||
it(database.type + ' allows custom keystore', async () => {
|
||||
const options = {
|
||||
// Set write access for both clients
|
||||
write: [
|
||||
orbitdb1.key.getPublic('hex')
|
||||
],
|
||||
accessController: {
|
||||
// Set write access for both clients
|
||||
write: [orbitdb1.identity.publicKey],
|
||||
}
|
||||
}
|
||||
|
||||
const db1 = await database.create(orbitdb1, 'custom-keystore', options)
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
const assert = require('assert')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
|
||||
// Include test utilities
|
||||
@ -26,14 +27,14 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(config.daemon1.repo)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if (ipfsd)
|
||||
if (ipfsd)
|
||||
await stopIpfs(ipfsd)
|
||||
})
|
||||
|
||||
@ -141,11 +142,11 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const doc2 = { _id: 'sup world', doc: 'some of the things', views: 10}
|
||||
|
||||
const expectedOperation = {
|
||||
op: 'PUT',
|
||||
key: 'sup world',
|
||||
value: {
|
||||
_id: 'sup world',
|
||||
doc: 'some of the things',
|
||||
op: 'PUT',
|
||||
key: 'sup world',
|
||||
value: {
|
||||
_id: 'sup world',
|
||||
doc: 'some of the things',
|
||||
views: 10
|
||||
},
|
||||
}
|
||||
@ -160,7 +161,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
assert.deepEqual(res.payload, expectedOperation)
|
||||
assert.notEqual(res.next, undefined)
|
||||
assert.equal(res.next.length, 1)
|
||||
assert.equal(res.v, 0)
|
||||
assert.equal(res.v, 1)
|
||||
assert.notEqual(res.clock, undefined)
|
||||
assert.equal(res.clock.time, 2)
|
||||
assert.notEqual(res.key, undefined)
|
||||
@ -170,10 +171,10 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
describe('Specified index', function() {
|
||||
beforeEach(async () => {
|
||||
const options = {
|
||||
indexBy: 'doc',
|
||||
replicate: false,
|
||||
maxHistory: 0
|
||||
const options = {
|
||||
indexBy: 'doc',
|
||||
replicate: false,
|
||||
maxHistory: 0
|
||||
}
|
||||
db = await orbitdb1.docstore(config.dbname, options)
|
||||
})
|
||||
|
@ -30,11 +30,11 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb = new OrbitDB(ipfs, dbPath)
|
||||
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb)
|
||||
if(orbitdb)
|
||||
await orbitdb.stop()
|
||||
|
||||
if (ipfsd)
|
||||
@ -53,7 +53,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('removes local database files', async () => {
|
||||
await db.drop()
|
||||
assert.equal(fs.existsSync(localDataPath), false)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -3,6 +3,7 @@
|
||||
const assert = require('assert')
|
||||
const mapSeries = require('p-map-series')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
|
||||
// Include test utilities
|
||||
@ -30,11 +31,11 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if (ipfsd)
|
||||
@@ -87,12 +88,12 @@ Object.keys(testAPIs).forEach(API => {

it('adds an item that is > 256 bytes', async () => {
db = await orbitdb1.eventlog('third database')
let msg = new Buffer(1024)
let msg = Buffer.alloc(1024)
msg.fill('a')
const hash = await db.add(msg.toString())
assert.notEqual(hash, null)
assert.equal(hash.startsWith('Qm'), true)
assert.equal(hash.length, 46)
assert.equal(hash.startsWith('zd'), true)
assert.equal(hash.length, 49)
})
})

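Note: `new Buffer(1024)` is deprecated in current Node.js because it can expose uninitialized memory; `Buffer.alloc(1024)` returns a zero-filled buffer of the same length. A short sketch of the replacement:

// Deprecated: may allocate uninitialized memory and emits a runtime warning
// const msg = new Buffer(1024)

// Preferred: explicit, zero-filled allocation
const msg = Buffer.alloc(1024)
msg.fill('a')              // same fill call as before
console.log(msg.length)    // 1024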
@ -118,7 +119,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const iter = db.iterator()
|
||||
const next = iter.next().value
|
||||
assert.notEqual(next, null)
|
||||
assert.equal(next.hash.startsWith('Qm'), true)
|
||||
assert.equal(next.hash.startsWith('zd'), true)
|
||||
assert.equal(next.payload.key, null)
|
||||
assert.equal(next.payload.value, 'hello4')
|
||||
})
|
||||
|
@ -3,6 +3,7 @@
|
||||
const assert = require('assert')
|
||||
const mapSeries = require('p-map-series')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
|
||||
// Include test utilities
|
||||
@ -30,11 +31,11 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if (ipfsd)
|
||||
@ -61,20 +62,20 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const hash = await db.add('hello1')
|
||||
const items = db.iterator({ limit: -1 }).collect()
|
||||
assert.notEqual(hash, null)
|
||||
assert.equal(hash, last(items).hash)
|
||||
assert.equal(hash, last(items).cid)
|
||||
assert.equal(items.length, 1)
|
||||
})
|
||||
|
||||
it('returns the added entry\'s hash, 2 entries', async () => {
|
||||
db = await orbitdb1.feed(address)
|
||||
await db.load()
|
||||
const prevHash = db.iterator().collect()[0].hash
|
||||
const prevHash = db.iterator().collect()[0].cid
|
||||
const hash = await db.add('hello2')
|
||||
const items = db.iterator({ limit: -1 }).collect()
|
||||
assert.equal(items.length, 2)
|
||||
assert.notEqual(hash, null)
|
||||
assert.notEqual(hash, prevHash)
|
||||
assert.equal(hash, last(items).hash)
|
||||
assert.equal(hash, last(items).cid)
|
||||
})
|
||||
|
||||
it('adds five items', async () => {
|
||||
@ -88,12 +89,12 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
it('adds an item that is > 256 bytes', async () => {
|
||||
db = await orbitdb1.feed('third')
|
||||
let msg = new Buffer(1024)
|
||||
let msg = Buffer.alloc(1024)
|
||||
msg.fill('a')
|
||||
const hash = await db.add(msg.toString())
|
||||
assert.notEqual(hash, null)
|
||||
assert.equal(hash.startsWith('Qm'), true)
|
||||
assert.equal(hash.length, 46)
|
||||
assert.equal(hash.startsWith('zd'), true)
|
||||
assert.equal(hash.length, 49)
|
||||
})
|
||||
|
||||
it('deletes an item when only one item in the database', async () => {
|
||||
@ -101,7 +102,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const hash = await db.add('hello3')
|
||||
const delopHash = await db.remove(hash)
|
||||
const items = db.iterator().collect()
|
||||
assert.equal(delopHash.startsWith('Qm'), true)
|
||||
assert.equal(delopHash.startsWith('zd'), true)
|
||||
assert.equal(items.length, 0)
|
||||
})
|
||||
|
||||
@@ -129,7 +130,7 @@ Object.keys(testAPIs).forEach(API => {

const firstItem = items[0]
const secondItem = items[1]
assert.equal(firstItem.hash.startsWith('Qm'), true)
assert.equal(firstItem.cid.startsWith('zd'), true)
assert.equal(firstItem.payload.key, null)
assert.equal(firstItem.payload.value, 'hello2')
assert.equal(secondItem.payload.value, 'hello3')
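Note: iterator entries are now identified by `entry.cid` (a 'zd...' CID string) rather than `entry.hash` (a 'Qm...' multihash) throughout the feed and eventlog tests. A hedged sketch of reading entries under the new field name:

// Sketch: collecting entry identifiers from a feed, assuming entries carry a `cid`
// field as asserted in the updated tests.
async function listEntryCids (db) {
  await db.load()
  return db.iterator({ limit: -1 })
    .collect()
    .map((entry) => entry.cid)   // previously entry.hash
}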
@ -158,7 +159,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const iter = db.iterator()
|
||||
const next = iter.next().value
|
||||
assert.notEqual(next, null)
|
||||
assert.equal(next.hash.startsWith('Qm'), true)
|
||||
assert.equal(next.cid.startsWith('zd'), true)
|
||||
assert.equal(next.payload.key, null)
|
||||
assert.equal(next.payload.value, 'hello4')
|
||||
})
|
||||
@ -177,7 +178,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const iter = db.iterator()
|
||||
const first = iter.next().value
|
||||
const second = iter.next().value
|
||||
assert.equal(first.hash, hashes[hashes.length - 1])
|
||||
assert.equal(first.cid, hashes[hashes.length - 1])
|
||||
assert.equal(second, null)
|
||||
assert.equal(first.payload.value, 'hello4')
|
||||
})
|
||||
@ -217,7 +218,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const iter = db.iterator({ limit: 0 })
|
||||
const first = iter.next().value
|
||||
const second = iter.next().value
|
||||
assert.equal(first.hash, last(hashes))
|
||||
assert.equal(first.cid, last(hashes))
|
||||
assert.equal(second, null)
|
||||
})
|
||||
|
||||
@ -225,7 +226,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const iter = db.iterator({ limit: 1 })
|
||||
const first = iter.next().value
|
||||
const second = iter.next().value
|
||||
assert.equal(first.hash, last(hashes))
|
||||
assert.equal(first.cid, last(hashes))
|
||||
assert.equal(second, null)
|
||||
})
|
||||
|
||||
@ -235,16 +236,16 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const second = iter.next().value
|
||||
const third = iter.next().value
|
||||
const fourth = iter.next().value
|
||||
assert.equal(first.hash, hashes[hashes.length - 3])
|
||||
assert.equal(second.hash, hashes[hashes.length - 2])
|
||||
assert.equal(third.hash, hashes[hashes.length - 1])
|
||||
assert.equal(first.cid, hashes[hashes.length - 3])
|
||||
assert.equal(second.cid, hashes[hashes.length - 2])
|
||||
assert.equal(third.cid, hashes[hashes.length - 1])
|
||||
assert.equal(fourth, null)
|
||||
})
|
||||
|
||||
it('returns all items', () => {
|
||||
const messages = db.iterator({ limit: -1 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
messages.reverse()
|
||||
assert.equal(messages.length, hashes.length)
|
||||
@ -254,7 +255,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns all items when limit is bigger than -1', () => {
|
||||
const messages = db.iterator({ limit: -300 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, hashes.length)
|
||||
assert.equal(messages[0], hashes[0])
|
||||
@ -263,7 +264,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns all items when limit is bigger than number of items', () => {
|
||||
const messages = db.iterator({ limit: 300 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, hashes.length)
|
||||
assert.equal(messages[0], hashes[0])
|
||||
@ -275,7 +276,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns 1 item when gte is the head', () => {
|
||||
const messages = db.iterator({ gte: last(hashes), limit: -1 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, 1)
|
||||
assert.equal(messages[0], last(hashes))
|
||||
@ -290,7 +291,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const gte = hashes[hashes.length - 2]
|
||||
const messages = db.iterator({ gte: gte, limit: -1 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, 2)
|
||||
assert.equal(messages[0], hashes[hashes.length - 2])
|
||||
@ -300,7 +301,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns all items when gte is the root item', () => {
|
||||
const messages = db.iterator({ gte: hashes[0], limit: -1 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, hashes.length)
|
||||
assert.equal(messages[0], hashes[0])
|
||||
@ -310,7 +311,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns items when gt is the root item', () => {
|
||||
const messages = db.iterator({ gt: hashes[0], limit: -1 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, itemCount - 1)
|
||||
assert.equal(messages[0], hashes[1])
|
||||
@ -320,13 +321,13 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns items when gt is defined', () => {
|
||||
const messages = db.iterator({ limit: -1})
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
const gt = messages[2]
|
||||
|
||||
const messages2 = db.iterator({ gt: gt, limit: 100 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages2.length, 2)
|
||||
assert.equal(messages2[0], messages[messages.length - 2])
|
||||
@ -338,7 +339,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns one item after head when lt is the head', () => {
|
||||
const messages = db.iterator({ lt: last(hashes) })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, 1)
|
||||
assert.equal(messages[0], hashes[hashes.length - 2])
|
||||
@ -347,7 +348,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns all items when lt is head and limit is -1', () => {
|
||||
const messages = db.iterator({ lt: last(hashes), limit: -1 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, hashes.length - 1)
|
||||
assert.equal(messages[0], hashes[0])
|
||||
@ -357,7 +358,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns 3 items when lt is head and limit is 3', () => {
|
||||
const messages = db.iterator({ lt: last(hashes), limit: 3 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, 3)
|
||||
assert.equal(messages[0], hashes[hashes.length - 4])
|
||||
@ -372,7 +373,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns one item when lte is the root item', () => {
|
||||
const messages = db.iterator({ lte: hashes[0] })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, 1)
|
||||
assert.equal(messages[0], hashes[0])
|
||||
@ -381,7 +382,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns all items when lte is the head', () => {
|
||||
const messages = db.iterator({ lte: last(hashes), limit: -1 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, itemCount)
|
||||
assert.equal(messages[0], hashes[0])
|
||||
@ -391,7 +392,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
it('returns 3 items when lte is the head', () => {
|
||||
const messages = db.iterator({ lte: last(hashes), limit: 3 })
|
||||
.collect()
|
||||
.map((e) => e.hash)
|
||||
.map((e) => e.cid)
|
||||
|
||||
assert.equal(messages.length, 3)
|
||||
assert.equal(messages[0], hashes[hashes.length - 3])
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
const assert = require('assert')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
|
||||
// Include test utilities
|
||||
@ -27,11 +28,11 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if (ipfsd)
|
||||
|
@ -80,15 +80,15 @@ Object.keys(testAPIs).forEach(API => {
|
||||
ipfs2 = ipfsd2.api
|
||||
// Connect the peers manually to speed up test times
|
||||
await connectPeers(ipfs1, ipfs2)
|
||||
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
|
||||
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
|
||||
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if(orbitdb2)
|
||||
if(orbitdb2)
|
||||
await orbitdb2.stop()
|
||||
|
||||
if (ipfsd1)
|
||||
@ -102,8 +102,8 @@ Object.keys(testAPIs).forEach(API => {
|
||||
let options = {}
|
||||
// Set write access for both clients
|
||||
options.write = [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
orbitdb1.identity.publicKey,
|
||||
orbitdb2.identity.publicKey
|
||||
],
|
||||
|
||||
console.log("Creating databases and waiting for peers to connect")
|
||||
@ -117,7 +117,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
localDatabases.push(db)
|
||||
}
|
||||
|
||||
// Open the databases on the second node, set 'sync' flag so that
|
||||
// Open the databases on the second node, set 'sync' flag so that
|
||||
// the second peer fetches the db manifest from the network
|
||||
options = Object.assign({}, options, { sync: true })
|
||||
for (let [index, dbInterface] of databaseInterfaces.entries()) {
|
||||
@ -149,7 +149,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
for (let i = 1; i < entryCount + 1; i ++)
|
||||
entryArr.push(i)
|
||||
|
||||
// Result state,
|
||||
// Result state,
|
||||
// we count how many times 'replicated' event was fired per db
|
||||
let replicated = {}
|
||||
localDatabases.forEach(db => {
|
||||
|
@ -1,265 +0,0 @@
|
||||
'use strict'
|
||||
|
||||
const fs = require('fs')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const assert = require('assert')
|
||||
const pMap = require('p-map')
|
||||
const pEachSeries = require('p-each-series')
|
||||
const pWhilst = require('p-whilst')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
const startIpfs = require('./utils/start-ipfs')
|
||||
|
||||
// Settings for the test ipfs daemons
|
||||
const config = require('./utils/config.js')
|
||||
|
||||
describe.skip('OrbitDB - Network Stress Tests', function() {
|
||||
// We need a huge timeout since we're running
|
||||
// very long-running tests (takes minutes)
|
||||
this.timeout(1000 * 60 * 60) // 1 hour
|
||||
|
||||
const tests = [
|
||||
{
|
||||
description: '1 update - 2 peers - as fast as possible',
|
||||
updates: 1,
|
||||
maxInterval: -1,
|
||||
minInterval: 0,
|
||||
sequential: false,
|
||||
content: 'Hello #',
|
||||
clients: [
|
||||
{ name: 'daemon1' },
|
||||
{ name: 'daemon2' },
|
||||
// { name: 'daemon3' },
|
||||
// { name: 'daemon4' },
|
||||
// { name: 'daemon5' },
|
||||
// { name: 'daemon6' },
|
||||
// Don't go beyond 6...
|
||||
// { name: 'daemon7' },
|
||||
// { name: 'daemon8' },
|
||||
],
|
||||
},
|
||||
{
|
||||
description: '32 update - concurrent - 2 peers - random interval',
|
||||
updates: 32,
|
||||
maxInterval: 2000,
|
||||
minInterval: 10,
|
||||
sequential: false,
|
||||
content: 'Hello random! ',
|
||||
clients: [
|
||||
{ name: 'daemon1' },
|
||||
{ name: 'daemon2' },
|
||||
],
|
||||
},
|
||||
{
|
||||
description: '1000 update concurrently - 2 peers - as fast as possible',
|
||||
updates: 1000,
|
||||
maxInterval: -1,
|
||||
minInterval: 0,
|
||||
sequential: false,
|
||||
content: 'Hello #',
|
||||
clients: [
|
||||
{ name: 'daemon1' },
|
||||
{ name: 'daemon2' },
|
||||
],
|
||||
},
|
||||
{
|
||||
description: '200 update as Buffers sequentially - 2 peers - as fast as possible',
|
||||
updates: 200,
|
||||
maxInterval: -1,
|
||||
minInterval: 0,
|
||||
sequential: true,
|
||||
content: Buffer.from('👻'),
|
||||
clients: [
|
||||
{ name: 'daemon1' },
|
||||
{ name: 'daemon2' },
|
||||
],
|
||||
},
|
||||
{
|
||||
description: '50 update over a period long time - 6 peers - slow, random write intervals',
|
||||
updates: 50,
|
||||
maxInterval: 3000,
|
||||
minInterval: 1000,
|
||||
sequential: false,
|
||||
content: 'Terve! ',
|
||||
clients: [
|
||||
{ name: 'daemon1' },
|
||||
{ name: 'daemon2' },
|
||||
{ name: 'daemon3' },
|
||||
{ name: 'daemon4' },
|
||||
{ name: 'daemon5' },
|
||||
{ name: 'daemon6' },
|
||||
],
|
||||
},
|
||||
{
|
||||
description: '50 update over a period long time - 8 peers - slow, random write intervals',
|
||||
updates: 100,
|
||||
maxInterval: 3000,
|
||||
minInterval: 1000,
|
||||
sequential: false,
|
||||
content: 'Terve! ',
|
||||
clients: [
|
||||
{ name: 'daemon1' },
|
||||
{ name: 'daemon2' },
|
||||
{ name: 'daemon3' },
|
||||
{ name: 'daemon4' },
|
||||
{ name: 'daemon5' },
|
||||
{ name: 'daemon6' },
|
||||
{ name: 'daemon7' },
|
||||
{ name: 'daemon8' },
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
const rootPath = './orbitdb/network-tests/'
|
||||
const channelName = 'orbitdb-network-stress-tests'
|
||||
|
||||
tests.forEach(test => {
|
||||
it(test.description, (done) => {
|
||||
const updateCount = test.updates
|
||||
const maxInterval = test.maxInterval || -1
|
||||
const minInterval = test.minInterval || 0
|
||||
const sequential = test.sequential
|
||||
const clientData = test.clients
|
||||
|
||||
rmrf.sync(rootPath)
|
||||
|
||||
// Create IPFS instances
|
||||
const createIpfsInstance = (c) => {
|
||||
const repoPath = path.join(rootPath, c.name, '/ipfs' + new Date().getTime())
|
||||
console.log("Starting IPFS instance <<>>", repoPath)
|
||||
return startIpfs(Object.assign({}, config.defaultIpfsConfig, {
|
||||
repo: repoPath,
|
||||
start: true,
|
||||
}))
|
||||
}
|
||||
|
||||
const createOrbitDB = async (databaseConfig, ipfs) => {
|
||||
const orbitdb = new OrbitDB(ipfs, path.join('./orbitdb/network-tests/', databaseConfig.name))
|
||||
const db = await orbitdb.eventlog(databaseConfig.address, {
|
||||
write: ['*']
|
||||
})
|
||||
return db
|
||||
}
|
||||
|
||||
let allTasks = []
|
||||
|
||||
const setupAllTasks = (databases) => {
|
||||
// Create the payloads
|
||||
let texts = []
|
||||
for (let i = 1; i < updateCount + 1; i ++) {
|
||||
texts.push(test.content + i)
|
||||
}
|
||||
|
||||
const setupUpdates = (client) => texts.reduce((res, acc) => {
|
||||
return res.concat([{ db: client, content: acc }])
|
||||
}, [])
|
||||
|
||||
allTasks = databases.map(db => {
|
||||
return {
|
||||
name: db.id,
|
||||
tasks: setupUpdates(db),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const runAllTasks = () => {
|
||||
if (sequential) {
|
||||
return pEachSeries(allTasks, e => pEachSeries(e.tasks, writeToDB))
|
||||
.then(() => console.log())
|
||||
} else {
|
||||
return pMap(allTasks, e => pEachSeries(e.tasks, writeToDB))
|
||||
.then(() => console.log())
|
||||
}
|
||||
}
|
||||
|
||||
let i = 0
|
||||
const writeToDB = (task) => {
|
||||
return new Promise((resolve, reject) => {
|
||||
if (maxInterval === -1) {
|
||||
task.db.add(task.content)
|
||||
.then(() => process.stdout.write(`\rUpdates (${databases.length} peers): ${Math.floor(++i)} / ${updateCount}`))
|
||||
.then(resolve)
|
||||
.catch(reject)
|
||||
} else {
|
||||
setTimeout(() => {
|
||||
task.db.add(task.content)
|
||||
.then(() => process.stdout.write(`\rUpdates (${databases.length} peers): ${Math.floor(++i)} / ${updateCount}`))
|
||||
.then(resolve)
|
||||
.catch(reject)
|
||||
}, Math.floor(Math.random() * maxInterval) + minInterval)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const waitForAllTasks = (address) => {
|
||||
let msgCount = 0
|
||||
return pWhilst(
|
||||
() => msgCount < databases.length * databases.length * updateCount,
|
||||
() => new Promise(resolve => {
|
||||
return queryDatabases(address)
|
||||
.then(res => {
|
||||
msgCount = res.reduce((val, acc) => val += acc.length, 0)
|
||||
})
|
||||
.then(() => process.stdout.write(`\rUpdated (${databases.length} peers): ` + msgCount.toString() + ' / ' + (updateCount * databases.length * databases.length)))
|
||||
.then(() => setTimeout(resolve, 100))
|
||||
})
|
||||
)
|
||||
.then(() => process.stdout.write(`\rUpdated (${databases.length} peers): ` + msgCount.toString() + ' / ' + (updateCount * databases.length * databases.length) + '\n'))
|
||||
}
|
||||
|
||||
const queryDatabases = () => {
|
||||
return pMap(databases, db => db.iterator({ limit: -1 }).collect(), { concurrency: 2 })
|
||||
}
|
||||
|
||||
// All our databases instances
|
||||
let databases = []
|
||||
let addr
|
||||
|
||||
// Start the test
|
||||
pMap(clientData, (c, idx) => {
|
||||
return createIpfsInstance(c)
|
||||
.then(async (ipfs) => {
|
||||
let db
|
||||
if (idx === 0 && !addr) {
|
||||
c.address = channelName
|
||||
db = await createOrbitDB(c, ipfs)
|
||||
addr = db.address.toString()
|
||||
} else if (addr) {
|
||||
c.address = addr
|
||||
db = await createOrbitDB(c, ipfs)
|
||||
} else {
|
||||
console.error("Address not defined!")
|
||||
}
|
||||
return db
|
||||
})
|
||||
}, { concurrency: 1 })
|
||||
.then((result) => databases = result)
|
||||
.then(() => setupAllTasks(databases))
|
||||
.then(() => console.log(`Applying ${updateCount} updates per peer. This will take a while...`))
|
||||
.then(() => runAllTasks())
|
||||
.then(() => console.log('Done. Waiting for all updates to reach the peers...'))
|
||||
.then(() => waitForAllTasks(addr))
|
||||
.then(() => queryDatabases())
|
||||
.then((result) => {
|
||||
// Both databases have the same amount of entries
|
||||
result.forEach(entries => {
|
||||
assert.equal(entries.length, updateCount * databases.length)
|
||||
})
|
||||
|
||||
// Both databases have the same entries in the same order
|
||||
result.reduce((prev, entries) => {
|
||||
assert.deepEqual(entries, prev)
|
||||
return entries
|
||||
}, result[0])
|
||||
|
||||
// Success! Cleanup and finish
|
||||
pEachSeries(databases, db => {
|
||||
db.close()
|
||||
db._ipfs.stop()
|
||||
})
|
||||
.then(() => done())
|
||||
})
|
||||
.catch(done)
|
||||
})
|
||||
})
|
||||
})
|
@ -27,7 +27,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb = new OrbitDB(ipfs, dbPath)
|
||||
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
@@ -50,43 +50,42 @@ Object.keys(testAPIs).forEach(API => {
})

it('parse address successfully', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC/first-database'
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
const result = OrbitDB.parseAddress(address)

const isInstanceOf = result instanceof OrbitDBAddress
assert.equal(isInstanceOf, true)

assert.equal(result.root, 'Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC')
assert.equal(result.root, 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13')
assert.equal(result.path, 'first-database')

assert.equal(result.toString().indexOf('/orbitdb'), 0)
assert.equal(result.toString().indexOf('Qm'), 9)
assert.equal(result.toString().indexOf('zd'), 9)
})
})

describe('isValid Address', () => {
it('throws an error if address is empty', () => {
assert.throws(() => {
const result = OrbitDB.isValidAddress('')
})
it('returns false for empty string', () => {
const result = OrbitDB.isValidAddress('')
assert.equal(result, false)
})

it('validate address successfully', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC/first-database'
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
const result = OrbitDB.isValidAddress(address)

assert.equal(result, true)
})

it('handle missing orbitdb prefix', () => {
const address = 'Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC/first-database'
const address = 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
const result = OrbitDB.isValidAddress(address)

assert.equal(result, true)
})

it('handle missing db address name', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC'
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const result = OrbitDB.isValidAddress(address)

assert.equal(result, true)
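Note: OrbitDB.isValidAddress now reports an invalid (or empty) address by returning false instead of throwing, so callers can branch on the boolean. A minimal usage sketch (the address handling is illustrative):

// Sketch: validating and parsing addresses with the non-throwing API shown above.
const OrbitDB = require('orbit-db')

function describeAddress (address) {
  if (!OrbitDB.isValidAddress(address)) {
    return 'not an OrbitDB address'
  }
  const parsed = OrbitDB.parseAddress(address)   // returns an OrbitDBAddress instance
  return `root=${parsed.root} path=${parsed.path}`
}

describeAddress('')   // 'not an OrbitDB address', no try/catch needed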
@ -3,6 +3,7 @@
|
||||
const assert = require('assert')
|
||||
const mapSeries = require('p-map-series')
|
||||
const rmrf = require('rimraf')
|
||||
const path = require('path')
|
||||
const OrbitDB = require('../src/OrbitDB')
|
||||
|
||||
// Include test utilities
|
||||
@ -20,11 +21,11 @@ const ipfsPath = './orbitdb/tests/persistency/ipfs'
|
||||
const tests = [
|
||||
{
|
||||
title: 'Persistency',
|
||||
orbitDBConfig: {}
|
||||
orbitDBConfig: { directory: path.join(dbPath, '1') }
|
||||
},
|
||||
{
|
||||
title: 'Persistency with custom cache',
|
||||
orbitDBConfig: { cache: CustomTestCache }
|
||||
orbitDBConfig: { directory: path.join(dbPath, '1'), cache: CustomTestCache }
|
||||
}
|
||||
]
|
||||
|
||||
@ -43,7 +44,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1', test.orbitDBConfig)
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, test.orbitDBConfig)
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
|
@ -38,17 +38,17 @@ Object.keys(testAPIs).forEach(API => {
|
||||
ipfsd2 = await startIpfs(API, config.daemon2)
|
||||
ipfs1 = ipfsd1.api
|
||||
ipfs2 = ipfsd2.api
|
||||
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
|
||||
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
|
||||
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
|
||||
// Connect the peers manually to speed up test times
|
||||
await connectPeers(ipfs1, ipfs2)
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if(orbitdb2)
|
||||
if(orbitdb2)
|
||||
await orbitdb2.stop()
|
||||
|
||||
if (ipfsd1)
|
||||
@ -63,8 +63,8 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const openDatabases1 = async (options) => {
|
||||
// Set write access for both clients
|
||||
options.write = [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
orbitdb1.identity.publicKey,
|
||||
orbitdb2.identity.publicKey
|
||||
],
|
||||
|
||||
options = Object.assign({}, options, { path: dbPath1 })
|
||||
@ -77,8 +77,8 @@ Object.keys(testAPIs).forEach(API => {
|
||||
const openDatabases = async (options) => {
|
||||
// Set write access for both clients
|
||||
options.write = [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
orbitdb1.identity.publicKey,
|
||||
orbitdb2.identity.publicKey
|
||||
],
|
||||
|
||||
options = Object.assign({}, options, { path: dbPath1, create: true })
|
||||
@ -129,10 +129,12 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
// Set write access for both clients
|
||||
let options = {
|
||||
write: [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
],
|
||||
accessController: {
|
||||
write: [
|
||||
orbitdb1.identity.publicKey,
|
||||
orbitdb2.identity.publicKey
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
// Get the previous address to make sure nothing mutates it
|
||||
|
@ -38,8 +38,8 @@ Object.keys(testAPIs).forEach(API => {
|
||||
ipfsd2 = await startIpfs(API, config.daemon2)
|
||||
ipfs1 = ipfsd1.api
|
||||
ipfs2 = ipfsd2.api
|
||||
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
|
||||
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
|
||||
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
|
||||
// Connect the peers manually to speed up test times
|
||||
await connectPeers(ipfs1, ipfs2)
|
||||
})
|
||||
@ -62,8 +62,8 @@ Object.keys(testAPIs).forEach(API => {
|
||||
let options = {}
|
||||
// Set write access for both clients
|
||||
options.write = [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
orbitdb1.identity.publicKey,
|
||||
orbitdb2.identity.publicKey
|
||||
],
|
||||
|
||||
options = Object.assign({}, options, { path: dbPath1 })
|
||||
@ -137,7 +137,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
reject(new Error("Should not receive the 'replicated' event!"))
|
||||
})
|
||||
|
||||
// Can't check this for now as db1 might've sent the heads to db2
|
||||
// Can't check this for now as db1 might've sent the heads to db2
|
||||
// before we subscribe to the event
|
||||
db2.events.on('replicate.progress', (address, hash, entry) => {
|
||||
try {
|
||||
|
@ -1,4 +1,4 @@
|
||||
'use strict'
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert')
|
||||
const mapSeries = require('p-each-series')
|
||||
@ -23,7 +23,7 @@ const ipfsPath2 = './orbitdb/tests/replication/2/ipfs'
|
||||
|
||||
Object.keys(testAPIs).forEach(API => {
|
||||
describe(`orbit-db - Replication (${API})`, function() {
|
||||
this.timeout(config.timeout)
|
||||
this.timeout(100000)
|
||||
|
||||
let ipfsd1, ipfsd2, ipfs1, ipfs2
|
||||
let orbitdb1, orbitdb2, db1, db2
|
||||
@ -45,10 +45,10 @@ Object.keys(testAPIs).forEach(API => {
|
||||
ipfs2 = ipfsd2.api
|
||||
// Use memory store for quicker tests
|
||||
const memstore = new MemStore()
|
||||
ipfs1.object.put = memstore.put.bind(memstore)
|
||||
ipfs1.object.get = memstore.get.bind(memstore)
|
||||
ipfs2.object.put = memstore.put.bind(memstore)
|
||||
ipfs2.object.get = memstore.get.bind(memstore)
|
||||
ipfs1.dag.put = memstore.put.bind(memstore)
|
||||
ipfs1.dag.get = memstore.get.bind(memstore)
|
||||
ipfs2.dag.put = memstore.put.bind(memstore)
|
||||
ipfs2.dag.get = memstore.get.bind(memstore)
|
||||
// Connect the peers manually to speed up test times
|
||||
await connectPeers(ipfs1, ipfs2)
|
||||
})
|
||||
@ -63,43 +63,43 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
beforeEach(async () => {
|
||||
clearInterval(timer)
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
|
||||
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
|
||||
|
||||
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
|
||||
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
|
||||
|
||||
options = {
|
||||
options = {
|
||||
// Set write access for both clients
|
||||
write: [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
],
|
||||
accessController: {
|
||||
write: [
|
||||
orbitdb1.identity.publicKey,
|
||||
orbitdb2.identity.publicKey
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
options = Object.assign({}, options, { path: dbPath1 })
|
||||
options = Object.assign({}, options, { directory: dbPath1 })
|
||||
db1 = await orbitdb1.eventlog('replication-tests', options)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
clearInterval(timer)
|
||||
options = {}
|
||||
|
||||
if (db1)
|
||||
await db1.drop()
|
||||
|
||||
if (db2)
|
||||
await db2.drop()
|
||||
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if(orbitdb2)
|
||||
if(orbitdb2)
|
||||
await orbitdb2.stop()
|
||||
})
|
||||
|
||||
it('replicates database of 1 entry', async () => {
|
||||
// Set 'sync' flag on. It'll prevent creating a new local database and rather
|
||||
// fetch the database from the network
|
||||
options = Object.assign({}, options, { path: dbPath2, sync: true })
|
||||
options = Object.assign({}, options, { directory: dbPath2, sync: true })
|
||||
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
|
||||
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
|
||||
|
||||
@ -115,7 +115,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
})
|
||||
|
||||
it('replicates database of 100 entries', async () => {
|
||||
options = Object.assign({}, options, { path: dbPath2, sync: true })
|
||||
options = Object.assign({}, options, { directory: dbPath2, sync: true })
|
||||
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
|
||||
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
|
||||
|
||||
@ -147,7 +147,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
})
|
||||
|
||||
it('emits correct replication info', async () => {
|
||||
options = Object.assign({}, options, { path: dbPath2, sync: true })
|
||||
options = Object.assign({}, options, { directory: dbPath2, sync: true })
|
||||
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
|
||||
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
|
||||
|
||||
@ -158,18 +158,18 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
db2.events.on('replicate', (address, entry) => {
|
||||
eventCount['replicate'] ++
|
||||
events.push({
|
||||
event: 'replicate',
|
||||
count: eventCount['replicate'],
|
||||
events.push({
|
||||
event: 'replicate',
|
||||
count: eventCount['replicate'],
|
||||
entry: entry,
|
||||
})
|
||||
})
|
||||
|
||||
db2.events.on('replicate.progress', (address, hash, entry, progress, total) => {
|
||||
eventCount['replicate.progress'] ++
|
||||
events.push({
|
||||
event: 'replicate.progress',
|
||||
count: eventCount['replicate.progress'],
|
||||
events.push({
|
||||
event: 'replicate.progress',
|
||||
count: eventCount['replicate.progress'],
|
||||
entry: entry ,
|
||||
replicationInfo: {
|
||||
max: db2.replicationStatus.max,
|
||||
@ -180,15 +180,15 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
db2.events.on('replicated', (address) => {
|
||||
eventCount['replicated'] ++
|
||||
events.push({
|
||||
event: 'replicated',
|
||||
count: eventCount['replicate'],
|
||||
events.push({
|
||||
event: 'replicated',
|
||||
count: eventCount['replicate'],
|
||||
replicationInfo: {
|
||||
max: db2.replicationStatus.max,
|
||||
progress: db2.replicationStatus.progress,
|
||||
},
|
||||
})
|
||||
// Resolve with a little timeout to make sure we
|
||||
// Resolve with a little timeout to make sure we
|
||||
// don't receive more than one event
|
||||
setTimeout(() => {
|
||||
finished = db2.iterator({ limit: -1 }).collect().length === expectedEventCount
|
||||
@ -256,18 +256,12 @@ Object.keys(testAPIs).forEach(API => {
|
||||
}
|
||||
|
||||
await mapSeries(adds, add)
|
||||
console.log()
|
||||
|
||||
// Open second instance again
|
||||
options = {
|
||||
path: dbPath2,
|
||||
directory: dbPath2 + '1',
|
||||
overwrite: true,
|
||||
sync: true,
|
||||
// Set write access for both clients
|
||||
write: [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
],
|
||||
}
|
||||
|
||||
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
|
||||
@ -275,9 +269,9 @@ Object.keys(testAPIs).forEach(API => {
|
||||
db2.events.on('replicate', (address, entry) => {
|
||||
eventCount['replicate'] ++
|
||||
// console.log("[replicate] ", '#' + eventCount['replicate'] + ':', db2.replicationStatus.progress, '/', db2.replicationStatus.max, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
|
||||
events.push({
|
||||
event: 'replicate',
|
||||
count: eventCount['replicate'],
|
||||
events.push({
|
||||
event: 'replicate',
|
||||
count: eventCount['replicate'],
|
||||
entry: entry,
|
||||
})
|
||||
})
|
||||
@ -286,9 +280,9 @@ Object.keys(testAPIs).forEach(API => {
|
||||
eventCount['replicate.progress'] ++
|
||||
// console.log("[progress] ", '#' + eventCount['replicate.progress'] + ':', db2.replicationStatus.progress, '/', db2.replicationStatus.max, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
|
||||
// assert.equal(db2.replicationStatus.progress, eventCount['replicate.progress'])
|
||||
events.push({
|
||||
event: 'replicate.progress',
|
||||
count: eventCount['replicate.progress'],
|
||||
events.push({
|
||||
event: 'replicate.progress',
|
||||
count: eventCount['replicate.progress'],
|
||||
entry: entry ,
|
||||
replicationInfo: {
|
||||
max: db2.replicationStatus.max,
|
||||
@ -311,15 +305,15 @@ Object.keys(testAPIs).forEach(API => {
|
||||
reject(e)
|
||||
}
|
||||
|
||||
events.push({
|
||||
event: 'replicated',
|
||||
count: eventCount['replicate'],
|
||||
events.push({
|
||||
event: 'replicated',
|
||||
count: eventCount['replicate'],
|
||||
replicationInfo: {
|
||||
max: db2.replicationStatus.max,
|
||||
progress: db2.replicationStatus.progress,
|
||||
},
|
||||
})
|
||||
// Resolve with a little timeout to make sure we
|
||||
// Resolve with a little timeout to make sure we
|
||||
// don't receive more than one event
|
||||
setTimeout( async () => {
|
||||
// console.log(eventCount['replicate.progress'], expectedEventCount)
|
||||
@ -386,25 +380,21 @@ Object.keys(testAPIs).forEach(API => {
|
||||
|
||||
// Open second instance again
|
||||
let options = {
|
||||
path: dbPath2,
|
||||
directory: dbPath2,
|
||||
overwrite: true,
|
||||
sync: true,
|
||||
// Set write access for both clients
|
||||
write: [
|
||||
orbitdb1.key.getPublic('hex'),
|
||||
orbitdb2.key.getPublic('hex')
|
||||
],
|
||||
}
|
||||
|
||||
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
|
||||
assert.equal(db1.address.toString(), db2.address.toString())
|
||||
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
|
||||
|
||||
db2.events.on('replicate', (address, entry) => {
|
||||
eventCount['replicate'] ++
|
||||
// console.log("[replicate] ", '#' + eventCount['replicate'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
|
||||
events.push({
|
||||
event: 'replicate',
|
||||
count: eventCount['replicate'],
|
||||
events.push({
|
||||
event: 'replicate',
|
||||
count: eventCount['replicate'],
|
||||
entry: entry,
|
||||
})
|
||||
})
|
||||
@ -414,9 +404,9 @@ Object.keys(testAPIs).forEach(API => {
|
||||
eventCount['replicate.progress'] ++
|
||||
// console.log("[progress] ", '#' + eventCount['replicate.progress'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
|
||||
// assert.equal(current, total)
|
||||
events.push({
|
||||
event: 'replicate.progress',
|
||||
count: eventCount['replicate.progress'],
|
||||
events.push({
|
||||
event: 'replicate.progress',
|
||||
count: eventCount['replicate.progress'],
|
||||
entry: entry ,
|
||||
replicationInfo: {
|
||||
max: db2.replicationStatus.max,
|
||||
@ -435,9 +425,9 @@ Object.keys(testAPIs).forEach(API => {
|
||||
reject(e)
|
||||
}
|
||||
|
||||
events.push({
|
||||
event: 'replicated',
|
||||
count: eventCount['replicate'],
|
||||
events.push({
|
||||
event: 'replicated',
|
||||
count: eventCount['replicate'],
|
||||
replicationInfo: {
|
||||
max: db2.replicationStatus.max,
|
||||
progress: db2.replicationStatus.progress,
|
||||
@ -445,7 +435,7 @@ Object.keys(testAPIs).forEach(API => {
|
||||
})
|
||||
|
||||
if (db2.replicationStatus.max >= expectedEventCount * 2
|
||||
&& db2.replicationStatus.progress >= expectedEventCount * 2)
|
||||
&& db2.replicationStatus.progress >= expectedEventCount * 2)
|
||||
finished = true
|
||||
})
|
||||
|
||||
@ -501,4 +491,4 @@ Object.keys(testAPIs).forEach(API => {
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -36,16 +36,16 @@ Object.keys(testAPIs).forEach(API => {
|
||||
rmrf.sync(dbPath2)
|
||||
ipfsd = await startIpfs(API, config.daemon1)
|
||||
ipfs = ipfsd.api
|
||||
orbitdb1 = new OrbitDB(ipfs, dbPath1)
|
||||
orbitdb2 = new OrbitDB(ipfs, dbPath2)
|
||||
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: dbPath1 })
|
||||
orbitdb2 = await OrbitDB.createInstance(ipfs, { directory: dbPath2 })
|
||||
db = await orbitdb1.log('replication status tests')
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if(orbitdb1)
|
||||
if(orbitdb1)
|
||||
await orbitdb1.stop()
|
||||
|
||||
if(orbitdb2)
|
||||
if(orbitdb2)
|
||||
await orbitdb2.stop()
|
||||
|
||||
if (ipfsd)
|
||||
|
@ -5,49 +5,47 @@ const ec = new EC('secp256k1')
|
||||
* A custom keystore example
|
||||
*/
|
||||
class CustomTestKeystore {
|
||||
constructor(signer) {
|
||||
this.createKey();
|
||||
constructor (storage) {
|
||||
// Use just one key throughout the keystore
|
||||
// for mock purposes
|
||||
this.key = this.createKey()
|
||||
}
|
||||
|
||||
createKey() {
|
||||
hasKey () {
|
||||
return this.key !== undefined ? true : false
|
||||
}
|
||||
|
||||
createKey (id) {
|
||||
const key = ec.genKeyPair()
|
||||
this.key = ec.keyPair({
|
||||
const keyPair = ec.keyPair({
|
||||
pub: key.getPublic('hex'),
|
||||
priv: key.getPrivate('hex'),
|
||||
privEnc: 'hex',
|
||||
pubEnc: 'hex',
|
||||
})
|
||||
|
||||
return keyPair
|
||||
}
|
||||
|
||||
getKey (id) {
|
||||
return this.key
|
||||
}
|
||||
|
||||
getKey() {
|
||||
return this.key
|
||||
}
|
||||
|
||||
// TODO: check if this is really in use
|
||||
generateKey() {
|
||||
return Promise.resolve(this.createKey())
|
||||
}
|
||||
|
||||
importPublicKey(key) {
|
||||
return Promise.resolve(ec.keyFromPublic(key, 'hex'))
|
||||
}
|
||||
|
||||
importPrivateKey(key) {
|
||||
return Promise.resolve(ec.keyFromPrivate(key, 'hex'))
|
||||
}
|
||||
|
||||
sign(key, data) {
|
||||
sign (key, data) {
|
||||
return Promise.resolve('<signature>')
|
||||
const sig = ec.sign(data, key)
|
||||
return Promise.resolve(sig.toDER('hex'))
|
||||
}
|
||||
|
||||
verify(signature, key, data) {
|
||||
let res = false
|
||||
res = ec.verify(data, signature, key)
|
||||
return Promise.resolve(res)
|
||||
verify (signature, publicKey, data) {
|
||||
return Promise.resolve(true)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new CustomTestKeystore()
module.exports = (LocalStorage, mkdir) => {
return {
create: (directory) => {
return new CustomTestKeystore()
}
}
}

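Note: the test keystore is no longer exported as a singleton instance; it becomes a factory whose `create()` returns a fresh CustomTestKeystore, matching the `keystore: CustomTestKeystore().create()` option used in the updated CustomKeystore test. A hedged sketch of consuming it (the require path is illustrative):

// Sketch: wiring the factory-style test keystore into createInstance.
const OrbitDB = require('orbit-db')
const CustomTestKeystore = require('./utils/custom-test-keystore')   // illustrative path

async function openWithCustomKeystore (ipfs, directory) {
  const keystore = CustomTestKeystore().create()
  return OrbitDB.createInstance(ipfs, { directory, keystore })
}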
@ -1,95 +1,63 @@
'use strict'

const multihashing = require('multihashing-async')
const mh = require('multihashes')
const CID = require('cids')
const pify = require('pify')

const defaultHashAlg = 'sha2-256'
const createMultihash = pify(multihashing)

// 'use strict'

// const ImmutableDB = require('./immutabledb-interface')

const defaultFormat = { format: 'dag-cbor', hashAlg: 'sha2-256' }

/* ImmutableDB using IPLD (through IPFS) */
class IPLDStore {
constructor (ipfs) {
// super()
this._ipfs = ipfs
const transformCborLinksIntoCids = (data) => {
if (!data) {
return data
}

async put (value) {
const cid = await this._ipfs.dag.put(value, defaultFormat)
return cid.toBaseEncodedString()
if (data['/']) {
return new CID(data['/'])
}

async get (key) {
const result = await this._ipfs.dag.get(key)
return result.value
if (Array.isArray(data)) {
return data.map(transformCborLinksIntoCids)
}

if (typeof data === 'object') {
return Object.keys(data).reduce((obj, key) => {
obj[key] = transformCborLinksIntoCids(data[key])

return obj
}, {})
}

return data
}

const createMultihash = (data, hashAlg) => {
return new Promise((resolve, reject) => {
multihashing(data, hashAlg || defaultHashAlg, (err, multihash) => {
if (err)
return reject(err)

resolve(mh.toB58String(multihash))
})
})
}

// const LRU = require('lru')
// const ImmutableDB = require('./immutabledb-interface')
// const createMultihash = require('./create-multihash')

/* Memory store using an LRU cache */
class MemStore {
constructor () {
this._store = {}//new LRU(1000)
this._store = new Map()
}

async put (value) {
const data = value//new Buffer(JSON.stringify(value))
const hash = await createMultihash(data)
// console.log(this._store)
// this._store.set(hash, data)
if (!this._store) this._store = {}
// console.log(this._store)
// console.log(hash, data)
this._store[hash] = data
// return hash
return {
toJSON: () => {
return {
data: value,
multihash: hash,
}
}
}
const buffer = Buffer.from(JSON.stringify(value))
const multihash = await createMultihash(buffer, 'sha2-256')
const cid = new CID(1, 'dag-cbor', multihash)
const key = cid.toBaseEncodedString()

this._store.set(key, value)

return cid
}

async get (key) {
// const data = this._store.get(key)
const data = this._store[key]
async get (cid) {
if (CID.isCID(cid)) {
cid = cid.toBaseEncodedString()
}

// if (data) {
// const value = JSON.parse(data)
// return value
// }
const data = this._store.get(cid)

// return data
return {
toJSON: () => {
return {
data: this._store[key],
multihash: key,
}
}
value: transformCborLinksIntoCids(data)
}
}
}

module.exports = MemStore
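For context, a short sketch of how the rewritten MemStore could be exercised: put() now returns a CID (v1, dag-cbor) and get() mirrors the { value } shape of ipfs.dag.get(). The require path and the wrapper function are assumptions, not part of this commit.

// Sketch only: require path and wrapper function are assumptions
const MemStore = require('./utils/mem-store')

async function demo () {
  const store = new MemStore()
  const cid = await store.put({ hello: 'world' })  // CIDv1 (dag-cbor), keyed internally by its base-encoded string
  const result = await store.get(cid)              // accepts a CID instance or its string form
  return result.value.hello                        // -> 'world'
}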
@ -23,19 +23,21 @@ const startIpfs = (type, config = {}) => {
IPFSFactory
.create(testAPIs[type])
.spawn(config, async (err, ipfsd) => {
if (err) {
reject(err)
if (err) {
reject(err)
}

// Monkey patch _peerInfo to the ipfs api/instance
// to make js-ipfs-api compatible with js-ipfs
// TODO: Get IPFS id via coherent API call (without it being asynchronous)
if (!ipfsd.api._peerInfo) {
let { id } = await ipfsd.api.id()
ipfsd.api._peerInfo = { id: { _idB58String: id } }
}
setTimeout(async () => {
if (!ipfsd.api._peerInfo) {
let { id } = await ipfsd.api.id()
ipfsd.api._peerInfo = { id: { _idB58String: id } }
}

resolve(ipfsd)
resolve(ipfsd)
}, 500)
})
})
}
@ -2,6 +2,7 @@

const assert = require('assert')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
@ -10,7 +11,7 @@ const {
startIpfs,
stopIpfs,
testAPIs,
databases,
databases
} = require('./utils')

const dbPath = './orbitdb/tests/write-permissions'
@ -28,8 +29,8 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
orbitdb2 = new OrbitDB(ipfs, dbPath + '/2')
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
orbitdb2 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '2') })
})

after(async () => {
@ -48,10 +49,12 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' allows multiple writers', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
}
}

const db1 = await database.create(orbitdb1, 'sync-test', options)
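Throughout this file the old top-level write array moves under an accessController key, and writer identities switch from orbitdb.key.getPublic('hex') to orbitdb.identity.publicKey. A minimal sketch of the new options shape inside an async test; the database name and type below are illustrative, not taken from this commit.

// Sketch only: 'shape-example' and 'feed' are illustrative
const options = {
  accessController: {
    write: [
      orbitdb1.identity.publicKey,   // writer 1
      orbitdb2.identity.publicKey    // writer 2
    ]
  }
}
const db = await orbitdb1.create('shape-example', 'feed', options)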
@ -75,10 +78,12 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' syncs', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
]
}
}

const db1 = await database.create(orbitdb1, 'sync-test', options)
@ -108,7 +113,9 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' syncs', async () => {
let options = {
// Set write permission for everyone
write: ['*'],
accessController: {
write: ['*']
}
}

const db1 = await database.create(orbitdb1, 'sync-test-public-dbs', options)
@ -124,8 +131,8 @@ Object.keys(testAPIs).forEach(API => {
setTimeout(async () => {
const value = database.getTestValue(db1)
assert.deepEqual(value, database.expectedValue)
await db1.close()
await db2.close()
await db1.close()
await db2.close()
resolve()
}, 300)
})
@ -139,14 +146,14 @@ Object.keys(testAPIs).forEach(API => {

let options = {
// Only peer 1 can write
write: [orbitdb1.key.getPublic('hex')],
accessController: {
write: [orbitdb1.identity.publicKey]
}
}
let err

options = Object.assign({}, options, { path: dbPath + '/sync-test/1' })
options = Object.assign({}, options, { path: path.join(dbPath, '/sync-test/1') })
const db1 = await database.create(orbitdb1, 'write error test 1', options)

options = Object.assign({}, options, { path: dbPath + '/sync-test/2', sync: true })
options = Object.assign({}, options, { path: path.join(dbPath, '/sync-test/2'), sync: true })
const db2 = await database.create(orbitdb2, 'write error test 1', options)

try {
@ -156,8 +163,9 @@ Object.keys(testAPIs).forEach(API => {
await database.tryInsert(db2)
} catch (e) {
// Make sure peer 2's instance throws an error
assert.equal(e.toString(), 'Error: Not allowed to write')
err = e.toString()
}
assert.equal(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)

// Make sure nothing was added to the database
assert.equal(database.query(db1).length, 0)
@ -170,10 +178,10 @@ Object.keys(testAPIs).forEach(API => {
setTimeout(async () => {
// Make sure nothing was added
assert.equal(database.query(db1).length, 0)
await db1.close()
await db2.close()
if (err) {
reject(err)
await db1.close()
await db2.close()
if (!err) {
reject(new Error('tryInsert should throw an err'))
} else {
resolve()
}
@ -188,7 +196,9 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' throws an error', async () => {
let options = {
// No write access (only creator of the database can write)
write: [],
accessController: {
write: []
}
}

let err
@ -200,7 +210,7 @@ Object.keys(testAPIs).forEach(API => {
} catch (e) {
err = e.toString()
}
assert.equal(err, 'Error: Not allowed to write')
assert.equal(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)
})
})
})