Merge pull request #623 from orbitdb/fix/persistent-keystore

Persistent connection in the keystore
This commit is contained in:
Mark Robert Henderson 2019-08-30 15:08:43 -04:00 committed by GitHub
commit dcbce5e01c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 3083 additions and 1441 deletions

View File

@ -27,7 +27,7 @@ jobs:
# To see the list of pre-built images that CircleCI provides for most common languages see
# https://circleci.com/docs/2.0/circleci-images/
docker:
- image: circleci/node:10-browsers
- image: circleci/node:10-browsers
steps:
# Machine Setup
# If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each
@ -52,8 +52,7 @@ jobs:
# The following line was run implicitly in your 1.0 builds based on what CircleCI inferred about the structure of your project. In 2.0 you need to be explicit about which commands should be run. In some cases you can discard inferred commands if they are not relevant to your project.
- run: if [ -z "${NODE_ENV:-}" ]; then export NODE_ENV=test; fi
- run: export PATH="~/orbitdb/orbit-db/node_modules/.bin:$PATH"
- run: rm -rf node_modules/
- run: npm install
- run: make rebuild
# Save dependency cache
# - save_cache:
# key: v1-dep-{{ .Branch }}-{{ epoch }}
@ -72,7 +71,6 @@ jobs:
# Test
# This would typically be a build job when using workflows, possibly combined with build
# The following line was run implicitly in your 1.0 builds based on what CircleCI inferred about the structure of your project. In 2.0 you need to be explicit about which commands should be run. In some cases you can discard inferred commands if they are not relevant to your project.
- run: npm run test:all
# Teardown
# If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each
# Save test results

2
API.md
View File

@ -98,7 +98,6 @@ const db = await orbitdb.keyvalue('profile')
> Creates and opens an OrbitDB database.
Returns a `Promise` that resolves to [a database instance](#store-api). `name` (string) should be the database name, not an OrbitDB address (i.e. `user.posts`). `type` is a supported database type (i.e. `eventlog` or [an added custom type](https://github.com/orbitdb/orbit-db#custom-store-types)). `options` is an object with any of the following properties:
- `directory` (string): The directory where data will be stored (Default: uses directory option passed to OrbitDB constructor or `./orbitdb` if none was provided).
- `write` (array): An array of hex encoded public keys which are used to set write access to the database. `["*"]` can be passed in to give write access to everyone. See the [GETTING STARTED](https://github.com/orbitdb/orbit-db/blob/master/GUIDE.md) guide for more info. (Default: uses the OrbitDB identity public key `orbitdb.identity.publicKey`, which would give write access only to yourself)
- `overwrite` (boolean): Overwrite an existing database (Default: `false`)
- `replicate` (boolean): Replicate the database with peers, requires IPFS PubSub. (Default: `true`)
@ -137,7 +136,6 @@ const dbAddress = await orbitdb.determineAddress('user.posts', 'eventlog', {
Returns a `Promise` that resolves to [a database instance](#store-api). `address` (string) should be a valid OrbitDB address. If a database name is provided instead, it will check `options.create` to determine if it should create the database. `options` is an object with any of the following properties:
- `localOnly` (boolean): If set to `true`, will throw an error if the database can't be found locally. (Default: `false`)
- `directory` (string): The directory where data will be stored (Default: uses directory option passed to OrbitDB constructor or `./orbitdb` if none was provided).
- `create` (boolean): Whether or not to create the database if a valid OrbitDB address is not provided. (Default: `false`)
- `type` (string): A supported database type (i.e. `eventlog` or [an added custom type](https://github.com/orbitdb/orbit-db#custom-store-types)). Required if create is set to `true`. Otherwise it's used to validate the manifest.
- `overwrite` (boolean): Overwrite an existing database (Default: `false`)

View File

@ -14,11 +14,12 @@ module.exports = {
devtool: 'none',
externals: {
fs: '{}',
mkdirp: '{}',
mkdirp: '{}'
},
node: {
console: false,
Buffer: true
Buffer: true,
mkdirp: 'empty'
},
plugins: [
new webpack.DefinePlugin({
@ -33,8 +34,8 @@ module.exports = {
path.resolve(__dirname, '../node_modules')
],
alias: {
leveldown: 'level-js',
},
leveldown: 'level-js'
}
},
resolveLoader: {
modules: [
@ -42,5 +43,5 @@ module.exports = {
path.resolve(__dirname, '../node_modules')
],
moduleExtensions: ['-loader']
},
}
}

View File

@ -13,11 +13,13 @@ module.exports = {
devtool: 'source-map',
externals: {
fs: '{}',
mkdirp: '{}',
mkdirp: '{}'
},
node: {
console: false,
Buffer: true
Buffer: true,
mkdirp: 'empty',
fs: 'empty'
},
plugins: [
],
@ -27,8 +29,8 @@ module.exports = {
path.resolve(__dirname, '../node_modules')
],
alias: {
leveldown: 'level-js',
},
leveldown: 'level-js'
}
},
resolveLoader: {
modules: [
@ -36,5 +38,5 @@ module.exports = {
path.resolve(__dirname, '../node_modules')
],
moduleExtensions: ['-loader']
},
}
}

View File

@ -12,6 +12,8 @@ module.exports = {
devtool: 'none',
node: {
Buffer: true,
mkdirp: 'empty',
fs: 'empty'
},
plugins: [
new webpack.DefinePlugin({
@ -24,7 +26,7 @@ module.exports = {
modules: [
'node_modules',
path.resolve(__dirname, '../node_modules')
],
]
},
resolveLoader: {
modules: [
@ -32,5 +34,5 @@ module.exports = {
path.resolve(__dirname, '../node_modules')
],
moduleExtensions: ['-loader']
},
}
}

View File

@ -1,5 +1,5 @@
const creatures = [
'🐙', '🐷', '🐬', '🐞',
'🐙', '🐷', '🐬', '🐞',
'🐈', '🙉', '🐸', '🐓',
'🐊', '🕷', '🐠', '🐘',
'🐼', '🐰', '🐶', '🐥'
@ -19,7 +19,7 @@ const readonlyCheckbox = document.getElementById("readonly")
function handleError(e) {
console.error(e.stack)
statusElm.innerHTML = e.message
statusElm.innerHTML = e.message
}
const main = (IPFS, ORBITDB) => {
@ -48,7 +48,7 @@ const main = (IPFS, ORBITDB) => {
const ipfs = new Ipfs({
repo: '/orbitdb/examples/browser/new/ipfs/0.33.1',
start: true,
preload: {
preload: {
enabled: false
},
EXPERIMENTAL: {
@ -151,9 +151,9 @@ const main = (IPFS, ORBITDB) => {
db = await orbitdb.open(name, {
// If database doesn't exist, create it
create: true,
create: true,
overwrite: true,
// Load only the local version of the database,
// Load only the local version of the database,
// don't load the latest from the network yet
localOnly: false,
type: type,

4081
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -15,23 +15,26 @@
"dependencies": {
"cids": "^0.7.1",
"ipfs-pubsub-1on1": "~0.0.4",
"is-node": "^1.0.2",
"localstorage-down": "^0.6.7",
"logplease": "^1.2.14",
"multihashes": "^0.4.12",
"orbit-db-access-controllers": "~0.2.0",
"orbit-db-cache": "~0.2.4",
"orbit-db-access-controllers": "~0.2.2",
"orbit-db-cache": "orbitdb/orbit-db-cache#master",
"orbit-db-counterstore": "~1.5.0",
"orbit-db-docstore": "~1.5.0",
"orbit-db-eventstore": "~1.5.0",
"orbit-db-feedstore": "~1.5.0",
"orbit-db-identity-provider": "~0.1.0",
"orbit-db-io": "~0.1.0",
"orbit-db-keystore": "^0.2.1",
"orbit-db-identity-provider": "github:orbitdb/orbit-db-identity-provider#master",
"orbit-db-io": "^0.1.1",
"orbit-db-keystore": "github:orbitdb/orbit-db-keystore#master",
"orbit-db-kvstore": "~1.5.0",
"orbit-db-pubsub": "~0.5.5",
"orbit-db-store": "~2.6.0"
"orbit-db-storage-adapter": "^0.4.2",
"orbit-db-store": "github:orbitdb/orbit-db-store#master"
},
"devDependencies": {
"babel-cli": "^6.26.0",
"babel-core": "^6.26.0",
"babel-loader": "^7.1.2",
"babel-plugin-transform-runtime": "^6.23.0",
@ -39,11 +42,11 @@
"babel-preset-env": "^1.7.0",
"datastore-level": "0.10.0",
"fs-extra": "^7.0.1",
"go-ipfs-dep": "~0.4.20",
"ipfs": "~0.36.4",
"ipfs-repo": "~0.26.6",
"ipfsd-ctl": "~0.42.3",
"localstorage-level-migration": "^0.0.1",
"go-ipfs-dep": "aphelionz/npm-go-ipfs-dep",
"localstorage-level-migration": "github:orbitdb/localstorage-level-migration#master",
"markdown-toc": "^1.2.0",
"mocha": "^5.2.0",
"p-each-series": "^1.0.0",
@ -55,6 +58,7 @@
"remark-cli": "^5.0.0",
"remark-validate-links": "^7.0.0",
"rimraf": "^2.6.2",
"standard": "^12.0.1",
"webpack": "^4.25.1",
"webpack-cli": "^3.1.2"
},
@ -74,6 +78,14 @@
"build:docs/toc": "markdown-toc --no-first1 -i README.md && markdown-toc --no-first1 -i API.md && markdown-toc --no-first1 -i GUIDE.md && markdown-toc --no-first1 -i CHANGELOG.md && markdown-toc --no-first1 -i FAQ.md ",
"build:es5": "babel src --out-dir ./dist/es5/ --presets babel-preset-env --plugins babel-plugin-transform-runtime"
},
"standard": {
"env": "mocha",
"ignore": [
"test/**",
"examples/**",
"benchmarks/**"
]
},
"localMaintainers": [
"haad <haad@haja.io>",
"shamb0t <shams@haja.io>",

View File

@ -1,5 +1,6 @@
'use strict'
const fs = require('fs')
const path = require('path')
const EventStore = require('orbit-db-eventstore')
const FeedStore = require('orbit-db-feedstore')
@ -15,9 +16,11 @@ const OrbitDBAddress = require('./orbit-db-address')
const createDBManifest = require('./db-manifest')
const exchangeHeads = require('./exchange-heads')
const { isDefined, io } = require('./utils')
const Storage = require('orbit-db-storage-adapter')
const leveldown = require('leveldown')
const Logger = require('logplease')
const logger = Logger.create("orbit-db")
const logger = Logger.create('orbit-db')
Logger.setLogLevel('ERROR')
// Mapping for 'database type' -> Class
@ -26,26 +29,25 @@ let databaseTypes = {
'eventlog': EventStore,
'feed': FeedStore,
'docstore': DocumentStore,
'keyvalue': KeyValueStore,
'keyvalue': KeyValueStore
}
class OrbitDB {
constructor(ipfs, identity, options = {}) {
if (!isDefined(ipfs))
throw new Error('IPFS is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance')
class OrbitDB {
constructor (ipfs, identity, options = {}) {
if (!isDefined(ipfs)) { throw new Error('IPFS is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance') }
if (!isDefined(identity))
throw new Error('identity is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance')
if (!isDefined(identity)) { throw new Error('identity is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance') }
this._ipfs = ipfs
this.identity = identity
this.id = options.peerId
this._pubsub = options && options.broker
? new options.broker(this._ipfs)
? new options.broker(this._ipfs) // eslint-disable-line
: new Pubsub(this._ipfs, this.id)
this.directory = options.directory || './orbitdb'
this.keystore = options.keystore
this.cache = options.cache || Cache
this.caches = { 'default': options.cache }
this.storage = options.storage
this.stores = {}
this._directConnections = {}
// AccessControllers module can be passed in to enable
@ -54,24 +56,43 @@ let databaseTypes = {
}
static async createInstance (ipfs, options = {}) {
if (!isDefined(ipfs))
throw new Error('IPFS is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance')
if (!isDefined(ipfs)) { throw new Error('IPFS is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance') }
const { id } = await ipfs.id()
const directory = options.directory || './orbitdb'
const keystore = options.keystore || Keystore.create(path.join(directory, id, '/keystore'))
const identity = options.identity || await Identities.createIdentity({
id: options.id || id,
keystore: keystore,
})
options = Object.assign({}, options, {
peerId: id ,
directory: directory,
keystore: keystore
})
const orbitdb = new OrbitDB(ipfs, identity, options)
return orbitdb
if (!options.directory) { options.directory = './orbitdb' }
if (!options.storage) {
let storageOptions = {}
if (fs && fs.mkdirSync) {
storageOptions.preCreate = async (directory) => {
fs.mkdirSync(directory, { recursive: true })
}
}
options.storage = Storage(leveldown, storageOptions)
}
if (!options.keystore) {
const keystorePath = path.join(options.directory, id, '/keystore')
let keyStorage = await options.storage.createStore(keystorePath)
options.keystore = new Keystore(keyStorage)
}
if (!options.identity) {
options.identity = await Identities.createIdentity({
id: options.id || id,
keystore: options.keystore
})
}
if (!options.cache) {
const cachePath = path.join(options.directory, id, '/cache')
let cacheStorage = await options.storage.createStore(cachePath)
options.cache = new Cache(cacheStorage)
}
const finalOptions = Object.assign({}, options, { peerId: id })
return new OrbitDB(ipfs, options.identity, finalOptions)
}
/* Databases */
@ -113,9 +134,13 @@ let databaseTypes = {
}
async disconnect () {
//close Keystore
if (this.keystore.close)
await this.keystore.close()
// close Keystore
await this.keystore.close()
// close Cache
await Promise.all(Object.values(this.caches).map((cache) => {
return cache.close()
}))
// Close all open databases
const databases = Object.values(this.stores)
@ -152,21 +177,18 @@ let databaseTypes = {
// Get the type -> class mapping
const Store = databaseTypes[type]
if (!Store)
throw new Error(`Invalid database type '${type}'`)
if (!Store) { throw new Error(`Invalid database type '${type}'`) }
let accessController
if (options.accessControllerAddress) {
accessController = await AccessControllers.resolve(this, options.accessControllerAddress, options.accessController)
}
const cache = await this._loadCache(this.directory, address)
const opts = Object.assign({ replicate: true }, options, {
accessController: accessController,
keystore: this.keystore,
cache: cache,
onClose: this._onClose.bind(this),
cache: options.cache,
onClose: this._onClose.bind(this)
})
const identity = options.identity || this.identity
@ -179,16 +201,15 @@ let databaseTypes = {
// Subscribe to pubsub to get updates from peers,
// this is what hooks us into the message propagation layer
// and the p2p network
if(opts.replicate && this._pubsub)
this._pubsub.subscribe(addr, this._onMessage.bind(this), this._onPeerConnected.bind(this))
if (opts.replicate && this._pubsub) { this._pubsub.subscribe(addr, this._onMessage.bind(this), this._onPeerConnected.bind(this)) }
return store
}
// Callback for local writes to the database. We send the update to pubsub.
_onWrite (address, entry, heads) {
if(!heads) throw new Error("'heads' not defined")
if(this._pubsub) this._pubsub.publish(address, heads)
if (!heads) throw new Error("'heads' not defined")
if (this._pubsub) this._pubsub.publish(address, heads)
}
// Callback for receiving a message from the network
@ -210,10 +231,11 @@ let databaseTypes = {
const getStore = address => this.stores[address]
const getDirectConnection = peer => this._directConnections[peer]
const onChannelCreated = channel => this._directConnections[channel._receiverID] = channel
const onChannelCreated = channel => { this._directConnections[channel._receiverID] = channel }
const onMessage = (address, heads) => this._onMessage(address, heads)
const channel = await exchangeHeads(
await exchangeHeads(
this._ipfs,
address,
peer,
@ -223,8 +245,7 @@ let databaseTypes = {
onChannelCreated
)
if (getStore(address))
getStore(address).events.emit('peer', peer)
if (getStore(address)) { getStore(address).events.emit('peer', peer) }
}
// Callback when database was closed
@ -239,16 +260,14 @@ let databaseTypes = {
delete this.stores[address]
}
async _determineAddress(name, type, options = {}) {
if (!OrbitDB.isValidType(type))
throw new Error(`Invalid database type '${type}'`)
async _determineAddress (name, type, options = {}) {
if (!OrbitDB.isValidType(type)) { throw new Error(`Invalid database type '${type}'`) }
if (OrbitDBAddress.isValid(name))
throw new Error(`Given database name is an address. Please give only the name of the database!`)
if (OrbitDBAddress.isValid(name)) { throw new Error(`Given database name is an address. Please give only the name of the database!`) }
// Create an AccessController, use IPFS AC as the default
options.accessController = Object.assign({}, { name: name , type: 'ipfs' }, options.accessController)
const accessControllerAddress = await AccessControllers.create(this, options.accessController.type, options.accessController || {})
options.accessController = Object.assign({}, { name: name, type: 'ipfs' }, options.accessController)
const accessControllerAddress = await AccessControllers.create(this, options.accessController.type, options.accessController || {})
// Save the manifest to IPFS
const manifestHash = await createDBManifest(this._ipfs, name, type, accessControllerAddress, options)
@ -262,31 +281,30 @@ let databaseTypes = {
/*
options = {
accessController: { write: [] } // array of keys that can write to this database
directory: './orbitdb', // directory in which to place the database files
overwrite: false, // whether we should overwrite the existing database if it exists
}
*/
async create (name, type, options = {}) {
logger.debug(`create()`)
// The directory to look databases from can be passed in as an option
const directory = options.directory || this.directory
logger.debug(`Creating database '${name}' as ${type} in '${directory}'`)
logger.debug(`Creating database '${name}' as ${type}`)
// Create the database address
const dbAddress = await this._determineAddress(name, type, options)
// Load the locally saved database information
const cache = await this._loadCache(directory, dbAddress)
options.cache = this.caches[options.directory || 'default']
if (!options.cache) {
const cacheStorage = await this.storage.createStore(options.directory)
this.caches[options.directory] = options.cache = new Cache(cacheStorage)
}
// Check if we have the database locally
const haveDB = await this._haveLocalData(cache, dbAddress)
const haveDB = await this._haveLocalData(options.cache, dbAddress)
if (haveDB && !options.overwrite)
throw new Error(`Database '${dbAddress}' already exists!`)
if (haveDB && !options.overwrite) { throw new Error(`Database '${dbAddress}' already exists!`) }
// Save the database locally
await this._addManifestToCache(directory, dbAddress)
await this._addManifestToCache(options.cache, dbAddress)
logger.debug(`Created database '${dbAddress}'`)
@ -294,7 +312,7 @@ let databaseTypes = {
return this.open(dbAddress, options)
}
async determineAddress(name, type, options = {}) {
async determineAddress (name, type, options = {}) {
const opts = Object.assign({}, { onlyHash: true }, options)
return this._determineAddress(name, type, opts)
}
@ -314,10 +332,6 @@ let databaseTypes = {
options = Object.assign({ localOnly: false, create: false }, options)
logger.debug(`Open database '${address}'`)
// The directory to look databases from can be passed in as an option
const directory = options.directory || this.directory
logger.debug(`Look from '${directory}'`)
// If address is just the name of database, check the options to create the database
if (!OrbitDBAddress.isValid(address)) {
if (!options.create) {
@ -334,11 +348,10 @@ let databaseTypes = {
// Parse the database address
const dbAddress = OrbitDBAddress.parse(address)
// Load the locally saved db information
const cache = await this._loadCache(directory, dbAddress)
if (!options.cache) options.cache = this.caches['default']
// Check if we have the database
const haveDB = await this._haveLocalData(cache, dbAddress)
const haveDB = await this._haveLocalData(options.cache, dbAddress)
logger.debug((haveDB ? 'Found' : 'Didn\'t find') + ` database '${dbAddress}'`)
@ -356,11 +369,10 @@ let databaseTypes = {
logger.debug(`Manifest for '${dbAddress}':\n${JSON.stringify(manifest, null, 2)}`)
// Make sure the type from the manifest matches the type that was given as an option
if (options.type && manifest.type !== options.type)
throw new Error(`Database '${dbAddress}' is type '${manifest.type}' but was opened as '${options.type}'`)
if (options.type && manifest.type !== options.type) { throw new Error(`Database '${dbAddress}' is type '${manifest.type}' but was opened as '${options.type}'`) }
// Save the database locally
await this._addManifestToCache(directory, dbAddress)
await this._addManifestToCache(options.cache, dbAddress)
// Open the the database
options = Object.assign({}, options, { accessControllerAddress: manifest.accessController })
@ -368,24 +380,11 @@ let databaseTypes = {
}
// Save the database locally
async _addManifestToCache (directory, dbAddress) {
const cache = await this._loadCache(directory, dbAddress)
async _addManifestToCache (cache, dbAddress) {
await cache.set(path.join(dbAddress.toString(), '_manifest'), dbAddress.root)
logger.debug(`Saved manifest to IPFS as '${dbAddress.root}'`)
}
async _loadCache (directory, dbAddress) {
let cache
try {
cache = await this.cache.load(directory, dbAddress)
} catch (e) {
console.log(e)
logger.error("Couldn't load Cache:", e)
}
return cache
}
/**
* Check if we have the database, or part of it, saved locally
* @param {[Cache]} cache [The OrbitDBCache instance containing the local data]

View File

@ -6,7 +6,7 @@ const createDBManifest = async (ipfs, name, type, accessControllerAddress, optio
const manifest = {
name: name,
type: type,
accessController: path.join('/ipfs', accessControllerAddress),
accessController: path.join('/ipfs', accessControllerAddress)
}
return io.write(ipfs, options.format || 'dag-cbor', manifest, options)

View File

@ -3,7 +3,7 @@
const Channel = require('ipfs-pubsub-1on1')
const Logger = require('logplease')
const logger = Logger.create("exchange-heads", { color: Logger.Colors.Yellow })
const logger = Logger.create('exchange-heads', { color: Logger.Colors.Yellow })
Logger.setLogLevel('ERROR')
const getHeadsForDatabase = store => (store && store._oplog) ? store._oplog.heads : []

View File

@ -1,6 +1,5 @@
'use strict'
const path = require('path')
const multihash = require('multihashes')
const CID = require('cids')
const notEmpty = e => e !== '' && e !== ' '
@ -37,11 +36,9 @@ class OrbitDBAddress {
}
static parse (address) {
if (!address)
throw new Error(`Not a valid OrbitDB address: ${address}`)
if (!address) { throw new Error(`Not a valid OrbitDB address: ${address}`) }
if (!OrbitDBAddress.isValid(address))
throw new Error(`Not a valid OrbitDB address: ${address}`)
if (!OrbitDBAddress.isValid(address)) { throw new Error(`Not a valid OrbitDB address: ${address}`) }
const parts = address.toString()
.split('/')

View File

@ -104,7 +104,7 @@ Object.keys(testAPIs).forEach(API => {
write: [
orbitdb1.identity.id,
orbitdb2.identity.id
],
]
}
}

View File

@ -11,6 +11,7 @@ const OrbitDB = require('../src/OrbitDB')
const OrbitDBAddress = require('../src/orbit-db-address')
const Identities = require('orbit-db-identity-provider')
const io = require('orbit-db-io')
// Include test utilities
const {
config,
@ -95,7 +96,7 @@ Object.keys(testAPIs).forEach(API => {
describe('Success', function() {
before(async () => {
db = await orbitdb.create('second', 'feed', { replicate: false })
localDataPath = path.join(dbPath, db.address.root, db.address.path)
localDataPath = path.join(dbPath, orbitdb.id, 'cache')
await db.close()
})
@ -114,27 +115,17 @@ Object.keys(testAPIs).forEach(API => {
})
it('saves database manifest reference locally', async () => {
const manifestHash = db.address.root
const address = db.address.toString()
levelup(leveldown(localDataPath), (err, db) => {
if (err) {
assert.equal(err, null)
}
db.get(address + '/_manifest', (err, value) => {
if (err) {
assert.equal(err, null)
}
const data = JSON.parse(value || '{}')
assert.equal(data, manifestHash)
})
})
const address = db.id
const manifestHash = address.split('/')[2]
await db._cache._store.open()
const value = await db._cache.get(address + '/_manifest')
assert.equal(value, manifestHash)
})
it('saves database manifest file locally', async () => {
const manifest = await io.read(ipfs, db.address.root)
assert(manifest)
const manifestHash = db.id.split('/')[2]
const manifest = await io.read(ipfs, manifestHash)
assert.notEqual(manifest, undefined)
assert.equal(manifest.name, 'second')
assert.equal(manifest.type, 'feed')
assert.notEqual(manifest.accessController, null)
@ -144,8 +135,7 @@ Object.keys(testAPIs).forEach(API => {
it('can pass local database directory as an option', async () => {
const dir = './orbitdb/tests/another-feed'
db = await orbitdb.create('third', 'feed', { directory: dir })
localDataPath = path.join(dir, db.address.root, db.address.path)
assert.equal(fs.existsSync(localDataPath), true)
assert.equal(fs.existsSync(dir), true)
})
describe('Access Controller', function() {
@ -228,7 +218,7 @@ Object.keys(testAPIs).forEach(API => {
})
describe('Open', function() {
before(async () => {
beforeEach(async () => {
db = await orbitdb.open('abc', { create: true, type: 'feed' })
})
@ -260,7 +250,7 @@ Object.keys(testAPIs).forEach(API => {
})
it('opens a database - with a different identity', async () => {
const identity = await Identities.createIdentity({ id: 'test-id' })
const identity = await Identities.createIdentity({ id: 'test-id', keystore: orbitdb.keystore })
db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true, identity })
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
assert.equal(db.address.toString().indexOf('zd'), 9)

View File

@ -5,13 +5,15 @@ const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
const CustomCache = require('orbit-db-cache')
const localdown = require('localstorage-down')
const storage = require("orbit-db-storage-adapter")(localdown)
// Include test utilities
const {
config,
startIpfs,
stopIpfs,
testAPIs,
CustomTestCache,
databases,
} = require('./utils')
@ -25,6 +27,9 @@ Object.keys(testAPIs).forEach(API => {
let ipfsd, ipfs, orbitdb1
before(async () => {
const store = await storage.createStore("local")
const cache = new CustomCache(store)
config.daemon1.repo = ipfsPath
rmrf.sync(config.daemon1.repo)
rmrf.sync(dbPath)
@ -32,7 +37,7 @@ Object.keys(testAPIs).forEach(API => {
ipfs = ipfsd.api
orbitdb1 = await OrbitDB.createInstance(ipfs, {
directory: path.join(dbPath, '1'),
cache: CustomTestCache
cache: cache
})
})

View File

@ -22,7 +22,7 @@ const ipfsPath = './orbitdb/tests/customKeystore/ipfs'
Object.keys(testAPIs).forEach(API => {
describe(`orbit-db - Use a Custom Keystore (${API})`, function() {
this.timeout(config.timeout)
this.timeout(20000)
let ipfsd, ipfs, orbitdb1
@ -32,10 +32,9 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
const identity = await Identities.createIdentity({ type:'custom'})
const identity = await Identities.createIdentity({ type: 'custom', keystore: CustomTestKeystore().create() })
orbitdb1 = await OrbitDB.createInstance(ipfs, {
directory: path.join(dbPath, '1'),
keystore: CustomTestKeystore().create(),
identity
})
})
@ -67,7 +66,7 @@ Object.keys(testAPIs).forEach(API => {
const options = {
accessController: {
// Set write access for both clients
write: [orbitdb1.identity.id],
write: [orbitdb1.identity.id]
}
}

View File

@ -46,13 +46,17 @@ Object.keys(testAPIs).forEach(API => {
describe('Drop', function() {
before(async () => {
db = await orbitdb.create('first', 'feed')
localDataPath = path.join(dbPath, db.address.root, db.address.path)
localDataPath = path.join(dbPath)
assert.equal(fs.existsSync(localDataPath), true)
})
it('removes local database files', async () => {
it('removes local database cache', async () => {
await db.drop()
assert.equal(fs.existsSync(localDataPath), false)
assert.equal(await db._cache.get(db.localHeadsPath), undefined)
assert.equal(await db._cache.get(db.remoteHeadsPath), undefined)
assert.equal(await db._cache.get(db.snapshotPath), undefined)
assert.equal(await db._cache.get(db.queuePath), undefined)
assert.equal(await db._cache.get(db.manifestPath), undefined)
})
})
})

View File

@ -5,14 +5,17 @@ const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
const Cache = require('orbit-db-cache')
const localdown = require('localstorage-down')
const Storage = require('orbit-db-storage-adapter')
// Include test utilities
const {
config,
startIpfs,
stopIpfs,
testAPIs,
CustomTestCache
testAPIs
} = require('./utils')
const dbPath = './orbitdb/tests/persistency'
@ -25,7 +28,8 @@ const tests = [
},
{
title: 'Persistency with custom cache',
orbitDBConfig: { directory: path.join(dbPath, '1'), cache: CustomTestCache }
type: "custom",
orbitDBConfig: { directory: path.join(dbPath, '1') }
}
]
@ -39,12 +43,20 @@ Object.keys(testAPIs).forEach(API => {
let ipfsd, ipfs, orbitdb1, db, address
before(async () => {
const options = Object.assign({}, test.orbitDBConfig)
if(test.type === "custom") {
const customStorage = Storage(localdown)
const customStore = await customStorage.createStore(dbPath)
options.cache = new Cache(customStore)
}
config.daemon1.repo = ipfsPath
rmrf.sync(config.daemon1.repo)
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = await OrbitDB.createInstance(ipfs, test.orbitDBConfig)
orbitdb1 = await OrbitDB.createInstance(ipfs, options)
})
after(async () => {

View File

@ -133,7 +133,7 @@ Object.keys(testAPIs).forEach(API => {
write: [
orbitdb1.identity.id,
orbitdb2.identity.id
],
]
}
}

View File

@ -1,4 +1,4 @@
'use strict'
'use strict'
const assert = require('assert')
const mapSeries = require('p-each-series')
@ -72,7 +72,7 @@ Object.keys(testAPIs).forEach(API => {
write: [
orbitdb1.identity.id,
orbitdb2.identity.id
],
]
}
}
@ -205,9 +205,10 @@ Object.keys(testAPIs).forEach(API => {
assert.equal(eventCount['replicate.progress'], expectedEventCount)
const replicateEvents = events.filter(e => e.event === 'replicate')
const minClock = Math.min(...replicateEvents.filter(e => !!e.entry.clock).map(e => e.entry.clock.time))
assert.equal(replicateEvents.length, expectedEventCount)
assert.equal(replicateEvents[0].entry.payload.value.split(' ')[0], 'hello')
assert.equal(replicateEvents[0].entry.clock.time, 1)
assert.equal(minClock, 1)
const replicateProgressEvents = events.filter(e => e.event === 'replicate.progress')
assert.equal(replicateProgressEvents.length, expectedEventCount)
@ -297,7 +298,7 @@ Object.keys(testAPIs).forEach(API => {
try {
// Test the replicator state
assert.equal(db2._loader.tasksRequested >= db2.replicationStatus.progress, true)
assert.equal(db2.options.referenceCount, 64)
assert.equal(db2.options.referenceCount, 32)
assert.equal(db2._loader.tasksRunning, 0)
} catch (e) {
reject(e)
@ -333,19 +334,23 @@ Object.keys(testAPIs).forEach(API => {
assert.equal(eventCount['replicate.progress'], expectedEventCount)
const replicateEvents = events.filter(e => e.event === 'replicate')
const maxClock = Math.max(...replicateEvents.filter(e => !!e.entry.clock).map(e => e.entry.clock.time))
assert.equal(replicateEvents.length, expectedEventCount)
assert.equal(replicateEvents[0].entry.payload.value.split(' ')[0], 'hello')
assert.equal(replicateEvents[0].entry.clock.time, expectedEventCount)
assert.equal(maxClock, expectedEventCount)
const replicateProgressEvents = events.filter(e => e.event === 'replicate.progress')
const maxProgressClock = Math.max(...replicateProgressEvents.filter(e => !!e.entry.clock).map(e => e.entry.clock.time))
const maxReplicationMax = Math.max(...replicateProgressEvents.map(e => e.replicationInfo.max))
assert.equal(replicateProgressEvents.length, expectedEventCount)
assert.equal(replicateProgressEvents[0].entry.payload.value.split(' ')[0], 'hello')
assert.equal(replicateProgressEvents[0].entry.clock.time, expectedEventCount)
assert.equal(replicateProgressEvents[0].replicationInfo.max, expectedEventCount)
assert.equal(maxProgressClock, expectedEventCount)
assert.equal(maxReplicationMax, expectedEventCount)
assert.equal(replicateProgressEvents[0].replicationInfo.progress, 1)
const replicatedEvents = events.filter(e => e.event === 'replicated')
assert.equal(replicatedEvents[0].replicationInfo.max, expectedEventCount)
const replicateMax = Math.max(...replicatedEvents.map(e => e.replicationInfo.max))
assert.equal(replicateMax, expectedEventCount)
assert.equal(replicatedEvents[replicatedEvents.length - 1].replicationInfo.progress, expectedEventCount)
resolve()
@@ -456,10 +461,11 @@ Object.keys(testAPIs).forEach(API => {
assert.equal(replicateEvents.length, expectedEventCount)
const replicateProgressEvents = events.filter(e => e.event === 'replicate.progress')
const maxProgressClock = Math.max(...replicateProgressEvents.filter(e => !!e.entry.clock).map(e => e.entry.clock.time))
assert.equal(replicateProgressEvents.length, expectedEventCount)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].entry.clock.time, expectedEventCount)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].replicationInfo.max, expectedEventCount * 2)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].replicationInfo.progress, expectedEventCount * 2)
assert.equal(maxProgressClock, expectedEventCount)
assert.equal(db2.replicationStatus.max, expectedEventCount * 2)
assert.equal(db2.replicationStatus.progress, expectedEventCount * 2)
const replicatedEvents = events.filter(e => e.event === 'replicated')
assert.equal(replicatedEvents[replicatedEvents.length - 1].replicationInfo.progress, expectedEventCount * 2)

View File

@@ -1,10 +1,15 @@
'use strict'
const fs = require('fs')
const assert = require('assert')
const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')
const Identities = require('orbit-db-identity-provider')
const Keystore = require('orbit-db-keystore')
const leveldown = require('leveldown')
const storage = require('orbit-db-storage-adapter')(leveldown)
// Include test utilities
const {
config,
@@ -13,6 +18,7 @@ const {
testAPIs,
} = require('./utils')
const keysPath = './orbitdb/identity/identitykeys'
const dbPath = './orbitdb/tests/change-identity'
const ipfsPath = './orbitdb/tests/change-identity/ipfs'
@@ -20,7 +26,7 @@ Object.keys(testAPIs).forEach(API => {
describe(`orbit-db - Set identities (${API})`, function() {
this.timeout(config.timeout)
let ipfsd, ipfs, orbitdb, db
let ipfsd, ipfs, orbitdb, db, keystore
let identity1, identity2
let localDataPath
@@ -30,12 +36,18 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
identity1 = await Identities.createIdentity({ id: 'test-id1' })
identity2 = await Identities.createIdentity({ id: 'test-id2' })
if(fs && fs.mkdirSync) fs.mkdirSync(keysPath, { recursive: true })
const identityStore = await storage.createStore(keysPath)
keystore = new Keystore(identityStore)
identity1 = await Identities.createIdentity({ id: 'test-id1', keystore })
identity2 = await Identities.createIdentity({ id: 'test-id2', keystore })
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
})
after(async () => {
await keystore.close()
if(orbitdb)
await orbitdb.stop()

View File

@@ -1,9 +1,7 @@
const OrbitDbCache = require('orbit-db-cache/Cache.js')
const localdown = require('localstorage-down')
const OrbitDbCache = require('orbit-db-cache')
/**
* A custom cache example. To create a differing custom example, orbitdb cache was
* used with another abstract-leveldown compliant storage, localdown as an example
*/
module.exports = OrbitDbCache(localdown)
module.exports = OrbitDbCache

View File

@@ -40,7 +40,7 @@ class CustomTestKeystore {
verify (signature, publicKey, data) {
return Promise.resolve(true)
}
getPublic (key) {
return key.public.marshal()
}

View File

@@ -13,6 +13,11 @@ const io = require('orbit-db-io')
const IPFS = require('ipfs')
const Identities = require('orbit-db-identity-provider')
const migrate = require('localstorage-level-migration')
const Keystore = require('orbit-db-keystore')
const storage = require('orbit-db-storage-adapter')(leveldown)
storage.preCreate = async (directory, options) => {
fs.mkdirSync(directory, { recursive: true })
}
// Include test utilities
const {
@@ -25,32 +30,36 @@ const {
const dbPath = './orbitdb/tests/v0'
const keyFixtures = './test/fixtures/keys/QmRfPsKJs9YqTot5krRibra4gPwoK4kghhU8iKWxBjGDDX'
const dbFixturesDir = './test/fixtures/v0'
const dbFixturesDir = './test/fixtures/v0/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db'
const ipfsFixturesDir = './test/fixtures/ipfs'
Object.keys(testAPIs).forEach(API => {
describe(`orbit-db - Backward-Compatibility - Open & Load (${API})`, function() {
describe(`orbit-db - Backward-Compatibility - Open & Load (${API})`, function () {
this.timeout(config.timeout)
let ipfsd, ipfs, orbitdb, db, address
let ipfsd, ipfs, orbitdb, db, address, store
let localDataPath
before(async () => {
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
//copy data files to ipfs and orbitdb repos
rmrf.sync(dbPath)
// copy data files to ipfs and orbitdb repos
await fs.copy(path.join(ipfsFixturesDir, 'blocks'), path.join(ipfsd.path, 'blocks'))
await fs.copy(path.join(ipfsFixturesDir, 'datastore'), path.join(ipfsd.path, 'datastore'))
await fs.copy(dbFixturesDir, dbPath)
await fs.copy(dbFixturesDir, path.join(dbPath, ipfs._peerInfo.id._idB58String, 'cache'))
store = await storage.createStore(path.join(dbPath, ipfs._peerInfo.id._idB58String, 'keys'))
const keystore = new Keystore(store)
let identity = await Identities.createIdentity({ id: ipfs._peerInfo.id._idB58String, migrate: migrate(keyFixtures), keystore })
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath, identity, keystore })
let identity = await Identities.createIdentity({ id: ipfs._peerInfo.id._idB58String, migrate: migrate(keyFixtures), identityKeysPath: dbPath + '/keys' })
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath, identity })
})
after(async () => {
await store.close()
rmrf.sync(dbPath)
if(orbitdb)
await orbitdb.stop()
@@ -60,7 +69,11 @@ Object.keys(testAPIs).forEach(API => {
describe('Open & Load', function() {
before(async () => {
db = await orbitdb.open('/orbitdb/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db', {accessController: { type: 'legacy-ipfs', skipManifest: true }})
db = await orbitdb.open('/orbitdb/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db', { accessController: { type: 'legacy-ipfs', skipManifest: true } })
const localFixtures = await db._cache.get('_localHeads')
const remoteFixtures = await db._cache.get('_remoteHeads')
db._cache.set(db.localHeadsPath, localFixtures)
db._cache.set(db.remoteHeadsPath, remoteFixtures)
await db.load()
})
@@ -90,12 +103,11 @@ Object.keys(testAPIs).forEach(API => {
})
it('load v0 orbitdb address', async () => {
assert.equal(db.all.length, 3)
})
it('allows migrated key to write', async () => {
const hash = await db.add({ thing: 'new addition'})
const hash = await db.add({ thing: 'new addition' })
const newEntries = db.all.filter(e => e.v === 1)
assert.equal(newEntries.length, 1)
assert.strictEqual(newEntries[0].hash, hash)

View File

@@ -53,7 +53,7 @@ Object.keys(testAPIs).forEach(API => {
write: [
orbitdb1.identity.id,
orbitdb2.identity.id
],
]
}
}
@@ -92,7 +92,7 @@ Object.keys(testAPIs).forEach(API => {
await database.tryInsert(db2)
assert.equal(database.query(db1).length, 0)
assert.strictEqual(database.query(db1).length, 0)
db1.sync(db2._oplog.heads)
return new Promise(resolve => {
@@ -124,7 +124,7 @@ Object.keys(testAPIs).forEach(API => {
await database.tryInsert(db2)
assert.equal(database.query(db1).length, 0)
assert.strictEqual(database.query(db1).length, 0)
db1.sync(db2._oplog.heads)
return new Promise(resolve => {
@@ -165,10 +165,10 @@ Object.keys(testAPIs).forEach(API => {
// Make sure peer 2's instance throws an error
err = e.toString()
}
assert.equal(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)
assert.strictEqual(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)
// Make sure nothing was added to the database
assert.equal(database.query(db1).length, 0)
assert.strictEqual(database.query(db1).length, 0)
// Try to sync peer 1 with peer 2, this shouldn't produce anything
// at peer 1 (nothing was supposed to be added to the database by peer 2)
@@ -177,7 +177,7 @@ Object.keys(testAPIs).forEach(API => {
return new Promise((resolve, reject) => {
setTimeout(async () => {
// Make sure nothing was added
assert.equal(database.query(db1).length, 0)
assert.strictEqual(database.query(db1).length, 0)
await db1.close()
await db2.close()
if (!err) {
@@ -210,7 +210,7 @@ Object.keys(testAPIs).forEach(API => {
} catch (e) {
err = e.toString()
}
assert.equal(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)
assert.strictEqual(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)
})
})
})