def test_get_connection_raises_a_configuration_error(monkeypatch):
    """Connecting with an unknown or misconfigured backend must raise
    ``ConfigurationError``."""
    from bigchaindb.common.exceptions import ConfigurationError
    from bigchaindb.backend import connect

    # An unknown backend name is rejected outright.
    with pytest.raises(ConfigurationError):
        connect('msaccess', 'localhost', '1337', 'mydb')

    with pytest.raises(ConfigurationError):
        # We need to force a misconfiguration here
        monkeypatch.setattr('bigchaindb.backend.connection.BACKENDS',
                            {'catsandra': 'bigchaindb.backend.meowmeow.Catsandra'})
        connect('catsandra', 'localhost', '1337', 'mydb')
def __init__(self, public_key=None, private_key=None, keyring=None,
             connection=None, backlog_reassign_delay=None):
    """Initialize the Bigchain instance

    A Bigchain instance has several configuration parameters (e.g. host).
    If a parameter value is passed as an argument to the Bigchain
    __init__ method, then that is the value it will have.
    Otherwise, the parameter value will come from an environment variable.
    If that environment variable isn't set, then the value
    will come from the local configuration file. And if that variable
    isn't in the local configuration file, then the parameter will have
    its default value (defined in bigchaindb.__init__).

    Args:
        public_key (str): the base58 encoded public key for the ED25519 curve.
        private_key (str): the base58 encoded private key for the ED25519 curve.
        keyring (list[str]): list of base58 encoded public keys of the
            federation nodes.
        connection (:class:`~bigchaindb.backend.connection.Connection`):
            A connection to the database.

    Raises:
        KeypairNotFoundException: if no keypair could be resolved from the
            arguments or the configuration.
    """
    config_utils.autoconfigure()
    self.me = public_key or bigchaindb.config['keypair']['public']
    self.me_private = private_key or bigchaindb.config['keypair']['private']
    # ``keyring`` previously defaulted to a mutable ``[]``; ``None`` avoids
    # the shared-mutable-default pitfall and behaves identically, since
    # both ``None`` and ``[]`` are falsy in the ``or`` fallback below.
    self.nodes_except_me = keyring or bigchaindb.config['keyring']
    self.backlog_reassign_delay = (backlog_reassign_delay or
                                   bigchaindb.config['backlog_reassign_delay'])
    self.consensus = BaseConsensusRules
    self.connection = connection if connection else backend.connect(
        **bigchaindb.config['database'])

    if not self.me or not self.me_private:
        raise exceptions.KeypairNotFoundException()
def test_connection_run_errors():
    """pymongo driver errors are translated into backend exceptions.

    ``AutoReconnect`` is retried once (hence two recorded calls) before
    surfacing as ``ConnectionError``; ``DuplicateKeyError`` and
    ``OperationFailure`` are mapped directly without a retry.
    """
    from bigchaindb.backend import connect
    from bigchaindb.backend.exceptions import (DuplicateKeyError,
                                               OperationError,
                                               ConnectionError)

    conn = connect()

    query = mock.Mock()
    query.run.side_effect = pymongo.errors.AutoReconnect('foo')
    with pytest.raises(ConnectionError):
        conn.run(query)
    # one retry after AutoReconnect -> two attempts in total
    assert query.run.call_count == 2

    query = mock.Mock()
    query.run.side_effect = pymongo.errors.DuplicateKeyError('foo')
    with pytest.raises(DuplicateKeyError):
        conn.run(query)
    assert query.run.call_count == 1

    query = mock.Mock()
    query.run.side_effect = pymongo.errors.OperationFailure('foo')
    with pytest.raises(OperationError):
        conn.run(query)
    assert query.run.call_count == 1
def __init__(self, connection=None):
    """Initialize the Bigchain instance

    A Bigchain instance has several configuration parameters (e.g. host).
    If a parameter value is passed as an argument to the Bigchain
    __init__ method, then that is the value it will have.
    Otherwise, the parameter value will come from an environment variable.
    If that environment variable isn't set, then the value
    will come from the local configuration file. And if that variable
    isn't in the local configuration file, then the parameter will have
    its default value (defined in bigchaindb.__init__).

    Args:
        connection (:class:`~bigchaindb.backend.connection.Connection`):
            A connection to the database.
    """
    config_utils.autoconfigure()
    self.mode_commit = 'broadcast_tx_commit'
    self.mode_list = ('broadcast_tx_async',
                      'broadcast_tx_sync',
                      self.mode_commit)
    self.tendermint_host = bigchaindb.config['tendermint']['host']
    self.tendermint_port = bigchaindb.config['tendermint']['port']
    self.endpoint = 'http://{}:{}/'.format(self.tendermint_host,
                                           self.tendermint_port)

    # PEP 8: local renamed from camelCase ``validationPlugin``.
    validation_plugin = bigchaindb.config.get('validation_plugin')
    if validation_plugin:
        self.validation = config_utils.load_validation_plugin(validation_plugin)
    else:
        self.validation = BaseValidationRules

    self.connection = connection if connection else backend.connect(
        **bigchaindb.config['database'])
def test_store_abci_chain(description, stores, expected):
    """After storing each chain record in order, the latest one matches
    ``expected``."""
    conn = connect()

    for chain_record in stores:
        query.store_abci_chain(conn, **chain_record)

    latest = query.get_latest_abci_chain(conn)
    assert expected == latest, description
def test_drop(dummy_db):
    """Dropping a database removes it from the server's database listing."""
    from bigchaindb import backend
    from bigchaindb.backend import schema

    connection = backend.connect()
    assert dummy_db in connection.conn.database_names()

    schema.drop_database(connection, dummy_db)
    assert dummy_db not in connection.conn.database_names()
def _setup_database(_configure_bigchaindb):
    """Fixture: create a clean test database, then drop it on teardown."""
    from bigchaindb import config
    from bigchaindb.backend import connect
    print('Initializing test db')
    dbname = config['database']['name']
    conn = connect()

    _drop_db(conn, dbname)
    schema.init_database(conn)
    print('Finishing init database')

    yield

    # Teardown opens a fresh connection; the one above may be stale by now.
    print('Deleting `{}` database'.format(dbname))
    conn = connect()
    _drop_db(conn, dbname)
    print('Finished deleting `{}`'.format(dbname))
def test_get_owned_ids(signed_create_tx, user_pk):
    """A stored CREATE transaction appears among the owner's transactions."""
    from bigchaindb.backend import connect, query
    conn = connect()

    # insert a transaction
    tx_dict = deepcopy(signed_create_tx.to_dict())
    conn.db.transactions.insert_one(tx_dict)

    owned = list(query.get_owned_ids(conn, user_pk))
    assert owned[0] == signed_create_tx.to_dict()
def run_remove_replicas(args):
    """Remove hosts from the replicaset, exiting with the error on failure."""
    # Note: This command is specific to MongoDB
    conn = backend.connect()
    try:
        remove_replicas(conn, args.replicas)
    except (OperationError, NotImplementedError) as err:
        sys.exit(str(err))
    else:
        print('Removed {} from the replicaset.'.format(args.replicas))
def run_set_replicas(args):
    """Reconfigure every table to use ``args.num_replicas`` replicas.

    Each table keeps its current shard count; only the replica count
    changes. A failure on one table is logged and does not abort the
    remaining tables.
    """
    conn = backend.connect()
    for table in ['bigchain', 'backlog', 'votes']:
        # See https://www.rethinkdb.com/api/python/config/
        table_config = conn.run(r.table(table).config())
        num_shards = len(table_config['shards'])
        try:
            conn.run(r.table(table).reconfigure(shards=num_shards,
                                                replicas=args.num_replicas))
        except r.ReqlOpFailedError as e:
            # ``Logger.warn`` is a deprecated alias for ``warning``.
            logger.warning(e)
def test_store_block():
    """store_block persists exactly one document in the blocks collection."""
    from bigchaindb.backend import connect, query
    from bigchaindb.lib import Block

    conn = connect()
    new_block = Block(app_hash='random_utxo', height=3, transactions=[])
    query.store_block(conn, new_block._asdict())

    stored = conn.db.blocks.find({}, projection={'_id': False})
    assert stored.count() == 1
def test_init_database_is_graceful_if_db_exists():
    """init_database must not raise when the database already exists."""
    import bigchaindb
    from bigchaindb import backend
    from bigchaindb.backend.schema import init_database

    conn = backend.connect()
    dbname = bigchaindb.config['database']['name']

    # The db is set up by the fixtures
    assert dbname in conn.conn.database_names()

    init_database()
def test_get_block():
    """get_block retrieves a block previously inserted at a given height."""
    from bigchaindb.backend import connect, query
    from bigchaindb.lib import Block

    conn = connect()
    conn.db.blocks.insert_one(
        Block(app_hash='random_utxo', height=3, transactions=[])._asdict())

    fetched = dict(query.get_block(conn, 3))
    assert fetched['height'] == 3
def run_drop(args):
    """Drop the database, asking for confirmation unless ``--yes`` was given."""
    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
    dbname = bigchaindb.config["database"]["name"]

    if not args.yes:
        response = input("Do you want to drop `{}` database? [y/n]: ".format(dbname))
        if response != "y":
            return

    conn = backend.connect()
    # NOTE: ``dbname`` used to be re-read from the config here; the
    # redundant second lookup has been removed.
    schema.drop_database(conn, dbname)
def dummy_db(request):
    """Fixture: yield the name of a freshly initialized database, dropping
    it again on teardown."""
    from bigchaindb.backend import connect

    conn = connect()
    dbname = request.fixturename
    # Under pytest-xdist each worker gets its own suffix so parallel
    # test runs do not clash on one database.
    xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
    if xdist_suffix:
        dbname = '{}_{}'.format(dbname, xdist_suffix)

    _drop_db(conn, dbname)  # make sure we start with a clean DB
    schema.init_database(conn, dbname)

    yield dbname

    _drop_db(conn, dbname)
def _bdb(_setup_database, _configure_bigchaindb):
    """Fixture: flush the database and memoization caches after each test."""
    from bigchaindb import config
    from bigchaindb.backend import connect
    from .utils import flush_db
    from bigchaindb.common.memoize import to_dict, from_dict
    from bigchaindb.models import Transaction

    conn = connect()
    yield

    dbname = config['database']['name']
    flush_db(conn, dbname)
    # Clear process-wide memoization caches so state cannot leak
    # between tests.
    to_dict.cache_clear()
    from_dict.cache_clear()
    Transaction._input_valid.cache_clear()
def test_get_metadata():
    """Every stored metadata document can be fetched back by its id."""
    from bigchaindb.backend import connect, query
    conn = connect()

    docs = [
        {'id': 1, 'metadata': None},
        {'id': 2, 'metadata': {'key': 'value'}},
        {'id': 3, 'metadata': '3'},
    ]
    conn.db.metadata.insert_many(deepcopy(docs), ordered=False)

    for doc in docs:
        assert query.get_metadata(conn, [doc['id']])
def test_get_assets():
    """Every stored asset can be fetched back by its id."""
    from bigchaindb.backend import connect, query
    conn = connect()

    stored_assets = [
        {'id': 1, 'data': '1'},
        {'id': 2, 'data': '2'},
        {'id': 3, 'data': '3'},
    ]
    conn.db.assets.insert_many(deepcopy(stored_assets), ordered=False)

    for stored in stored_assets:
        assert query.get_asset(conn, stored['id'])
def test_connection_error(mock_client):
    """A persistent driver failure surfaces as backend ``ConnectionError``.

    The connection retries before giving up, so the mocked client is
    expected to have been invoked three times in total.
    """
    from bigchaindb.backend import connect
    from bigchaindb.backend.exceptions import ConnectionError

    # force the driver to throw ConnectionFailure
    # the mock on time.sleep is to prevent the actual sleep when running
    # the tests
    mock_client.side_effect = pymongo.errors.ConnectionFailure()

    with pytest.raises(ConnectionError):
        conn = connect()
        conn.db  # accessing .db triggers the lazy connection attempt

    assert mock_client.call_count == 3
def run_drop(args):
    """Drop the database, asking for confirmation unless ``--yes`` was given."""
    dbname = bigchaindb.config['database']['name']

    if not args.yes:
        response = input_on_stderr(
            'Do you want to drop `{}` database? [y/n]: '.format(dbname))
        if response != 'y':
            return

    conn = backend.connect()
    # NOTE: the config lookup for ``dbname`` used to be repeated here;
    # the redundant second read has been removed.
    try:
        schema.drop_database(conn, dbname)
    except DatabaseDoesNotExist:
        print("Cannot drop '{name}'. The database does not exist.".format(name=dbname),
              file=sys.stderr)
def test_get_connection_returns_the_correct_instance(db_host, db_port):
    """connect() yields a LocalMongoDBConnection for the localmongodb backend."""
    from bigchaindb.backend import connect
    from bigchaindb.backend.connection import Connection
    from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection

    settings = {
        'backend': 'localmongodb',
        'host': db_host,
        'port': db_port,
        'name': 'test',
        'replicaset': None,
    }

    conn = connect(**settings)
    assert isinstance(conn, Connection)
    assert isinstance(conn, LocalMongoDBConnection)
    assert conn.conn._topology_settings.replica_set_name == settings['replicaset']
def test_create_tables():
    """create_tables builds every collection with its expected indexes."""
    import bigchaindb
    from bigchaindb import backend
    from bigchaindb.backend import schema

    conn = backend.connect()
    dbname = bigchaindb.config['database']['name']

    # The db is set up by the fixtures so we need to remove it
    conn.conn.drop_database(dbname)
    schema.create_database(conn, dbname)
    schema.create_tables(conn, dbname)

    # All expected collections exist.
    collection_names = conn.conn[dbname].collection_names()
    assert set(collection_names) == {
        'transactions', 'assets', 'metadata', 'blocks', 'utxos', 'validators',
        'elections', 'pre_commit', 'abci_chains',
    }

    # assets indexes
    indexes = conn.conn[dbname]['assets'].index_information().keys()
    assert set(indexes) == {'_id_', 'asset_id', 'text'}

    # transactions indexes; transaction_id must be unique
    index_info = conn.conn[dbname]['transactions'].index_information()
    indexes = index_info.keys()
    assert set(indexes) == {
        '_id_', 'transaction_id', 'asset_id', 'outputs', 'inputs'}
    assert index_info['transaction_id']['unique']

    # blocks indexes; height must be unique
    index_info = conn.conn[dbname]['blocks'].index_information()
    indexes = index_info.keys()
    assert set(indexes) == {'_id_', 'height'}
    assert index_info['height']['unique']

    # utxos: unique compound index over (transaction_id, output_index)
    index_info = conn.conn[dbname]['utxos'].index_information()
    assert set(index_info.keys()) == {'_id_', 'utxo'}
    assert index_info['utxo']['unique']
    assert index_info['utxo']['key'] == [('transaction_id', 1),
                                         ('output_index', 1)]

    # elections indexes; election_id must be unique
    indexes = conn.conn[dbname]['elections'].index_information()
    assert set(indexes.keys()) == {'_id_', 'election_id'}
    assert indexes['election_id']['unique']

    # pre_commit indexes; pre_commit_id must be unique
    indexes = conn.conn[dbname]['pre_commit'].index_information()
    assert set(indexes.keys()) == {'_id_', 'pre_commit_id'}
    assert indexes['pre_commit_id']['unique']
def test_bigchain_class_initialization_with_parameters():
    """A connection passed to BigchainDB explicitly is adopted as-is."""
    from bigchaindb import BigchainDB
    from bigchaindb.backend import connect
    from bigchaindb.validation import BaseValidationRules

    db_kwargs = {
        'backend': 'localmongodb',
        'host': 'this_is_the_db_host',
        'port': 12345,
        'name': 'this_is_the_db_name',
    }
    conn = connect(**db_kwargs)

    bigchain = BigchainDB(connection=conn)
    assert bigchain.connection == conn
    assert bigchain.connection.host == db_kwargs['host']
    assert bigchain.connection.port == db_kwargs['port']
    assert bigchain.connection.dbname == db_kwargs['name']
    assert bigchain.validation == BaseValidationRules
def test_init_creates_db_tables_and_indexes():
    """init_database creates every collection and index from scratch."""
    import bigchaindb
    from bigchaindb import backend
    from bigchaindb.backend.schema import init_database

    conn = backend.connect()
    dbname = bigchaindb.config['database']['name']

    # the db is set up by the fixture so we need to remove it
    conn.conn.drop_database(dbname)

    init_database()

    collection_names = conn.conn[dbname].collection_names()
    assert set(collection_names) == {
        'transactions', 'assets', 'metadata', 'blocks', 'utxos', 'pre_commit',
        'validators', 'elections', 'abci_chains',
    }

    # Per-collection index expectations.
    indexes = conn.conn[dbname]['assets'].index_information().keys()
    assert set(indexes) == {'_id_', 'asset_id', 'text'}

    indexes = conn.conn[dbname]['transactions'].index_information().keys()
    assert set(indexes) == {
        '_id_', 'transaction_id', 'asset_id', 'outputs', 'inputs'}

    indexes = conn.conn[dbname]['blocks'].index_information().keys()
    assert set(indexes) == {'_id_', 'height'}

    indexes = conn.conn[dbname]['utxos'].index_information().keys()
    assert set(indexes) == {'_id_', 'utxo'}

    indexes = conn.conn[dbname]['pre_commit'].index_information().keys()
    assert set(indexes) == {'_id_', 'pre_commit_id'}

    indexes = conn.conn[dbname]['validators'].index_information().keys()
    assert set(indexes) == {'_id_', 'height'}

    indexes = conn.conn[dbname]['abci_chains'].index_information().keys()
    assert set(indexes) == {'_id_', 'height', 'chain_id'}

    indexes = conn.conn[dbname]['elections'].index_information().keys()
    assert set(indexes) == {'_id_', 'election_id'}
def test_write_metadata():
    """store_metadatas writes all documents, preserving their content."""
    from bigchaindb.backend import connect, query
    conn = connect()

    metadata = [
        {'id': 1, 'data': '1'},
        {'id': 2, 'data': '2'},
        {'id': 3, 'data': '3'}
    ]

    # write the assets
    query.store_metadatas(conn, deepcopy(metadata))

    # check that 3 assets were written to the database
    stored = (conn.db.metadata
              .find({}, projection={'_id': False})
              .sort('id', pymongo.ASCENDING))
    assert stored.count() == 3
    assert list(stored) == metadata
def test_outputs_query_key_order(b, user_pk, user_sk, user2_pk, user2_sk):
    """get_outputs_filtered must not depend on the key order of ``fulfills``.

    The same transfer is stored twice — once with each key ordering in
    the ``fulfills`` dict — and the filtered-output counts must match.
    """
    from bigchaindb import backend
    from bigchaindb.backend import connect

    tx1 = Transaction.create([user_pk],
                             [([user_pk], 3), ([user_pk], 2), ([user_pk], 1)])\
                     .sign([user_sk])
    b.store_bulk_transactions([tx1])

    inputs = tx1.to_inputs()
    tx2 = Transaction.transfer([inputs[1]],
                               [([user2_pk], 2)],
                               tx1.id).sign([user_sk])
    assert tx2.validate(b)

    tx2_dict = tx2.to_dict()
    fulfills = tx2_dict['inputs'][0]['fulfills']
    # first ordering: transaction_id before output_index
    tx2_dict['inputs'][0]['fulfills'] = {'transaction_id': fulfills['transaction_id'],
                                         'output_index': fulfills['output_index']}
    backend.query.store_transactions(b.connection, [tx2_dict])

    outputs = b.get_outputs_filtered(user_pk, spent=False)
    assert len(outputs) == 2

    outputs = b.get_outputs_filtered(user2_pk, spent=False)
    assert len(outputs) == 1

    # clean the transaction, metdata and asset collection
    conn = connect()
    conn.run(conn.collection('transactions').delete_many({}))
    conn.run(conn.collection('metadata').delete_many({}))
    conn.run(conn.collection('assets').delete_many({}))

    b.store_bulk_transactions([tx1])

    tx2_dict = tx2.to_dict()
    # second ordering: output_index before transaction_id
    tx2_dict['inputs'][0]['fulfills'] = {'output_index': fulfills['output_index'],
                                         'transaction_id': fulfills['transaction_id']}
    backend.query.store_transactions(b.connection, [tx2_dict])

    outputs = b.get_outputs_filtered(user_pk, spent=False)
    assert len(outputs) == 2

    outputs = b.get_outputs_filtered(user2_pk, spent=False)
    assert len(outputs) == 1
def test_get_spending_transactions(user_pk, user_sk):
    """get_spending_transactions returns only spends of the requested links."""
    from bigchaindb.backend import connect, query
    from bigchaindb.models import Transaction
    conn = connect()

    out = [([user_pk], 1)]
    tx1 = Transaction.create([user_pk], out * 3)
    tx1.sign([user_sk])
    inputs = tx1.to_inputs()

    # Each transfer spends a different output of tx1.
    tx2 = Transaction.transfer([inputs[0]], out, tx1.id).sign([user_sk])
    tx3 = Transaction.transfer([inputs[1]], out, tx1.id).sign([user_sk])
    tx4 = Transaction.transfer([inputs[2]], out, tx1.id).sign([user_sk])
    txns = [deepcopy(tx.to_dict()) for tx in [tx1, tx2, tx3, tx4]]
    conn.db.transactions.insert_many(txns)

    # only ask for spends of outputs 0 and 2
    links = [inputs[0].fulfills.to_dict(), inputs[2].fulfills.to_dict()]
    txns = list(query.get_spending_transactions(conn, links))

    # tx3 not a member because input 1 not asked for
    assert txns == [tx2.to_dict(), tx4.to_dict()]
def test_validator_update():
    """get_validator_set returns the latest set at or below a given height."""
    from bigchaindb.backend import connect, query
    conn = connect()

    def gen_validator_update(height):
        # minimal validator-set record for a given height
        return {'data': 'somedata',
                'height': height,
                'election_id': f'election_id_at_height_{height}'}

    # store validator sets at heights 1, 11, 21, ..., 91
    for i in range(1, 100, 10):
        value = gen_validator_update(i)
        query.store_validator_set(conn, value)

    v1 = query.get_validator_set(conn, 8)
    assert v1['height'] == 1

    v41 = query.get_validator_set(conn, 50)
    assert v41['height'] == 41

    # without a height argument the most recent set is returned
    v91 = query.get_validator_set(conn)
    assert v91['height'] == 91
def test_get_spending_transactions_multiple_inputs():
    """Spends are reported per output link, including multi-input spenders."""
    from bigchaindb.backend import connect, query
    from bigchaindb.models import Transaction
    from bigchaindb.common.crypto import generate_key_pair
    conn = connect()

    (alice_sk, alice_pk) = generate_key_pair()
    (bob_sk, bob_pk) = generate_key_pair()
    (carol_sk, carol_pk) = generate_key_pair()

    out = [([alice_pk], 9)]
    tx1 = Transaction.create([alice_pk], out).sign([alice_sk])

    inputs1 = tx1.to_inputs()
    tx2 = Transaction.transfer([inputs1[0]],
                               [([alice_pk], 6), ([bob_pk], 3)],
                               tx1.id).sign([alice_sk])

    inputs2 = tx2.to_inputs()
    tx3 = Transaction.transfer([inputs2[0]],
                               [([bob_pk], 3), ([carol_pk], 3)],
                               tx1.id).sign([alice_sk])

    inputs3 = tx3.to_inputs()
    # tx4 spends two outputs at once: tx2's output 1 and tx3's output 0.
    tx4 = Transaction.transfer([inputs2[1], inputs3[0]],
                               [([carol_pk], 6)],
                               tx1.id).sign([bob_sk])

    txns = [deepcopy(tx.to_dict()) for tx in [tx1, tx2, tx3, tx4]]
    conn.db.transactions.insert_many(txns)

    # (link, expected number of spenders, expected spender ids)
    links = [
        ({'transaction_id': tx2.id, 'output_index': 0}, 1, [tx3.id]),
        ({'transaction_id': tx2.id, 'output_index': 1}, 1, [tx4.id]),
        ({'transaction_id': tx3.id, 'output_index': 0}, 1, [tx4.id]),
        ({'transaction_id': tx3.id, 'output_index': 1}, 0, None),
    ]
    for l, num, match in links:
        txns = list(query.get_spending_transactions(conn, [l]))
        assert len(txns) == num
        if len(txns):
            assert [tx['id'] for tx in txns] == match
def test_write_assets():
    """store_asset skips documents whose id already exists."""
    from bigchaindb.backend import connect, query
    conn = connect()

    assets = [
        {'id': 1, 'data': '1'},
        {'id': 2, 'data': '2'},
        {'id': 3, 'data': '3'},
        # Duplicated id. Should not be written to the database
        {'id': 1, 'data': '1'},
    ]

    # write the assets
    for asset in assets:
        query.store_asset(conn, deepcopy(asset))

    # check that 3 assets were written to the database
    stored = (conn.db.assets
              .find({}, projection={'_id': False})
              .sort('id', pymongo.ASCENDING))
    assert stored.count() == 3
    assert list(stored) == assets[:-1]
def run_set_shards(args):
    """Reconfigure the cluster to use ``args.num_shards`` shards,
    logging (not raising) on failure."""
    conn = backend.connect()
    try:
        set_shards(conn, shards=args.num_shards)
    except OperationError as e:
        # ``Logger.warn`` is a deprecated alias for ``warning``.
        logger.warning(e)
def run_set_replicas(args):
    """Reconfigure the cluster to use ``args.num_replicas`` replicas,
    logging (not raising) on failure."""
    conn = backend.connect()
    try:
        set_replicas(conn, replicas=args.num_replicas)
    except OperationError as e:
        # ``Logger.warn`` is a deprecated alias for ``warning``.
        logger.warning(e)
def test_run_a_simple_query():
    """A trivial ReQL expression round-trips through the connection."""
    from bigchaindb.backend import connect

    conn = connect()
    simple_query = r.expr('1')
    assert conn.run(simple_query) == '1'
def run_set_shards(args):
    """Set the shard count, exiting with the error message on failure."""
    conn = backend.connect()
    try:
        set_shards(conn, shards=args.num_shards)
    except OperationError as err:
        sys.exit(str(err))
def get_changefeed():
    """Return a changefeed of inserts on the 'votes' table."""
    conn = backend.connect(**bigchaindb.config['database'])
    return backend.get_changefeed(conn, 'votes', ChangeFeed.INSERT)
def test_drop(dummy_db):
    """drop_database removes the database from the RethinkDB server."""
    conn = backend.connect()

    def db_exists():
        return conn.run(r.db_list().contains(dummy_db))

    assert db_exists() is True
    schema.drop_database(conn, dummy_db)
    assert db_exists() is False
def run_set_replicas(args):
    """Set the replica count, exiting with the error message on failure."""
    conn = backend.connect()
    try:
        set_replicas(conn, replicas=args.num_replicas)
    except OperationError as err:
        sys.exit(str(err))
def get_changefeed():
    """Return a changefeed of inserts and updates on the 'backlog' table."""
    conn = backend.connect(**bigchaindb.config['database'])
    return backend.get_changefeed(conn, 'backlog',
                                  ChangeFeed.INSERT | ChangeFeed.UPDATE)
def test_text_search(table):
    """Exercise the MongoDB text-search options exposed by ``query.text_search``
    (single word, OR terms, phrase, negation, language, case/diacritic
    sensitivity, text score, and result limit)."""
    from bigchaindb.backend import connect, query
    conn = connect()

    # Example data and tests cases taken from the mongodb documentation
    # https://docs.mongodb.com/manual/reference/operator/query/text/
    objects = [
        {'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
        {'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
        {'id': 3, 'subject': 'Baking a cake', 'author': 'abc', 'views': 90},
        {'id': 4, 'subject': 'baking', 'author': 'xyz', 'views': 100},
        {'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
        {'id': 6, 'subject': 'Сырники', 'author': 'jkl', 'views': 80},
        {'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
        {'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10},
    ]

    # insert the assets
    conn.db[table].insert_many(deepcopy(objects), ordered=False)

    # test search single word
    assert list(query.text_search(conn, 'coffee', table=table)) == [
        {'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
        {'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
        {'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
    ]

    # match any of the search terms
    assert list(query.text_search(conn, 'bake coffee cake', table=table)) == [
        {'author': 'abc', 'id': 3, 'subject': 'Baking a cake', 'views': 90},
        {'author': 'xyz', 'id': 1, 'subject': 'coffee', 'views': 50},
        {'author': 'xyz', 'id': 4, 'subject': 'baking', 'views': 100},
        {'author': 'efg', 'id': 2, 'subject': 'Coffee Shopping', 'views': 5},
        {'author': 'efg', 'id': 7, 'subject': 'coffee and cream', 'views': 10},
    ]

    # search for a phrase
    assert list(query.text_search(conn, '\"coffee shop\"', table=table)) == [
        {'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
    ]

    # exclude documents that contain a term
    assert list(query.text_search(conn, 'coffee -shop', table=table)) == [
        {'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
        {'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
    ]

    # search different language
    assert list(query.text_search(conn, 'leche', language='es', table=table)) == [
        {'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
        {'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10},
    ]

    # case and diacritic insensitive search
    assert list(query.text_search(conn, 'сы́рники CAFÉS', table=table)) == [
        {'id': 6, 'subject': 'Сырники', 'author': 'jkl', 'views': 80},
        {'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
        {'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10},
    ]

    # case sensitive search
    assert list(
        query.text_search(conn, 'Coffee', case_sensitive=True, table=table)) == [
        {'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
    ]

    # diacritic sensitive search
    assert list(
        query.text_search(conn, 'CAFÉ', diacritic_sensitive=True, table=table)) == [
        {'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
    ]

    # return text score
    assert list(query.text_search(conn, 'coffee', text_score=True, table=table)) == [
        {'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50, 'score': 1.0},
        {'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5, 'score': 0.75},
        {'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10, 'score': 0.75},
    ]

    # limit search result
    assert list(query.text_search(conn, 'coffee', limit=2, table=table)) == [
        {'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
        {'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
    ]
def db_conn():
    """Fixture: a fresh connection to the configured backend."""
    from bigchaindb.backend import connect

    return connect()
def get_changefeed():
    """Return a changefeed of inserts on 'bigchain', prefed via initial()."""
    conn = backend.connect(**bigchaindb.config['database'])
    return backend.get_changefeed(conn, 'bigchain', ChangeFeed.INSERT,
                                  prefeed=initial())
def test_create_database(not_yet_created_db):
    """create_database makes the database visible in the server listing."""
    conn = backend.connect()
    schema.create_database(conn, not_yet_created_db)

    created = conn.run(r.db_list().contains(not_yet_created_db))
    assert created is True