Example #1
    def test_seek(self):
        """Given a database with three items and a cursor on the primary keys,
        test that the cursor can be properly positioned at an arbitrary element
        in the natural order of the keys.
        """
        db = IndexedDatabase(
            os.path.join(self._temp_dir, 'test_db'),
            _serialize_tuple,
            _deserialize_tuple,
            indexes={'name': lambda tup: [tup[1].encode()]},
            flag='c',
            _size=1024**2)

        db.put('1', (1, "alice", "Alice's data"))
        db.put('2', (2, "bob", "Bob's data"))
        db.put('3', (3, 'charlie', "Charlie's data"))

        with db.cursor() as curs:
            curs.seek('2')
            self.assertEqual(2, curs.value()[0])

            self.assertEqual('2', curs.key())

            iterator = curs.iter()
            self.assertEqual(2, next(iterator)[0])
Example #2
    def test_hex_ordered_indexing(self):
        """Test that an index that uses hex-encoded keys will properly order
        the keys (i.e. the natural order of the keys is in numerical order).
        """

        def to_hex(num):
            return "{0:#0{1}x}".format(num, 18)

        def age_index_key_fn(tup):
            return [to_hex(tup[0]).encode()]

        db = IndexedDatabase(
            os.path.join(self._temp_dir, 'test_db'),
            _serialize_tuple,
            _deserialize_tuple,
            indexes={'age': age_index_key_fn},
            flag='c',
            _size=1024**2)

        entry_count = 100
        for i in range(1, entry_count):
            db.put(str(entry_count - i), (i, "foo" + str(i), "data"))

        self.assertEqual(
            [to_hex(i) for i in range(1, entry_count)],
            list(db.keys(index='age')))
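
A quick aside (not part of the test): zero-padded, fixed-width hex strings compare lexicographically in the same order as the integers they encode, which is why the 'age' index above comes back in numerical order. A minimal sketch:

def to_hex(num):
    return "{0:#0{1}x}".format(num, 18)

assert to_hex(1) == '0x0000000000000001'
assert to_hex(255) == '0x00000000000000ff'
# byte-wise (lexicographic) order matches numeric order
assert sorted([to_hex(20), to_hex(3), to_hex(100)]) == [to_hex(3), to_hex(20), to_hex(100)]
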
Example #3
    def test_integer_indexing(self):
        """Test that a database can be indexed using integer keys.
        """
        int_index_config = {
            'key_fn': lambda tup: [struct.pack('I', tup[0])],
            'integerkey': True
        }
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'age': int_index_config},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (30, "alice", "Alice's data"))
        db.put('3', (20, "bob", "Bob's data"))
        db.put('4', (12, "charlie", "Charlie's data"))

        self.assertEqual(
            [1, 12, 20, 30],
            [struct.unpack('I', k.encode())[0] for k in db.keys(index='age')])
        with db.cursor(index='age') as curs:
            self.assertEqual([(1, "foo", "bar"),
                              (12, "charlie", "Charlie's data"),
                              (20, "bob", "Bob's data"),
                              (30, "alice", "Alice's data")],
                             list(curs.iter()))
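
For context, a sketch under the assumption that 'integerkey' maps to LMDB's MDB_INTEGERKEY comparator: struct.pack('I', n) emits a 4-byte unsigned int in native byte order, so plain byte-wise comparison would not give numeric order on little-endian machines, and the integer-key comparator is what keeps the 'age' index sorted numerically.

import struct
import sys

keys = {n: struct.pack('I', n) for n in (1, 12, 20, 30)}
assert all(struct.unpack('I', b)[0] == n for n, b in keys.items())

if sys.byteorder == 'little':
    # Byte-wise ordering would be wrong here: 256 packs to b'\x00\x01\x00\x00',
    # which sorts before 1's b'\x01\x00\x00\x00'.
    assert struct.pack('I', 256) < struct.pack('I', 1)
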
Example #4
    def test_delete(self):
        """Test that items are deleted, including their index references.
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        with db.cursor(index='name') as curs:
            ordered_values = list(curs.iter())

        self.assertEqual([(2, "alice", "Alice's data"),
                          (3, "bob", "Bob's data"), (1, "foo", "bar")],
                         ordered_values)

        db.delete('3')

        with db.cursor(index='name') as curs:
            ordered_values = list(curs.iter())

        self.assertEqual([(2, "alice", "Alice's data"), (1, "foo", "bar")],
                         ordered_values)
Example #5
    def test_contains(self):
        """Given a database with three records and an index, test the
        following:
         - a primary key will return True for `contains_key` and `in`
         - a non-existent key will return False for both of those methods
         - an index key will return True for `contains_key`, using the index
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        self.assertTrue('1' in db)
        self.assertTrue(db.contains_key('1'))
        self.assertFalse('4' in db)
        self.assertFalse(db.contains_key('4'))

        self.assertTrue(db.contains_key('alice', index='name'))
        self.assertFalse(db.contains_key('charlie', index='name'))
Example #6
    def test_index_iteration_with_concurrent_mods(self):
        """Given a database with three items, and a cursor on the index keys,
        test that a concurrent update will:
        - not change the results
        - not interrupt the iteration with an error
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        with db.cursor(index='name') as curs:
            iterator = curs.iter()

            self.assertEqual(2, next(iterator)[0])

            db.put('4', (4, 'charlie', "Charlie's data"))

            self.assertEqual(3, next(iterator)[0])
            self.assertEqual(1, next(iterator)[0])

            with self.assertRaises(StopIteration):
                next(iterator)
Example #7
def get_databases(bind_network, data_dir, database=None):
    # Get the global state database to operate on
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('verifying state in %s', global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())

    if database:
        LOGGER.debug('get_databases: OPEN ORIENTDB uri=%s', database)
        block_db = OrientDatabase(
            database,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            indexes=BlockStore.create_index_configuration(),
            flag='c')
        LOGGER.debug('get_databases:OPEN ORIENT DB DONE %s', block_db)
    else:
        # Get the blockstore
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = IndexedDatabase(
            block_db_filename,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            flag='c',
            indexes=BlockStore.create_index_configuration())

    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
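
A minimal usage sketch (the endpoint and path below are illustrative assumptions; the last two characters of bind_network become the suffix of the .lmdb file names):

global_state_db, blockstore = get_databases(
    bind_network='tcp://127.0.0.1:8800',
    data_dir='/var/lib/sawtooth')
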
Example #8
    def test_count(self):
        """Test that a database with three records, plus an index will
        return the correct count of primary key/values, using `len`.
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        self.assertEqual(3, len(db))
Example #9
    def test_update(self):
        """Test that a database will commit both inserts and deletes using the
        update method.
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        db.update([('4', (4, 'charlie', "Charlie's data"))], ['1'])

        self.assertEqual(['2', '3', '4'], db.keys())
Example #10
    def test_index_empty_db(self):
        """Given an empty database, show that the cursor will return a value of
        None for the first position.
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        with db.cursor(index='name') as curs:
            curs.first()
            self.assertIsNone(curs.value())

        with db.cursor() as curs:
            curs.first()
            self.assertIsNone(curs.value())
Example #11
    def test_indexing(self):
        """Test basic indexing around name
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        self.assertEqual((1, "foo", "bar"), db.get('1'))

        self.assertEqual((2, "alice", "Alice's data"),
                         db.get('alice', index='name'))
Example #12
    def test_get_multi(self):
        """Given a database with three records and an index, test that it can
        return multiple values from a set of keys.

        Show that:
         - it returns all existing values in the list
         - it ignores keys that do not exist (i.e. no error is thrown)
         - it works with index keys in the same manner
        """
        db = IndexedDatabase(
            os.path.join(self._temp_dir, 'test_db'),
            _serialize_tuple,
            _deserialize_tuple,
            indexes={'name': lambda tup: [tup[1].encode()]},
            flag='c',
            _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        # get multi via the primary key
        self.assertEqual([
            ('3', (3, "bob", "Bob's data")),
            ('2', (2, "alice", "Alice's data"))],
            db.get_multi(['3', '2']))

        # Ignore unknown primary keys
        self.assertEqual([
            ('3', (3, "bob", "Bob's data"))],
            db.get_multi(['3', '4']))

        # Get multi via an index
        self.assertEqual([
            ('1', (1, "foo", "bar")),
            ('3', (3, "bob", "Bob's data"))],
            db.get_multi(['foo', 'bob'], index='name'))

        # Ignore unknown index keys
        self.assertEqual([
            ('3', (3, "bob", "Bob's data"))],
            db.get_multi(['bar', 'bob'], index='name'))
Example #13
    def test_index_reverse_iteration(self):
        """Test reverse iteration over the items in a database, using the
        reverse natural order of the index keys.
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        with db.cursor(index='name') as curs:
            ordered_values = list(curs.iter_rev())

        self.assertEqual([(1, "foo", "bar"), (3, "bob", "Bob's data"),
                          (2, "alice", "Alice's data")], ordered_values)
Example #14
    def test_update_replace_index(self):
        """Test that update will properly update insert records that have
        the same index value of a deleted record.
        - insert items should be added
        - inserted items index should be correct
        - deleted items should be removed
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)
        db.put('1', (1, "foo", "bar"))
        db.put('2', (2, "alice", "Alice's data"))
        db.put('3', (3, "bob", "Bob's data"))

        db.update([('4', (4, 'foo', "foo's data"))], ['1'])

        self.assertEqual(['2', '3', '4'], db.keys())
        self.assertEqual((4, 'foo', "foo's data"), db.get('foo', index='name'))
Example #15
def get_databases(bind_network, data_dir=None):
    # Get the global state database to operate on
    if data_dir is not None:
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('verifying state in %s', global_state_db_filename)
        global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    else:
        global_state_db = DictDatabase()

    # Get the blockstore
    block_db_filename = os.path.join(data_dir,
                                     'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(block_db_filename,
                               BlockStore.serialize_block,
                               BlockStore.deserialize_block,
                               flag='c',
                               indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
Example #16
    def test_last(self):
        """Given a database with three items and a cursor on the primary keys,
        test that the cursor can be properly position to the last element in
        the natural order of the keys
        """
        db = IndexedDatabase(
            os.path.join(self._temp_dir, 'test_db'),
            _serialize_tuple,
            _deserialize_tuple,
            indexes={'name': lambda tup: [tup[1].encode()]},
            flag='c',
            _size=1024**2)

        db.put('1', (1, "alice", "Alice's data"))
        db.put('2', (2, "bob", "Bob's data"))
        db.put('3', (3, 'charlie', "Charlie's data"))

        with db.cursor() as curs:
            # Start from the beginning
            first = next(curs.iter())
            # Read backward again
            iterator = curs.iter_rev()
            backward = next(iterator)

            self.assertEqual(first, backward)

            # Check the iterator is exhausted from there
            with self.assertRaises(StopIteration):
                next(iterator)

            # reset to the last element
            curs.last()
            new_iter = curs.iter_rev()

            # verify that we'll see the last element again
            last = next(new_iter)
            self.assertEqual(3, last[0])
Example #17
def verify_state(bind_network, bind_component, scheduler_type, data_dir=None):
    """
    Verify that the state root hash of every block is present in state and,
    if not, reconstruct the missing state. Assumes that there are no "holes" in
    state, i.e. starting from genesis, state is present for all blocks up to
    some point and then not at all. If persist is False, this recomputes state
    in memory for all blocks in the blockstore and verifies the state root
    hashes.

    Raises:
        InvalidChainError: The chain in the blockstore is not valid.
        ExecutionError: An unrecoverable error was encountered during batch
            execution.
    """

    # Get the global state database to operate on
    if data_dir is not None:
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('verifying state in %s', global_state_db_filename)
        global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    else:
        global_state_db = DictDatabase()

    state_view_factory = StateViewFactory(global_state_db)

    # Get the blockstore
    block_db_filename = os.path.join(data_dir,
                                     'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(block_db_filename,
                               BlockStore.serialize_block,
                               BlockStore.deserialize_block,
                               flag='c',
                               indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    # Check if we should do state verification
    start_block, prev_state_root = search_for_present_state_root(
        blockstore, state_view_factory)

    if start_block is None:
        LOGGER.info(
            "Skipping state verification: chain head's state root is present")
        return

    LOGGER.info("Recomputing missing state from block %s with %s scheduler",
                start_block, scheduler_type)

    component_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                           name='Component')

    component_dispatcher = Dispatcher()
    component_service = Interconnect(bind_component,
                                     component_dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True,
                                     max_future_callback_workers=10)

    context_manager = ContextManager(global_state_db)

    transaction_executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[])

    component_service.set_check_connections(
        transaction_executor.check_connections)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_RECEIPT_ADD_DATA_REQUEST,
        tp_state_handlers.TpReceiptAddDataHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_EVENT_ADD_REQUEST,
        tp_state_handlers.TpEventAddHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_DELETE_REQUEST,
        tp_state_handlers.TpStateDeleteHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterHandler(
            transaction_executor.processors), component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_UNREGISTER_REQUEST,
        processor_handlers.ProcessorUnRegisterHandler(
            transaction_executor.processors), component_thread_pool)

    component_dispatcher.start()
    component_service.start()

    process_blocks(initial_state_root=prev_state_root,
                   blocks=blockstore.get_block_iter(start_block=start_block,
                                                    reverse=False),
                   transaction_executor=transaction_executor,
                   context_manager=context_manager,
                   state_view_factory=state_view_factory)

    component_dispatcher.stop()
    component_service.stop()
    component_thread_pool.shutdown(wait=True)
    transaction_executor.stop()
    context_manager.stop()
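
An illustrative invocation (the argument values are assumptions; scheduler_type is typically 'serial' or 'parallel' in Sawtooth):

verify_state(
    bind_network='tcp://127.0.0.1:8800',
    bind_component='tcp://127.0.0.1:4004',
    scheduler_type='serial',
    data_dir='/var/lib/sawtooth')
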
Example #18
def start_bot_api(host,
                  port,
                  connection,
                  timeout,
                  registry,
                  connects=None,
                  client_max_size=None):
    """Builds the web app, adds route handlers, and finally starts the app.
    """
    #tele_db = LMDBNoLockDatabase(TELE_DB_FILENAME, 'c')
    tele_db = IndexedDatabase(TELE_DB_FILENAME,
                              serialize_data,
                              deserialize_data,
                              indexes={
                                  'name': lambda dict: [dict['name'].encode()],
                                  'type': lambda dict: [dict['type'].encode()]
                              },
                              flag='c',
                              _size=DEFAULT_DB_SIZE,
                              dupsort=True)
    if "ROOT" in tele_db:
        LOGGER.info('TAKE ROOT FROM DB=%s', tele_db["ROOT"])
        #for key in tele_db.keys():
        LOGGER.info('KEYS=%s', list(tele_db.keys()))
        with tele_db.cursor() as curs:
            for val in curs.iter():
                LOGGER.info('values=%s', val)
        with tele_db.cursor(index='name') as curs:
            #values = list(curs.iter())
            for val in curs.iter():
                LOGGER.info('Name values=%s', val)
        #LOGGER.info('ordered_values=%s',values)

    else:
        tele_db.put("ROOT", {'val': 1, 'name': 'sticker', 'type': 'user'})

    loop = asyncio.get_event_loop()
    connection.open()
    bot = BgxTeleBot(loop, connection, tele_db, TOKEN,
                     connects=connects)  #Tbot(loop, connection,TOKEN)
    # add handler for intention
    bot.add_intent_handler('smalltalk.greetings.hello', bot.intent_hello)
    bot.add_intent_handler('smalltalk.greetings.bye', bot.intent_bye)
    bot.add_intent_handler('smalltalk.agent.can_you_help', bot.intent_help)
    bot.add_intent_handler('smalltalk.dialog.hold_on', bot.intent_hold_on)
    bot.add_intent_handler('smalltalk.user.needs_advice',
                           bot.intent_needs_advice)
    #bot.add_intent_handler('smalltalk.agent.get_wallet',bot.intent_get_wallet)
    bot.add_intent_handler('smalltalk.agent.check_wallet',
                           bot.intent_check_wallet)
    bot.add_intent_handler('smalltalk.agent.check_wallet_history',
                           bot.intent_check_wallet_history)
    bot.add_intent_handler('smalltalk.agent.create_wallet',
                           bot.intent_create_wallet)
    bot.add_intent_handler('smalltalk.agent.trans_token',
                           bot.intent_trans_token)
    bot.add_intent_handler('smalltalk.agent.inc_wallet', bot.intent_inc_wallet)
    bot.add_intent_handler('smalltalk.agent.dec_wallet', bot.intent_dec_wallet)
    bot.add_intent_handler('smalltalk.agent.buy_stuff', bot.intent_buy_stuff)
    bot.add_intent_handler('smalltalk.agent.sell_stuff', bot.intent_sell_stuff)
    # make stuff
    bot.add_intent_handler('smalltalk.agent.create_stuff',
                           bot.intent_create_stuff)
    bot.add_intent_handler('smalltalk.agent.update_stuff',
                           bot.intent_update_stuff)
    bot.add_intent_handler('smalltalk.agent.show_stuff', bot.intent_show_stuff)
    bot.add_intent_handler("smalltalk.agent.show_stuff_history",
                           bot.intent_show_stuff_history)
    bot.add_intent_handler("smalltalk.agent.show_stuff_list",
                           bot.intent_show_stuff_list)
    #
    bot.add_intent_handler("smalltalk.agent.show_gateway",
                           bot.intent_show_gateway)
    bot.add_intent_handler("smalltalk.agent.show_gateway_list",
                           bot.intent_show_gateway_list)
    bot.add_intent_handler("smalltalk.agent.set_gateway",
                           bot.intent_set_gateway)
    bot.add_intent_handler("smalltalk.agent.peers_down", bot.intent_peers_down)
    bot.add_intent_handler("smalltalk.agent.peers_up", bot.intent_peers_up)
    bot.add_intent_handler("smalltalk.agent.peers_control_list",
                           bot.intent_peers_control_list)
    bot.add_intent_handler("smalltalk.agent.peer_info", bot.intent_peer_info)

    bot.add_intent_handler("smalltalk.agent.pause", bot.intent_pause)
    bot.add_intent_handler("smalltalk.agent.unpause", bot.intent_unpause)
    bot.add_intent_handler('smalltalk.agent.chat_admins',
                           bot.intent_chat_admins)
    bot.add_intent_handler('smalltalk.agent.get_users', bot.intent_get_users)
    LOGGER.info('start_bot_api for=%s', TOKEN)
    bot.start()
    """
Example #19
import os

from sawtooth_validator.database.indexed_database import IndexedDatabase


def _serialize_tuple(tup):
    return "{}-{}-{}".format(*tup).encode()


def _deserialize_tuple(bytestring):
    (rec_id, name, data) = tuple(bytestring.decode().split('-'))
    return (int(rec_id), name, data)


db = IndexedDatabase(os.path.join('/tmp/sawtooth', 'test_db'),
                     _serialize_tuple,
                     _deserialize_tuple,
                     indexes={'name': lambda tup: [tup[1].encode()]},
                     flag='c',
                     _size=1024**2)


def do_put():
    db.put('1', (1, "foo", "bar"))
    db.put('2', (2, "alice", "Alice's data"))
    db.put('3', (3, "bob", "Bob's data"))


def test_put(benchmark):
    """Benchmark putting three records into a database with an index.
    """
    # Assumed completion: run the puts under pytest-benchmark's fixture.
    benchmark(do_put)
Example #20
    def __init__(self,
                 bind_network,
                 bind_component,
                 bind_consensus,
                 endpoint,
                 peering,
                 seeds_list,
                 peer_list,
                 data_dir,
                 config_dir,
                 identity_signer,
                 scheduler_type,
                 permissions,
                 minimum_peer_connectivity,
                 maximum_peer_connectivity,
                 state_pruning_block_depth,
                 network_public_key=None,
                 network_private_key=None,
                 roles=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signer (str): cryptographic signer the validator uses for
                signing
        """
        # -- Setup Global State Database and Factory -- #
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('global state database file is %s',
                     global_state_db_filename)
        global_state_db = NativeLmdbDatabase(
            global_state_db_filename,
            indexes=MerkleDatabase.create_index_configuration())
        state_view_factory = StateViewFactory(global_state_db)

        # -- Setup Receipt Store -- #
        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
        receipt_store = TransactionReceiptStore(receipt_db)

        # -- Setup Block Store -- #
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = IndexedDatabase(
            block_db_filename,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            flag='c',
            indexes=BlockStore.create_index_configuration())
        block_store = BlockStore(block_db)
        # The cache keep time for the journal's block cache must be greater
        # than the cache keep time used by the completer.
        base_keep_time = 1200
        block_cache = BlockCache(block_store,
                                 keep_time=int(base_keep_time * 9 / 8),
                                 purge_frequency=30)

        # -- Setup Thread Pools -- #
        component_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=10, name='Component')
        network_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                             name='Network')
        client_thread_pool = InstrumentedThreadPoolExecutor(max_workers=5,
                                                            name='Client')
        sig_pool = InstrumentedThreadPoolExecutor(max_workers=3,
                                                  name='Signature')

        # -- Setup Dispatchers -- #
        component_dispatcher = Dispatcher()
        network_dispatcher = Dispatcher()

        # -- Setup Services -- #
        component_service = Interconnect(bind_component,
                                         component_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        network_service = Interconnect(bind_network,
                                       dispatcher=network_dispatcher,
                                       zmq_identity=zmq_identity,
                                       secured=secure,
                                       server_public_key=network_public_key,
                                       server_private_key=network_private_key,
                                       heartbeat=True,
                                       public_endpoint=endpoint,
                                       connection_timeout=120,
                                       max_incoming_connections=100,
                                       max_future_callback_workers=10,
                                       authorize=True,
                                       signer=identity_signer,
                                       roles=roles)

        # -- Setup Transaction Execution Platform -- #
        context_manager = ContextManager(global_state_db)

        batch_tracker = BatchTracker(block_store)

        settings_cache = SettingsCache(
            SettingsViewFactory(state_view_factory), )

        transaction_executor = TransactionExecutor(
            service=component_service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        component_service.set_check_connections(
            transaction_executor.check_connections)

        event_broadcaster = EventBroadcaster(component_service, block_store,
                                             receipt_store)

        # -- Setup P2P Networking -- #
        gossip = Gossip(network_service,
                        settings_cache,
                        lambda: block_store.chain_head,
                        block_store.chain_head_state_root,
                        endpoint=endpoint,
                        peering_mode=peering,
                        initial_seed_endpoints=seeds_list,
                        initial_peer_endpoints=peer_list,
                        minimum_peer_connectivity=minimum_peer_connectivity,
                        maximum_peer_connectivity=maximum_peer_connectivity,
                        topology_check_frequency=1)

        completer = Completer(block_store,
                              gossip,
                              cache_keep_time=base_keep_time,
                              cache_purge_frequency=30,
                              requested_keep_time=300)

        block_sender = BroadcastBlockSender(completer, gossip)
        batch_sender = BroadcastBatchSender(completer, gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(global_state_db))

        id_cache = IdentityCache(identity_view_factory)

        # -- Setup Permissioning -- #
        permission_verifier = PermissionVerifier(
            permissions, block_store.chain_head_state_root, id_cache)

        identity_observer = IdentityObserver(to_update=id_cache.invalidate,
                                             forked=id_cache.forked)

        settings_observer = SettingsObserver(
            to_update=settings_cache.invalidate, forked=settings_cache.forked)

        # -- Consensus Engine -- #
        consensus_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=3, name='Consensus')
        consensus_dispatcher = Dispatcher()
        consensus_service = Interconnect(bind_consensus,
                                         consensus_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        consensus_notifier = ConsensusNotifier(consensus_service)

        # -- Setup Journal -- #
        batch_injector_factory = DefaultBatchInjectorFactory(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            signer=identity_signer)

        block_publisher = BlockPublisher(
            transaction_executor=transaction_executor,
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            settings_cache=settings_cache,
            block_sender=block_sender,
            batch_sender=batch_sender,
            chain_head=block_store.chain_head,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            check_publish_block_frequency=0.1,
            batch_observers=[batch_tracker],
            batch_injector_factory=batch_injector_factory)

        block_publisher_batch_sender = block_publisher.batch_sender()

        block_validator = BlockValidator(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            transaction_executor=transaction_executor,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier)

        chain_controller = ChainController(
            block_store=block_store,
            block_cache=block_cache,
            block_validator=block_validator,
            state_database=global_state_db,
            chain_head_lock=block_publisher.chain_head_lock,
            state_pruning_block_depth=state_pruning_block_depth,
            data_dir=data_dir,
            observers=[
                event_broadcaster, receipt_store, batch_tracker,
                identity_observer, settings_observer
            ])

        genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=transaction_executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(block_publisher_batch_sender.send)
        completer.set_on_block_received(chain_controller.queue_block)
        completer.set_chain_has_block(chain_controller.has_block)

        # -- Register Message Handler -- #
        network_handlers.add(network_dispatcher, network_service, gossip,
                             completer, responder, network_thread_pool,
                             sig_pool, chain_controller.has_block,
                             block_publisher.has_batch, permission_verifier,
                             block_publisher, consensus_notifier)

        component_handlers.add(component_dispatcher, gossip, context_manager,
                               transaction_executor, completer, block_store,
                               batch_tracker, global_state_db,
                               self.get_chain_head_state_root_hash,
                               receipt_store, event_broadcaster,
                               permission_verifier, component_thread_pool,
                               client_thread_pool, sig_pool, block_publisher)

        # -- Store Object References -- #
        self._component_dispatcher = component_dispatcher
        self._component_service = component_service
        self._component_thread_pool = component_thread_pool

        self._network_dispatcher = network_dispatcher
        self._network_service = network_service
        self._network_thread_pool = network_thread_pool

        consensus_proxy = ConsensusProxy(
            block_cache=block_cache,
            chain_controller=chain_controller,
            block_publisher=block_publisher,
            gossip=gossip,
            identity_signer=identity_signer,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            state_view_factory=state_view_factory)

        consensus_handlers.add(consensus_dispatcher, consensus_thread_pool,
                               consensus_proxy)

        self._consensus_dispatcher = consensus_dispatcher
        self._consensus_service = consensus_service
        self._consensus_thread_pool = consensus_thread_pool

        self._client_thread_pool = client_thread_pool
        self._sig_pool = sig_pool

        self._context_manager = context_manager
        self._transaction_executor = transaction_executor
        self._genesis_controller = genesis_controller
        self._gossip = gossip

        self._block_publisher = block_publisher
        self._chain_controller = chain_controller
        self._block_validator = block_validator
Example #21
    def test_first(self):
        """Given a database with three items and a cursor on the primary keys,
        test that the cursor can be properly position to the first element in
        the natural order of the keys
        """
        db = IndexedDatabase(os.path.join(self._temp_dir, 'test_db'),
                             _serialize_tuple,
                             _deserialize_tuple,
                             indexes={'name': lambda tup: [tup[1].encode()]},
                             flag='c',
                             _size=1024**2)

        db.put('1', (1, "alice", "Alice's data"))
        db.put('2', (2, "bob", "Bob's data"))
        db.put('3', (3, 'charlie', "Charlie's data"))
        print("keys={}".format(db.keys()))
        with db.cursor() as curs:
            # Start from the end
            last = next(curs.iter_rev())
            # Read forward again
            iterator = curs.iter()
            forward = next(iterator)

            self.assertEqual(last, forward)
            """
            iter =curs.iter()
            k = curs.key()
            v = next(iter)
            k1 = curs.key()
            v1 = next(iter)  
            k2 = curs.key() 
            v2 = next(iter)
            k3 = curs.key()   
            print("ITER:[{}]={},[{}]={},[{}]={}=>{}".format(k,v,k1,v1,k2,v2,k3))
            print("POS={}={}".format(curs.key(),curs.value())) 
            
            print('LIST',list(iter)) # list(curs.iter()))
            print("POS={}".format(curs.key()))
            iter = curs.iter_rev()
            print("POS={}".format(curs.key()))
            print('LIST REV',list(iter))
            print("POS={}".format(curs.key()))

            print("key '{}' {}".format(curs.key(),curs.value()))
            riter = curs.iter_rev()
            print("key1 '{}'".format(curs.key()),'value',curs.value())
            last = next(riter)
            print("key2 '{}'={},{}".format(curs.key(),last,next(riter)))

            last1 = next(riter)
            print("key3 '{}'={}".format(curs.key(),last1))
            curs.iter_rev()
            print("key4 '{}'={}".format(curs.key(),list(curs.iter_rev())))
            print("key5 '{}'".format(curs.key()))
            key = curs.key()
            print("key6 '{}'={}".format(key,list(curs.iter_rev())))
            # Read forward again
            iterator = curs.iter()
            print('key 1',curs.key(),type(curs.key()))
            #print('VALUE',curs.value())
            forward = next(iterator)
            print('key 2',curs.key())
            print('LAST',last,'forward',forward)
            self.assertEqual(last, forward)
            """
            # Check the iterator is exhausted from there
            with self.assertRaises(StopIteration):
                next(iterator)

            print("reset to first element ....")
            # reset to first element
            curs.first()
            new_iter = curs.iter()

            # verify that we'll see the first element again
            first = next(new_iter)
            print('FIRST', first)
            self.assertEqual(1, first[0])