Example #1
    def test_iterate_chain_on_empty_block_store(self):
        """Given a block store with no blocks, iterate using predecessor iterator
        and verify that it results in an empty list.
        """
        block_store = BlockStore(DictDatabase())

        self.assertEqual([], [b for b in block_store.get_predecessor_iter()])
Example #2
 def make_block_store(blocks=None):
     block_dir = tempfile.mkdtemp()
     block_db = NativeLmdbDatabase(
         os.path.join(block_dir, 'block.lmdb'),
         BlockStore.create_index_configuration())
     block_store = BlockStore(block_db)
     if blocks is not None:
         block_store.put_blocks(blocks)
     return block_store
Example #3
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.dir = tempfile.mkdtemp()
        self.block_db = NativeLmdbDatabase(
            os.path.join(self.dir, 'block.lmdb'),
            BlockStore.create_index_configuration())
        self.block_store = BlockStore(self.block_db)
        self.block_cache = BlockCache(self.block_store)
        self.state_db = NativeLmdbDatabase(
            os.path.join(self.dir, "merkle.lmdb"),
            MerkleDatabase.create_index_configuration())

        self.state_view_factory = NativeStateViewFactory(self.state_db)

        self.block_manager = BlockManager()
        self.block_manager.add_commit_store(self.block_store)

        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        identity_private_key = context.new_random_private_key()
        self.identity_signer = crypto_factory.new_signer(identity_private_key)
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            chain_head = self.genesis_block
            self.block_manager.put([chain_head.block])
            self.block_manager.persist(
                chain_head.block.header_signature,
                "commit_store")

        self.block_publisher = BlockPublisher(
            block_manager=self.block_manager,
            transaction_executor=MockTransactionExecutor(),
            transaction_committed=self.block_store.has_transaction,
            batch_committed=self.block_store.has_batch,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.batch_sender,
            chain_head=chain_head.block if chain_head is not None else None,
            identity_signer=self.identity_signer,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            batch_observers=[])
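Example #4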
    def test_state_verifier(self):
        blockstore = BlockStore(DictDatabase(
            indexes=BlockStore.create_index_configuration()))
        global_state_db = NativeLmdbDatabase(
            os.path.join(self._temp_dir, 'test_state_verifier.lmdb'),
            indexes=MerkleDatabase.create_index_configuration())

        precalculated_state_roots = [
            "e35490eac6f77453675c3399da7efe451e791272bbc8cf1b032c75030fb455c3",
            "3a369eb951171895c00ba2ffd04bfa1ef98d6ee651f96a65ae3280cf8d67d5e7",
            "797e70e29915c9129f950b2084ed0e3c09246bd1e6c232571456f51ca85df340",
        ]

        signer = get_signer()
        populate_blockstore(blockstore, signer, precalculated_state_roots)

        verify_state(
            global_state_db,
            blockstore,
            "tcp://eth0:4004",
            "serial")

        # There is a bug in the shutdown code for some component this depends
        # on, which causes it to occasionally hang during shutdown. Just kill
        # the process for now.
        # pylint: disable=protected-access
        os._exit(0)
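Example #5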
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(DictDatabase())
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        self.signing_key = signing.generate_privkey()
        self.public_key = signing.generate_pubkey(self.signing_key)

        self.identity_signing_key = signing.generate_privkey()
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.batch_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signing_key=self.identity_signing_key,
            data_dir=None,
            config_dir=None)
Example #6
    def setUp(self):
        self.dir = tempfile.mkdtemp()
        self.block_db = NativeLmdbDatabase(
            os.path.join(self.dir, 'block.lmdb'),
            BlockStore.create_index_configuration())
        self.block_store = BlockStore(self.block_db)
        self.block_manager = BlockManager()
        self.block_manager.add_commit_store(self.block_store)
        self.gossip = MockGossip()
        self.completer = Completer(
            block_manager=self.block_manager,
            transaction_committed=self.block_store.has_transaction,
            get_committed_batch_by_id=self.block_store.get_batch,
            get_committed_batch_by_txn_id=(
                self.block_store.get_batch_by_transaction
            ),
            get_chain_head=lambda: self.block_store.chain_head,
            gossip=self.gossip)
        self.completer.set_on_block_received(self._on_block_received)
        self.completer.set_on_batch_received(self._on_batch_received)
        self._has_block_value = True

        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        self.blocks = []
        self.batches = []
Example #7
    def test_iterate_chain(self):
        """Given a block store, create an predecessor iterator.

        1. Create a chain of length 5.
        2. Iterate the chain using the get_predecessor_iter from the chain head
        3. Verify that the block ids match the chain, in reverse order
        """

        block_store = BlockStore(DictDatabase())
        chain = self._create_chain(5)
        block_store.update_chain(chain)

        ids = [b.identifier for b in block_store.get_predecessor_iter()]

        self.assertEqual(
            ['abcd4', 'abcd3', 'abcd2', 'abcd1', 'abcd0'],
            ids)
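Several of these iterator tests call a _create_chain helper that the snippets
omit. The following is a hedged reconstruction of what such a helper plausibly
looks like, inferred from the expected identifiers ('abcd0' through 'abcd4')
and from the Block, BlockHeader, BlockWrapper, and NULL_BLOCK_IDENTIFIER APIs
that appear in the other examples (see the imports in Example #24); the exact
ordering and field choices are assumptions, not the project's actual code.

    def _create_chain(self, length):
        # Assumed helper: build a linked chain of BlockWrappers with ids
        # 'abcd0' .. 'abcd<length - 1>', each block pointing at its
        # predecessor, returned newest-first as update_chain appears to
        # expect in these tests.
        chain = []
        previous_id = NULL_BLOCK_IDENTIFIER
        for i in range(length):
            identifier = 'abcd' + str(i)
            header = BlockHeader(
                block_num=i,
                previous_block_id=previous_id).SerializeToString()
            chain.append(BlockWrapper(
                Block(header=header, header_signature=identifier)))
            previous_id = identifier
        chain.reverse()  # chain head ('abcd4') first
        return chain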
Example #8
 def setUp(self):
     self.dir = tempfile.mkdtemp()
     self.block_db = NativeLmdbDatabase(
         os.path.join(self.dir, 'block.lmdb'),
         BlockStore.create_index_configuration())
     self.block_store = BlockStore(self.block_db)
     self.receipt_store = TransactionReceiptStore(DictDatabase())
     self._txn_ids_by_block_id = {}
     for block_id, blk_w, txn_ids in create_chain():
         self.block_store.put_blocks([blk_w.block])
         self._txn_ids_by_block_id[block_id] = txn_ids
         for txn_id in txn_ids:
             receipt = create_receipt(txn_id=txn_id,
                                      key_values=[("address", block_id)])
             self.receipt_store.put(
                 txn_id=txn_id,
                 txn_receipt=receipt)
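Example #9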
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(DictDatabase(
            indexes=BlockStore.create_index_configuration()))
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        identity_private_key = context.new_random_private_key()
        self.identity_signer = crypto_factory.new_signer(identity_private_key)
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            settings_cache=SettingsCache(
                SettingsViewFactory(self.state_view_factory),
            ),
            block_sender=self.block_sender,
            batch_sender=self.batch_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signer=self.identity_signer,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            check_publish_block_frequency=0.1,
            batch_observers=[])
Example #10
    def __init__(self, size=3, start='0'):
        self.dir = tempfile.mkdtemp()
        self.block_db = NativeLmdbDatabase(
            os.path.join(self.dir, 'block.lmdb'),
            BlockStore.create_index_configuration())
        super().__init__(self.block_db)

        for i in range(size):
            self.add_block(_increment_key(start, i))
Example #11
    def test_fork_detection_on_iteration(self):
        """Given a block store where a fork occurred while using the predecessor
        iterator, it should throw a PossibleForkDetectedError.

        The fork occurrence will be simulated.
        """
        block_store = BlockStore(DictDatabase())
        chain = self._create_chain(5)
        block_store.update_chain(chain)

        iterator = block_store.get_predecessor_iter()

        self.assertEqual('abcd4', next(iterator).identifier)

        del block_store['abcd3']

        with self.assertRaises(PossibleForkDetectedError):
            next(iterator)
Example #12
File: tests.py  Project: sambacha/sprawl
 def setUp(self):
     self.block_store = BlockStore(DictDatabase())
     self.receipt_store = TransactionReceiptStore(DictDatabase())
     self._txn_ids_by_block_id = {}
     for block_id, blk_w, txn_ids in create_chain():
         self.block_store[block_id] = blk_w
         self._txn_ids_by_block_id[block_id] = txn_ids
         for txn_id in txn_ids:
             receipt = create_receipt(txn_id=txn_id,
                                      key_values=[("address", block_id)])
             self.receipt_store.put(txn_id=txn_id, txn_receipt=receipt)
Example #13
 def setUp(self):
     self.block_store = BlockStore({})
     self.gossip = MockGossip()
     self.completer = Completer(self.block_store, self.gossip)
     self.completer._on_block_received = self._on_block_received
     self.completer._on_batch_received = self._on_batch_received
     self.private_key = signing.generate_privkey()
     self.public_key = signing.encode_pubkey(
         signing.generate_pubkey(self.private_key), "hex")
     self.blocks = []
     self.batches = []
Example #14
    def test_iterate_chain_from_starting_block(self):
        """Given a block store, iterate if using an predecessor iterator from
        a particular start point in the chain.

        1. Create a chain of length 5.
        2. Iterate the chain using the get_predecessor_iter from block 3
        3. Verify that the block ids match the chain, in reverse order
        """
        block_store = BlockStore(DictDatabase())
        chain = self._create_chain(5)
        block_store.update_chain(chain)

        block = block_store['abcd2']

        ids = [b.identifier
               for b in block_store.get_predecessor_iter(block)]

        self.assertEqual(
            ['abcd2', 'abcd1', 'abcd0'],
            ids)
Example #15
def get_databases(bind_network, data_dir):
    # Get the global state database to operate on
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug(
        'verifying state in %s', global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())

    # Get the blockstore
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = NativeLmdbDatabase(
        block_db_filename,
        indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
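Example #16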
    def test_state_verifier(self):
        blockstore = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        global_state_db = DictDatabase()

        precalculated_state_roots = [
            "e35490eac6f77453675c3399da7efe451e791272bbc8cf1b032c75030fb455c3",
            "3a369eb951171895c00ba2ffd04bfa1ef98d6ee651f96a65ae3280cf8d67d5e7",
            "797e70e29915c9129f950b2084ed0e3c09246bd1e6c232571456f51ca85df340",
        ]

        signer = get_signer()
        populate_blockstore(blockstore, signer, precalculated_state_roots)

        verify_state(global_state_db, blockstore, "tcp://eth0:4004", "serial")

        # There is a bug in the shutdown code for some component this depends
        # on, which causes it to occasionally hang during shutdown. Just kill
        # the process for now.
        # pylint: disable=protected-access
        os._exit(0)
Example #17
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        self.signing_key = signing.generate_private_key()
        self.public_key = signing.generate_public_key(self.signing_key)

        self.identity_signing_key = signing.generate_private_key()
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.batch_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signing_key=self.identity_signing_key,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            check_publish_block_frequency=0.1,
            batch_observers=[])
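Example #18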
def get_databases(bind_network, data_dir=None):
    # Get the global state database to operate on
    if data_dir is not None:
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('verifying state in %s', global_state_db_filename)
        global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    else:
        global_state_db = DictDatabase()

    # Get the blockstore
    block_db_filename = os.path.join(data_dir,
                                     'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(block_db_filename,
                               BlockStore.serialize_block,
                               BlockStore.deserialize_block,
                               flag='c',
                               indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
Example #19
    def setUp(self):
        self.block_store = BlockStore(DictDatabase(
            indexes=BlockStore.create_index_configuration()))
        self.gossip = MockGossip()
        self.completer = Completer(self.block_store, self.gossip)
        self.completer._on_block_received = self._on_block_received
        self.completer._on_batch_received = self._on_batch_received
        self.completer._has_block = self._has_block
        self._has_block_value = True

        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        self.blocks = []
        self.batches = []
Example #20
def get_databases(bind_network, data_dir):
    # Get the global state database to operate on
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug(
        'verifying state in %s', global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())

    # Get the blockstore
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = NativeLmdbDatabase(
        block_db_filename,
        indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
Example #21
    def test_iterate_chain(self):
        """Given a block store, create an predecessor iterator.

        1. Create a chain of length 5.
        2. Iterate the chain using the get_predecessor_iter from the chain head
        3. Verify that the block ids match the chain, in reverse order
        """

        block_store = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        chain = self._create_chain(5)
        block_store.update_chain(chain)

        ids = [b.identifier for b in block_store.get_predecessor_iter()]

        self.assertEqual(['abcd4', 'abcd3', 'abcd2', 'abcd1', 'abcd0'], ids)
Example #22
    def test_iterate_chain_from_starting_block(self):
        """Given a block store, iterate if using an predecessor iterator from
        a particular start point in the chain.

        1. Create a chain of length 5.
        2. Iterate the chain using the get_predecessor_iter from block 3
        3. Verify that the block ids match the chain, in reverse order
        """
        block_store = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        chain = self._create_chain(5)
        block_store.update_chain(chain)

        block = block_store['abcd2']

        ids = [b.identifier for b in block_store.get_predecessor_iter(block)]

        self.assertEqual(['abcd2', 'abcd1', 'abcd0'], ids)
Example #23
 def make_block_store(data=None):
     return BlockStore(
         DictDatabase(
             data, indexes=BlockStore.create_index_configuration()))
Example #24
import random
import hashlib
import cbor

from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from test_completer.mock import MockGossip
from sawtooth_validator.journal.completer import Completer
from sawtooth_validator.database.dict_database import DictDatabase
from sawtooth_validator.journal.block_store import BlockStore
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.protobuf.batch_pb2 import BatchHeader, Batch
from sawtooth_validator.protobuf.block_pb2 import BlockHeader, Block
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader, Transaction

block_store = BlockStore(
    DictDatabase(indexes=BlockStore.create_index_configuration()))
gossip = MockGossip()
context = create_context('secp256k1')
crypto_factory = CryptoFactory(context)
private_key = context.new_random_private_key()
signer = crypto_factory.new_signer(private_key)
completer = Completer(block_store, gossip)


def _create_transactions(count, missing_dep=False):
    txn_list = []

    for _ in range(count):
        payload = {
            'Verb': 'set',
            'Name': 'name' + str(random.randint(0, 100)),
Example #25
def verify_state(bind_network, bind_component, scheduler_type, data_dir=None):
    """
    Verify that the state root hash of each block is present in state and,
    if not, reconstruct the missing state. Assumes that there are no "holes"
    in state, i.e., starting from genesis, state is present for all blocks up
    to some point and then not at all. If persist is False, this recomputes
    state in memory for all blocks in the blockstore and verifies the state
    root hashes.

    Raises:
        InvalidChainError: The chain in the blockstore is not valid.
        ExecutionError: An unrecoverable error was encountered during batch
            execution.
    """

    # Get the global state database to operate on
    if data_dir is not None:
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('verifying state in %s', global_state_db_filename)
        global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    else:
        global_state_db = DictDatabase()

    state_view_factory = StateViewFactory(global_state_db)

    # Get the blockstore
    block_db_filename = os.path.join(data_dir,
                                     'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(block_db_filename,
                               BlockStore.serialize_block,
                               BlockStore.deserialize_block,
                               flag='c',
                               indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    # Check if we should do state verification
    start_block, prev_state_root = search_for_present_state_root(
        blockstore, state_view_factory)

    if start_block is None:
        LOGGER.info(
            "Skipping state verification: chain head's state root is present")
        return

    LOGGER.info("Recomputing missing state from block %s with %s scheduler",
                start_block, scheduler_type)

    component_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                           name='Component')

    component_dispatcher = Dispatcher()
    component_service = Interconnect(bind_component,
                                     component_dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True,
                                     max_future_callback_workers=10)

    context_manager = ContextManager(global_state_db)

    transaction_executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[])

    component_service.set_check_connections(
        transaction_executor.check_connections)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_RECEIPT_ADD_DATA_REQUEST,
        tp_state_handlers.TpReceiptAddDataHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_EVENT_ADD_REQUEST,
        tp_state_handlers.TpEventAddHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_DELETE_REQUEST,
        tp_state_handlers.TpStateDeleteHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterHandler(
            transaction_executor.processors), component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_UNREGISTER_REQUEST,
        processor_handlers.ProcessorUnRegisterHandler(
            transaction_executor.processors), component_thread_pool)

    component_dispatcher.start()
    component_service.start()

    process_blocks(initial_state_root=prev_state_root,
                   blocks=blockstore.get_block_iter(start_block=start_block,
                                                    reverse=False),
                   transaction_executor=transaction_executor,
                   context_manager=context_manager,
                   state_view_factory=state_view_factory)

    component_dispatcher.stop()
    component_service.stop()
    component_thread_pool.shutdown(wait=True)
    transaction_executor.stop()
    context_manager.stop()
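Example #26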
class BlockTreeManager(object):
    def __str__(self):
        return str(self.block_cache)

    def __repr__(self):
        return repr(self.block_cache)

    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        identity_private_key = context.new_random_private_key()
        self.identity_signer = crypto_factory.new_signer(identity_private_key)
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.batch_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signer=self.identity_signer,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            check_publish_block_frequency=0.1,
            batch_observers=[])

    @property
    def chain_head(self):
        return self.block_store.chain_head

    def set_chain_head(self, block):
        self.block_store.update_chain([block], [])

    def generate_block(self,
                       previous_block=None,
                       add_to_store=False,
                       add_to_cache=False,
                       batch_count=1,
                       batches=None,
                       status=BlockStatus.Unknown,
                       invalid_consensus=False,
                       invalid_batch=False,
                       invalid_signature=False,
                       weight=0):

        previous = self._get_block(previous_block)
        if previous is None:
            previous = self.chain_head

        header = BlockHeader(
            previous_block_id=previous.identifier,
            signer_public_key=self.identity_signer.get_public_key().as_hex(),
            block_num=previous.block_num + 1)

        block_builder = BlockBuilder(header)
        if batches:
            block_builder.add_batches(batches)

        if batch_count != 0:
            block_builder.add_batches(
                [self._generate_batch() for _ in range(batch_count)])

        if invalid_batch:
            block_builder.add_batches(
                [self._generate_batch_from_payload('BAD')])

        block_builder.set_state_hash('0' * 70)

        consensus = mock_consensus.BlockPublisher()
        consensus.finalize_block(block_builder.block_header, weight=weight)

        header_bytes = block_builder.block_header.SerializeToString()
        signature = self.identity_signer.sign(header_bytes)
        block_builder.set_signature(signature)

        block_wrapper = BlockWrapper(block_builder.build_block())

        if batches:
            block_wrapper.block.batches.extend(batches)

        if batch_count:
            block_wrapper.block.batches.extend(
                [self.generate_batch() for _ in range(batch_count)])

        if invalid_signature:
            block_wrapper.block.header_signature = "BAD"

        if invalid_consensus:
            block_wrapper.header.consensus = b'BAD'

        block_wrapper.status = status

        if add_to_cache:
            self.block_cache[block_wrapper.identifier] = block_wrapper

        if add_to_store:
            self.block_store[block_wrapper.identifier] = block_wrapper

        LOGGER.debug("Generated %s", dumps_block(block_wrapper))
        return block_wrapper

    def generate_chain(self, root_block, blocks, params=None):
        """
        Generate a new chain based on the root block and place it in the
        block cache.
        """
        if params is None:
            params = {}

        if root_block is None:
            previous = self.generate_genesis_block()
            self.block_store[previous.identifier] = previous
        else:
            previous = self._get_block(root_block)

        try:
            block_defs = [self._block_def(**params) for _ in range(blocks)]
            block_defs[-1] = self._block_def()
        except TypeError:
            block_defs = blocks

        out = []
        for block_def in block_defs:
            new_block = self.generate_block(previous_block=previous,
                                            **block_def)
            out.append(new_block)
            previous = new_block
        return out

    def create_block(self,
                     payload='payload',
                     batch_count=1,
                     previous_block_id=NULL_BLOCK_IDENTIFIER,
                     block_num=0):
        header = BlockHeader(
            previous_block_id=previous_block_id,
            signer_public_key=self.identity_signer.get_public_key().as_hex(),
            block_num=block_num)

        block_builder = BlockBuilder(header)
        block_builder.add_batches([
            self._generate_batch_from_payload(payload)
            for _ in range(batch_count)
        ])
        block_builder.set_state_hash('0' * 70)

        header_bytes = block_builder.block_header.SerializeToString()
        signature = self.identity_signer.sign(header_bytes)
        block_builder.set_signature(signature)

        block_wrapper = BlockWrapper(block_builder.build_block())
        LOGGER.debug("Generated %s", dumps_block(block_wrapper))
        return block_wrapper

    def generate_genesis_block(self):
        return self.create_block(payload='Genesis',
                                 previous_block_id=NULL_BLOCK_IDENTIFIER,
                                 block_num=0)

    def _block_def(self,
                   add_to_store=False,
                   add_to_cache=False,
                   batch_count=1,
                   status=BlockStatus.Unknown,
                   invalid_consensus=False,
                   invalid_batch=False,
                   invalid_signature=False,
                   weight=0):
        return {
            "add_to_cache": add_to_cache,
            "add_to_store": add_to_store,
            "batch_count": batch_count,
            "status": status,
            "invalid_consensus": invalid_consensus,
            "invalid_batch": invalid_batch,
            "invalid_signature": invalid_signature,
            "weight": weight
        }

    def _get_block_id(self, block):
        if block is None:
            return None
        elif isinstance(block, (Block, BlockWrapper)):
            return block.header_signature
        else:
            return str(block)

    def _get_block(self, block):
        if block is None:
            return None
        elif isinstance(block, Block):
            return BlockWrapper(block)
        elif isinstance(block, BlockWrapper):
            return block
        elif isinstance(block, str):
            return self.block_cache[block]
        else:  # last resort: look the block up by its string form
            return self.block_cache[str(block)]

    def generate_batch(self, txn_count=2, missing_deps=False, txns=None):

        batch = self._generate_batch(txn_count, missing_deps, txns)

        LOGGER.debug("Generated Batch:\n%s", dumps_batch(batch))

        return batch

    def _generate_batch(self, txn_count=2, missing_deps=False, txns=None):
        if txns is None:
            txns = []

        if txn_count != 0:
            txns += [
                self.generate_transaction('txn_' + str(i))
                for i in range(txn_count)
            ]

        if missing_deps:
            target_txn = txns[-1]
            txn_missing_deps = self.generate_transaction(
                payload='this one has a missing dependency',
                deps=[target_txn.header_signature])
            # replace the targeted txn with the missing deps txn
            txns[-1] = txn_missing_deps

        batch_header = BatchHeader(
            signer_public_key=self.signer.get_public_key().as_hex(),
            transaction_ids=[txn.header_signature
                             for txn in txns]).SerializeToString()

        batch = Batch(header=batch_header,
                      header_signature=self.signer.sign(batch_header),
                      transactions=txns)

        return batch

    def generate_transaction(self, payload='txn', deps=None):
        payload_encoded = payload.encode('utf-8')
        hasher = hashlib.sha512()
        hasher.update(payload_encoded)

        txn_header = TransactionHeader(
            dependencies=([] if deps is None else deps),
            batcher_public_key=self.signer.get_public_key().as_hex(),
            family_name='test',
            family_version='1',
            nonce=_generate_id(16),
            payload_sha512=hasher.hexdigest().encode(),
            signer_public_key=self.signer.get_public_key().as_hex(
            )).SerializeToString()

        txn = Transaction(header=txn_header,
                          header_signature=self.signer.sign(txn_header),
                          payload=payload_encoded)

        return txn

    def _generate_batch_from_payload(self, payload):
        txn = self.generate_transaction(payload)

        batch_header = BatchHeader(
            signer_public_key=self.signer.get_public_key().as_hex(),
            transaction_ids=[txn.header_signature]).SerializeToString()

        batch = Batch(header=batch_header,
                      header_signature=self.signer.sign(batch_header),
                      transactions=[txn])
        return batch
Example #27
    def __init__(self,
                 bind_network,
                 bind_component,
                 bind_consensus,
                 endpoint,
                 peering,
                 seeds_list,
                 peer_list,
                 data_dir,
                 config_dir,
                 identity_signer,
                 scheduler_type,
                 permissions,
                 minimum_peer_connectivity,
                 maximum_peer_connectivity,
                 state_pruning_block_depth,
                 fork_cache_keep_time,
                 network_public_key=None,
                 network_private_key=None,
                 roles=None,
                 component_thread_pool_workers=10,
                 network_thread_pool_workers=10,
                 signature_thread_pool_workers=3):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signer (str): cryptographic signer the validator uses for
                signing
            component_thread_pool_workers (int): number of workers in the
                component thread pool; defaults to 10.
            network_thread_pool_workers (int): number of workers in the network
                thread pool; defaults to 10.
            signature_thread_pool_workers (int): number of workers in the
                signature thread pool; defaults to 3.
        """
        # -- Setup Global State Database and Factory -- #
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug(
            'global state database file is %s', global_state_db_filename)
        global_state_db = NativeLmdbDatabase(
            global_state_db_filename,
            indexes=MerkleDatabase.create_index_configuration())
        state_view_factory = StateViewFactory(global_state_db)
        native_state_view_factory = NativeStateViewFactory(global_state_db)

        # -- Setup Receipt Store -- #
        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
        receipt_store = TransactionReceiptStore(receipt_db)

        # -- Setup Block Store -- #
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = NativeLmdbDatabase(
            block_db_filename,
            indexes=BlockStore.create_index_configuration())
        block_store = BlockStore(block_db)
        # The cache keep time for the journal's block cache must be greater
        # than the cache keep time used by the completer.
        base_keep_time = 1200

        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        block_status_store = BlockValidationResultStore()

        # -- Setup Thread Pools -- #
        component_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=component_thread_pool_workers,
            name='Component')
        network_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=network_thread_pool_workers,
            name='Network')
        client_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=5,
            name='Client')
        sig_pool = InstrumentedThreadPoolExecutor(
            max_workers=signature_thread_pool_workers,
            name='Signature')

        # -- Setup Dispatchers -- #
        component_dispatcher = Dispatcher()
        network_dispatcher = Dispatcher()

        # -- Setup Services -- #
        component_service = Interconnect(
            bind_component,
            component_dispatcher,
            secured=False,
            heartbeat=False,
            max_incoming_connections=20,
            monitor=True,
            max_future_callback_workers=10)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        network_service = Interconnect(
            bind_network,
            dispatcher=network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=120,
            max_incoming_connections=100,
            max_future_callback_workers=10,
            authorize=True,
            signer=identity_signer,
            roles=roles)

        # -- Setup Transaction Execution Platform -- #
        context_manager = ContextManager(global_state_db)

        batch_tracker = BatchTracker(block_store.has_batch)

        settings_cache = SettingsCache(
            SettingsViewFactory(state_view_factory),
        )

        transaction_executor = TransactionExecutor(
            service=component_service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        component_service.set_check_connections(
            transaction_executor.check_connections)

        event_broadcaster = EventBroadcaster(
            component_service, block_store, receipt_store)

        # -- Consensus Engine -- #
        consensus_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=3,
            name='Consensus')
        consensus_dispatcher = Dispatcher()
        consensus_service = Interconnect(
            bind_consensus,
            consensus_dispatcher,
            secured=False,
            heartbeat=False,
            max_incoming_connections=20,
            max_future_callback_workers=10)

        consensus_registry = ConsensusRegistry()

        consensus_notifier = ConsensusNotifier(
            consensus_service,
            consensus_registry,
            identity_signer.get_public_key().as_hex())

        # -- Setup P2P Networking -- #
        gossip = Gossip(
            network_service,
            settings_cache,
            lambda: block_store.chain_head,
            block_store.chain_head_state_root,
            consensus_notifier,
            endpoint=endpoint,
            peering_mode=peering,
            initial_seed_endpoints=seeds_list,
            initial_peer_endpoints=peer_list,
            minimum_peer_connectivity=minimum_peer_connectivity,
            maximum_peer_connectivity=maximum_peer_connectivity,
            topology_check_frequency=1
        )

        consensus_notifier.set_gossip(gossip)

        completer = Completer(
            block_manager=block_manager,
            transaction_committed=block_store.has_transaction,
            get_committed_batch_by_id=block_store.get_batch,
            get_committed_batch_by_txn_id=(
                block_store.get_batch_by_transaction
            ),
            get_chain_head=lambda: unwrap_if_not_none(block_store.chain_head),
            gossip=gossip,
            cache_keep_time=base_keep_time,
            cache_purge_frequency=30,
            requested_keep_time=300)
        self._completer = completer

        block_sender = BroadcastBlockSender(completer, gossip)
        batch_sender = BroadcastBatchSender(completer, gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(global_state_db))

        id_cache = IdentityCache(identity_view_factory)

        # -- Setup Permissioning -- #
        permission_verifier = PermissionVerifier(
            permissions,
            block_store.chain_head_state_root,
            id_cache)

        identity_observer = IdentityObserver(
            to_update=id_cache.invalidate,
            forked=id_cache.forked)

        settings_observer = SettingsObserver(
            to_update=settings_cache.invalidate,
            forked=settings_cache.forked)

        # -- Setup Journal -- #
        batch_injector_factory = DefaultBatchInjectorFactory(
            state_view_factory=state_view_factory,
            signer=identity_signer)

        block_publisher = BlockPublisher(
            block_manager=block_manager,
            transaction_executor=transaction_executor,
            transaction_committed=block_store.has_transaction,
            batch_committed=block_store.has_batch,
            state_view_factory=native_state_view_factory,
            block_sender=block_sender,
            batch_sender=batch_sender,
            chain_head=block_store.chain_head,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            batch_observers=[batch_tracker],
            batch_injector_factory=batch_injector_factory)

        block_validator = BlockValidator(
            block_manager=block_manager,
            view_factory=native_state_view_factory,
            transaction_executor=transaction_executor,
            block_status_store=block_status_store,
            permission_verifier=permission_verifier)

        chain_controller = ChainController(
            block_store=block_store,
            block_manager=block_manager,
            block_validator=block_validator,
            state_database=global_state_db,
            chain_head_lock=block_publisher.chain_head_lock,
            block_status_store=block_status_store,
            consensus_notifier=consensus_notifier,
            consensus_registry=consensus_registry,
            state_pruning_block_depth=state_pruning_block_depth,
            fork_cache_keep_time=fork_cache_keep_time,
            data_dir=data_dir,
            observers=[
                event_broadcaster,
                receipt_store,
                batch_tracker,
                identity_observer,
                settings_observer
            ])

        genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=transaction_executor,
            block_manager=block_manager,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender,
            receipt_store=receipt_store)

        responder = Responder(completer)

        completer.set_on_block_received(chain_controller.queue_block)

        self._incoming_batch_sender = None

        # -- Register Message Handler -- #
        network_handlers.add(
            network_dispatcher, network_service, gossip, completer,
            responder, network_thread_pool, sig_pool,
            lambda block_id: block_id in block_manager, self.has_batch,
            permission_verifier, block_publisher, consensus_notifier)

        component_handlers.add(
            component_dispatcher, gossip, context_manager,
            transaction_executor, completer, block_store, batch_tracker,
            global_state_db, self.get_chain_head_state_root_hash,
            receipt_store, event_broadcaster, permission_verifier,
            component_thread_pool, client_thread_pool,
            sig_pool, block_publisher,
            identity_signer.get_public_key().as_hex())

        # -- Store Object References -- #
        self._component_dispatcher = component_dispatcher
        self._component_service = component_service
        self._component_thread_pool = component_thread_pool

        self._network_dispatcher = network_dispatcher
        self._network_service = network_service
        self._network_thread_pool = network_thread_pool

        consensus_proxy = ConsensusProxy(
            block_manager=block_manager,
            chain_controller=chain_controller,
            block_publisher=block_publisher,
            gossip=gossip,
            identity_signer=identity_signer,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            state_view_factory=state_view_factory,
            consensus_registry=consensus_registry,
            consensus_notifier=consensus_notifier)

        consensus_handlers.add(
            consensus_dispatcher,
            consensus_thread_pool,
            consensus_proxy,
            consensus_notifier)

        self._block_status_store = block_status_store

        self._consensus_notifier = consensus_notifier
        self._consensus_dispatcher = consensus_dispatcher
        self._consensus_service = consensus_service
        self._consensus_thread_pool = consensus_thread_pool
        self._consensus_registry = consensus_registry

        self._client_thread_pool = client_thread_pool
        self._sig_pool = sig_pool

        self._context_manager = context_manager
        self._transaction_executor = transaction_executor
        self._genesis_controller = genesis_controller
        self._gossip = gossip

        self._block_publisher = block_publisher
        self._block_validator = block_validator
        self._chain_controller = chain_controller
Example #28
class ClientEventsGetRequestHandlerTest(unittest.TestCase):
    def setUp(self):
        self.dir = tempfile.mkdtemp()
        self.block_db = NativeLmdbDatabase(
            os.path.join(self.dir, 'block.lmdb'),
            BlockStore.create_index_configuration())
        self.block_store = BlockStore(self.block_db)
        self.receipt_store = TransactionReceiptStore(DictDatabase())
        self._txn_ids_by_block_id = {}
        for block_id, blk_w, txn_ids in create_chain():
            self.block_store.put_blocks([blk_w.block])
            self._txn_ids_by_block_id[block_id] = txn_ids
            for txn_id in txn_ids:
                receipt = create_receipt(txn_id=txn_id,
                                         key_values=[("address", block_id)])
                self.receipt_store.put(
                    txn_id=txn_id,
                    txn_receipt=receipt)

    def test_get_events_by_block_id(self):
        """Tests that the correct events are returned by the
        ClientEventsGetRequest handler for each block id.

        """

        event_broadcaster = EventBroadcaster(
            Mock(),
            block_store=self.block_store,
            receipt_store=self.receipt_store)
        for block_id, _ in self._txn_ids_by_block_id.items():
            request = client_event_pb2.ClientEventsGetRequest()
            request.block_ids.extend([block_id])
            subscription = request.subscriptions.add()
            subscription.event_type = "sawtooth/block-commit"
            event_filter = subscription.filters.add()
            event_filter.key = "address"
            event_filter.match_string = block_id
            event_filter.filter_type = event_filter.SIMPLE_ALL

            event_filter2 = subscription.filters.add()
            event_filter2.key = "block_id"
            event_filter2.match_string = block_id
            event_filter2.filter_type = event_filter2.SIMPLE_ALL

            handler = ClientEventsGetRequestHandler(event_broadcaster)
            handler_output = handler.handle(
                "dummy_conn_id",
                request.SerializeToString())

            self.assertEqual(handler_output.message_type,
                             validator_pb2.Message.CLIENT_EVENTS_GET_RESPONSE)

            self.assertEqual(handler_output.status, HandlerStatus.RETURN)
            self.assertTrue(
                all(any(a.value == block_id
                        for a in e.attributes)
                    for e in handler_output.message_out.events),
                "Each Event has at least one attribute value that is the"
                " block id for the block.")
            self.assertEqual(handler_output.message_out.status,
                             client_event_pb2.ClientEventsGetResponse.OK)
            self.assertTrue(len(handler_output.message_out.events) > 0)
Example #29
 def clear(self):
     self._block_store = DictDatabase(
         indexes=BlockStore.create_index_configuration())
Example #30
    def _respond(self, request):
        head_block = self._get_head_block(request)
        self._validate_ids(request.block_ids)
        blocks = None
        paging_response = None

        if request.block_ids:
            blocks = self._block_store.get_blocks(request.block_ids)
            blocks = itertools.filterfalse(
                lambda block: block.block_num > head_block.block_num,
                blocks)

            # realize the iterator
            blocks = list(map(lambda blkw: blkw.block, blocks))

            paging_response = client_list_control_pb2.ClientPagingResponse()
        else:
            paging = request.paging
            sort_reverse = BlockListRequest.is_reverse(
                request.sorting, self._status.INVALID_SORT)
            limit = min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE
            iterargs = {
                'reverse': not sort_reverse
            }

            if paging.start:
                iterargs['start_block_num'] = paging.start
            elif not sort_reverse:
                iterargs['start_block'] = head_block

            block_iter = None
            try:
                block_iter = self._block_store.get_block_iter(**iterargs)
                blocks = block_iter
                if sort_reverse:
                    blocks = itertools.takewhile(
                        lambda block: block.block_num <= head_block.block_num,
                        blocks)

                blocks = itertools.islice(blocks, limit)
                # realize the result list, which will evaluate the underlying
                # iterator
                blocks = list(map(lambda blkw: blkw.block, blocks))

                next_block = next(block_iter, None)
                if next_block:
                    next_block_num = BlockStore.block_num_to_hex(
                        next_block.block_num)
                else:
                    next_block_num = None

                start = self._block_store.get(
                    blocks[0].header_signature).block_num
            except ValueError:
                if paging.start:
                    return self._status.INVALID_PAGING

                return self._status.NO_ROOT

            paging_response = client_list_control_pb2.ClientPagingResponse(
                next=next_block_num,
                limit=limit,
                start=BlockStore.block_num_to_hex(start)
            )

        if not blocks:
            return self._wrap_response(
                self._status.NO_RESOURCE,
                head_id=head_block.identifier,
                paging=paging_response)

        return self._wrap_response(
            head_id=head_block.identifier,
            paging=paging_response,
            blocks=blocks)
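The paging logic above clamps the requested limit, slices the block iterator, and peeks one element ahead to decide whether a next page exists. A minimal, self-contained sketch of the same pattern (page_items and the integer items are hypothetical stand-ins; the page-size constants are assumed values):

import itertools

DEFAULT_PAGE_SIZE = 100   # assumed value, for illustration only
MAX_PAGE_SIZE = 1000      # assumed value, for illustration only

def page_items(item_iter, requested_limit):
    # clamp the requested limit; fall back to the default when unset (0)
    limit = min(requested_limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE
    # realize at most `limit` items from the iterator
    page = list(itertools.islice(item_iter, limit))
    # peek one item ahead: a non-None result means another page exists
    next_item = next(item_iter, None)
    return page, next_item

page, next_item = page_items(iter(range(10)), 4)
assert page == [0, 1, 2, 3] and next_item == 4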
Example #31
    def __init__(self, bind_network, bind_component, endpoint,
                 peering, seeds_list, peer_list, data_dir, config_dir,
                 identity_signing_key, scheduler_type,
                 network_public_key=None,
                 network_private_key=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): the key the validator uses for
                signing
            scheduler_type (str): the scheduler type; if None, it is read
                from the 'scheduler' setting in validator.toml, defaulting
                to 'serial'
        """
        db_filename = os.path.join(data_dir,
                                   'merkle-{}.lmdb'.format(
                                       bind_network[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(data_dir,
                                         'state-deltas-{}.lmdb'.format(
                                             bind_network[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format(
                                         bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        batch_tracker = BatchTracker(block_store)
        block_store.add_update_observer(batch_tracker)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        sig_pool = ThreadPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._sig_pool = sig_pool

        self._service = Interconnect(bind_component,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True)

        config_file = os.path.join(config_dir, "validator.toml")

        validator_config = {}
        if os.path.exists(config_file):
            with open(config_file) as fd:
                raw_config = fd.read()
            validator_config = toml.loads(raw_config)

        if scheduler_type is None:
            scheduler_type = validator_config.get("scheduler", "serial")

        executor = TransactionExecutor(
            service=self._service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(
                StateViewFactory(merkle_db)),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        self._executor = executor
        self._service.set_check_connections(executor.check_connections)

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        self._network = Interconnect(
            bind_network,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network,
                              endpoint=endpoint,
                              peering_mode=peering,
                              initial_seed_endpoints=seeds_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            state_delta_processor=state_delta_processor,
            data_dir=data_dir,
            config_dir=config_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300,
            batch_observers=[batch_tracker]
        )

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender
        )

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING,
            PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network),
            network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip),
            network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(),
            sig_pool)

        # GOSSIP_MESSAGE 3) Verifies batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            structure_verifier.GossipHandlerStructureVerifier(),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Determines if we should broadcast the
        # message to our peers. It is important that this occurs before
        # the message is sent to the completer, since this step relies
        # on whether the gossip message has previously been seen by the
        # validator to decide whether or not it should be forwarded.
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(
                gossip=self._gossip,
                completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 5) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            structure_verifier.GossipBlockResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BATCH_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            structure_verifier.GossipBatchResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            BatchListPermissionVerifier(
                settings_view_factory=SettingsViewFactory(
                    StateViewFactory(merkle_db)),
                current_root_func=self._journal.get_current_root,
            ),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            structure_verifier.BatchListStructureVerifier(),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(
                completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST,
            StateDeltaGetEventsHandler(block_store, state_delta_store),
            thread_pool)
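The docstring above distinguishes 'static' and 'dynamic' peering. A hypothetical instantiation under static peering is sketched below; every endpoint, path, and key value is a placeholder, and identity_key is assumed to be a hex-encoded private key loaded elsewhere:

# Hypothetical usage sketch; all values are placeholders.
validator = Validator(
    bind_network='tcp://127.0.0.1:8800',
    bind_component='tcp://127.0.0.1:4004',
    endpoint='tcp://203.0.113.5:8800',    # publicly reachable URI
    peering='static',                     # only dial the peers listed below
    seeds_list=[],                        # seeds are used only in 'dynamic' mode
    peer_list=['tcp://203.0.113.6:8800'],
    data_dir='/var/lib/sawtooth',
    config_dir='/etc/sawtooth',
    identity_signing_key=identity_key,    # assumed hex-encoded private key
    scheduler_type='serial')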
Example #32
class BlockTreeManager(object):
    def __str__(self):
        return str(self.block_cache)

    def __repr__(self):
        return repr(self.block_cache)

    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(DictDatabase())
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = ConfigView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        self.signing_key = signing.generate_privkey()
        self.public_key = signing.generate_pubkey(self.signing_key)

        self.identity_signing_key = signing.generate_privkey()
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.block_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signing_key=self.identity_signing_key,
            data_dir=None,
            config_dir=None)

    @property
    def chain_head(self):
        return self.block_store.chain_head

    def set_chain_head(self, block):
        self.block_store.update_chain([block], [])

    def generate_block(self,
                       previous_block=None,
                       add_to_store=False,
                       add_to_cache=False,
                       batch_count=0,
                       status=BlockStatus.Unknown,
                       invalid_consensus=False,
                       invalid_batch=False,
                       invalid_signature=False,
                       weight=0):

        previous = self._get_block(previous_block)
        if previous is None:
            previous = self.chain_head

        self.block_publisher.on_chain_updated(previous)

        # feed batches to the publisher until it emits a block
        while self.block_sender.new_block is None:
            self.block_publisher.on_batch_received(
                self._generate_batch_from_payload(''))
            self.block_publisher.on_check_publish_block(True)

        block_from_sender = self.block_sender.new_block
        self.block_sender.new_block = None

        block_wrapper = BlockWrapper(block_from_sender)

        if invalid_signature:
            block_wrapper.block.header_signature = "BAD"

        if invalid_consensus:
            block_wrapper.header.consensus = b'BAD'

        if invalid_batch:
            block_wrapper.block.batches.extend(
                [self._generate_batch_from_payload('BAD')])

        block_wrapper.weight = weight
        block_wrapper.status = status

        if add_to_cache:
            self.block_cache[block_wrapper.identifier] = block_wrapper

        if add_to_store:
            self.block_store[block_wrapper.identifier] = block_wrapper

        return block_wrapper

    def generate_chain(self, root_block, blocks, params=None):
        """
        Generate a new chain based on the root block and place it in the
        block cache.
        """
        if params is None:
            params = {}

        if root_block is None:
            previous = self.generate_genesis_block()
            self.block_store[previous.identifier] = previous
        else:
            previous = self._get_block(root_block)

        # 'blocks' may be an int (a block count) or a list of block defs
        try:
            block_defs = [self._block_def(**params) for _ in range(blocks)]
        except TypeError:
            block_defs = blocks

        out = []
        for block_def in block_defs:
            new_block = self.generate_block(previous_block=previous,
                                            **block_def)
            out.append(new_block)
            previous = new_block
        return out

    def _generate_block(self, payload, previous_block_id, block_num):
        header = BlockHeader(previous_block_id=previous_block_id,
                             signer_pubkey=self.public_key,
                             block_num=block_num)

        block_builder = BlockBuilder(header)
        block_builder.add_batches([self._generate_batch_from_payload(payload)])

        header_bytes = block_builder.block_header.SerializeToString()
        signature = signing.sign(header_bytes, self.identity_signing_key)
        block_builder.set_signature(signature)

        return BlockWrapper(block_builder.build_block())

    def generate_genesis_block(self):
        return self._generate_block(payload='Genesis',
                                    previous_block_id=NULL_BLOCK_IDENTIFIER,
                                    block_num=0)

    def _block_def(self,
                   add_to_store=False,
                   add_to_cache=False,
                   batch_count=0,
                   status=BlockStatus.Unknown,
                   invalid_consensus=False,
                   invalid_batch=False,
                   invalid_signature=False,
                   weight=0):
        return {
            "add_to_cache": add_to_cache,
            "add_to_store": add_to_store,
            "batch_count": batch_count,
            "status": status,
            "invalid_consensus": invalid_consensus,
            "invalid_batch": invalid_batch,
            "invalid_signature": invalid_signature,
            "weight": weight
        }

    def _get_block_id(self, block):
        if block is None:
            return None
        elif isinstance(block, (Block, BlockWrapper)):
            return block.header_signature
        elif isinstance(block, str):
            return block
        else:
            return str(block)

    def _get_block(self, block):
        if block is None:
            return None
        elif isinstance(block, Block):
            return BlockWrapper(block)
        elif isinstance(block, BlockWrapper):
            return block
        elif isinstance(block, str):
            return self.block_cache[block]
        else:  # fall back: coerce to a string and look it up in the cache
            return self.block_cache[str(block)]

    def generate_batch(self, txn_count=2, missing_deps=False, txns=None):
        if txns is None:
            txns = [
                self.generate_transaction('txn_' + str(i))
                for i in range(txn_count)
            ]

        if missing_deps:
            target_txn = txns[-1]
            txn_missing_deps = self.generate_transaction(
                payload='this one has a missing dependency',
                deps=[target_txn.header_signature])
            # replace the targeted txn with the missing deps txn
            txns[-1] = txn_missing_deps

        batch_header = BatchHeader(signer_pubkey=self.public_key,
                                   transaction_ids=[
                                       txn.header_signature for txn in txns
                                   ]).SerializeToString()

        batch = Batch(header=batch_header,
                      header_signature=self._signed_header(batch_header),
                      transactions=txns)

        return batch

    def generate_transaction(self, payload='txn', deps=None):
        payload_encoded = payload.encode('utf-8')
        hasher = hashlib.sha512()
        hasher.update(payload_encoded)

        txn_header = TransactionHeader(
            dependencies=([] if deps is None else deps),
            batcher_pubkey=self.public_key,
            family_name='test',
            family_version='1',
            nonce=_generate_id(16),
            payload_encoding="text",
            payload_sha512=hasher.hexdigest().encode(),
            signer_pubkey=self.public_key).SerializeToString()

        txn = Transaction(header=txn_header,
                          header_signature=self._signed_header(txn_header),
                          payload=payload_encoded)

        return txn

    def _generate_batch_from_payload(self, payload):
        txn = self.generate_transaction(payload)

        batch_header = BatchHeader(signer_pubkey=self.public_key,
                                   transaction_ids=[txn.header_signature
                                                    ]).SerializeToString()

        batch = Batch(header=batch_header,
                      header_signature=self._signed_header(batch_header),
                      transactions=[txn])

        return batch

    def _signed_header(self, header):
        return signing.sign(header, self.signing_key)
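A brief usage sketch for this test helper (hypothetical; it assumes BlockTreeManager and its mock collaborators are importable from the test module):

# Hypothetical usage of the helper above.
btm = BlockTreeManager()                  # builds and commits a genesis block
chain = btm.generate_chain(btm.chain_head, 3)
assert len(chain) == 3
# each generated block links back to its predecessor
assert chain[1].header.previous_block_id == chain[0].identifier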
Example #33
class BlockTreeManager(object):
    def __str__(self):
        return str(self.block_cache)

    def __repr__(self):
        return repr(self.block_cache)

    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(DictDatabase(
            indexes=BlockStore.create_index_configuration()))
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        identity_private_key = context.new_random_private_key()
        self.identity_signer = crypto_factory.new_signer(identity_private_key)
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            settings_cache=SettingsCache(
                SettingsViewFactory(self.state_view_factory),
            ),
            block_sender=self.block_sender,
            batch_sender=self.block_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signer=self.identity_signer,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            check_publish_block_frequency=0.1,
            batch_observers=[])

    @property
    def chain_head(self):
        return self.block_store.chain_head

    def set_chain_head(self, block):
        self.block_store.update_chain([block], [])

    def generate_block(self, previous_block=None,
                       add_to_store=False,
                       add_to_cache=False,
                       batch_count=1,
                       batches=None,
                       status=BlockStatus.Unknown,
                       invalid_consensus=False,
                       invalid_batch=False,
                       invalid_signature=False,
                       weight=0):

        previous = self._get_block(previous_block)
        if previous is None:
            previous = self.chain_head

        header = BlockHeader(
            previous_block_id=previous.identifier,
            signer_public_key=self.identity_signer.get_public_key().as_hex(),
            block_num=previous.block_num + 1)

        block_builder = BlockBuilder(header)
        if batches:
            block_builder.add_batches(batches)

        if batch_count != 0:
            block_builder.add_batches(
                [self._generate_batch()
                    for _ in range(batch_count)])

        if invalid_batch:
            block_builder.add_batches(
                [self._generate_batch_from_payload('BAD')])

        block_builder.set_state_hash('0' * 70)

        # let the mock consensus fill in the header's consensus field
        consensus = mock_consensus.BlockPublisher()
        consensus.finalize_block(block_builder.block_header, weight=weight)

        header_bytes = block_builder.block_header.SerializeToString()
        signature = self.identity_signer.sign(header_bytes)
        block_builder.set_signature(signature)

        block_wrapper = BlockWrapper(block_builder.build_block())

        if batches:
            block_wrapper.block.batches.extend(batches)

        if batch_count:
            block_wrapper.block.batches.extend(
                [self.generate_batch() for _ in range(batch_count)])

        if invalid_signature:
            block_wrapper.block.header_signature = "BAD"

        if invalid_consensus:
            block_wrapper.header.consensus = b'BAD'

        block_wrapper.status = status

        if add_to_cache:
            self.block_cache[block_wrapper.identifier] = block_wrapper

        if add_to_store:
            self.block_store[block_wrapper.identifier] = block_wrapper

        LOGGER.debug("Generated %s", dumps_block(block_wrapper))
        return block_wrapper

    def generate_chain(self, root_block, blocks, params=None):
        """
        Generate a new chain based on the root block and place it in the
        block cache.
        """
        if params is None:
            params = {}

        if root_block is None:
            previous = self.generate_genesis_block()
            self.block_store[previous.identifier] = previous
        else:
            previous = self._get_block(root_block)

        # 'blocks' may be an int (a block count) or a list of block defs
        try:
            block_defs = [self._block_def(**params) for _ in range(blocks)]
            block_defs[-1] = self._block_def()
        except TypeError:
            block_defs = blocks

        out = []
        for block_def in block_defs:
            new_block = self.generate_block(
                previous_block=previous, **block_def)
            out.append(new_block)
            previous = new_block
        return out

    def create_block(self, payload='payload', batch_count=1,
                     previous_block_id=NULL_BLOCK_IDENTIFIER, block_num=0):
        header = BlockHeader(
            previous_block_id=previous_block_id,
            signer_public_key=self.identity_signer.get_public_key().as_hex(),
            block_num=block_num)

        block_builder = BlockBuilder(header)
        block_builder.add_batches(
            [self._generate_batch_from_payload(payload)
                for _ in range(batch_count)])
        block_builder.set_state_hash('0' * 70)

        header_bytes = block_builder.block_header.SerializeToString()
        signature = self.identity_signer.sign(header_bytes)
        block_builder.set_signature(signature)

        block_wrapper = BlockWrapper(block_builder.build_block())
        LOGGER.debug("Generated %s", dumps_block(block_wrapper))
        return block_wrapper

    def generate_genesis_block(self):
        return self.create_block(
            payload='Genesis',
            previous_block_id=NULL_BLOCK_IDENTIFIER,
            block_num=0)

    def _block_def(self,
                   add_to_store=False,
                   add_to_cache=False,
                   batch_count=1,
                   status=BlockStatus.Unknown,
                   invalid_consensus=False,
                   invalid_batch=False,
                   invalid_signature=False,
                   weight=0):
        return {
            "add_to_cache": add_to_cache,
            "add_to_store": add_to_store,
            "batch_count": batch_count,
            "status": status,
            "invalid_consensus": invalid_consensus,
            "invalid_batch": invalid_batch,
            "invalid_signature": invalid_signature,
            "weight": weight
        }

    def _get_block_id(self, block):
        if block is None:
            return None
        elif isinstance(block, (Block, BlockWrapper)):
            return block.header_signature
        else:
            return str(block)

    def _get_block(self, block):
        if block is None:
            return None
        elif isinstance(block, Block):
            return BlockWrapper(block)
        elif isinstance(block, BlockWrapper):
            return block
        elif isinstance(block, str):
            return self.block_cache[block]
        else:  # fall back: coerce to a string and look it up in the cache
            return self.block_cache[str(block)]

    def generate_batch(self, txn_count=2, missing_deps=False,
                       txns=None):

        batch = self._generate_batch(txn_count, missing_deps, txns)

        LOGGER.debug("Generated Batch:\n%s", dumps_batch(batch))

        return batch

    def _generate_batch(self, txn_count=2, missing_deps=False,
                        txns=None):
        if txns is None:
            txns = []

        if txn_count != 0:
            txns += [
                self.generate_transaction('txn_' + str(i))
                for i in range(txn_count)
            ]

        if missing_deps:
            target_txn = txns[-1]
            txn_missing_deps = self.generate_transaction(
                payload='this one has a missing dependency',
                deps=[target_txn.header_signature])
            # replace the targeted txn with the missing deps txn
            txns[-1] = txn_missing_deps

        batch_header = BatchHeader(
            signer_public_key=self.signer.get_public_key().as_hex(),
            transaction_ids=[txn.header_signature for txn in txns]
        ).SerializeToString()

        batch = Batch(
            header=batch_header,
            header_signature=self.signer.sign(batch_header),
            transactions=txns)

        return batch

    def generate_transaction(self, payload='txn', deps=None):
        payload_encoded = payload.encode('utf-8')
        hasher = hashlib.sha512()
        hasher.update(payload_encoded)

        txn_header = TransactionHeader(
            dependencies=([] if deps is None else deps),
            batcher_public_key=self.signer.get_public_key().as_hex(),
            family_name='test',
            family_version='1',
            nonce=_generate_id(16),
            payload_sha512=hasher.hexdigest().encode(),
            signer_public_key=self.signer.get_public_key().as_hex()
        ).SerializeToString()

        txn = Transaction(
            header=txn_header,
            header_signature=self.signer.sign(txn_header),
            payload=payload_encoded)

        return txn

    def _generate_batch_from_payload(self, payload):
        txn = self.generate_transaction(payload)

        batch_header = BatchHeader(
            signer_public_key=self.signer.get_public_key().as_hex(),
            transaction_ids=[txn.header_signature]
        ).SerializeToString()

        batch = Batch(
            header=batch_header,
            header_signature=self.signer.sign(batch_header),
            transactions=[txn])
        return batch
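The missing_deps path in _generate_batch swaps the last transaction for one that depends on it, so the resulting batch can never be completed. A hypothetical check of that property (btm is assumed to be an instance of the class above, and TransactionHeader is the protobuf message it already imports):

# Hypothetical usage; `btm` is an instance of the BlockTreeManager above.
batch = btm.generate_batch(txn_count=2, missing_deps=True)
txn_header = TransactionHeader()
txn_header.ParseFromString(batch.transactions[-1].header)
# the declared dependency is not among the batch's own transactions
assert txn_header.dependencies[0] not in [
    txn.header_signature for txn in batch.transactions]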
Example #34
    def __init__(self, size=3, start='0'):
        super().__init__(DictDatabase(
            indexes=BlockStore.create_index_configuration()))

        for i in range(size):
            self.add_block(_increment_key(start, i))
Example #35
    def __init__(self, bind_network, bind_component, endpoint,
                 peering, seeds_list, peer_list, data_dir, config_dir,
                 identity_signing_key, network_public_key=None,
                 network_private_key=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): the key the validator uses for
                signing
        """
        db_filename = os.path.join(data_dir,
                                   'merkle-{}.lmdb'.format(
                                       bind_network[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(data_dir,
                                         'state-deltas-{}.lmdb'.format(
                                             bind_network[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format(
                                         bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        batch_tracker = BatchTracker(block_store)
        block_store.add_update_observer(batch_tracker)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        sig_pool = ThreadPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._sig_pool = sig_pool

        self._service = Interconnect(bind_component,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True)

        executor = TransactionExecutor(
            service=self._service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(
                StateViewFactory(merkle_db)),
            invalid_observers=[batch_tracker])
        self._executor = executor
        self._service.set_check_connections(executor.check_connections)

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        self._network = Interconnect(
            bind_network,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network,
                              endpoint=endpoint,
                              peering_mode=peering,
                              initial_seed_endpoints=seeds_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            state_delta_processor=state_delta_processor,
            data_dir=data_dir,
            config_dir=config_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300,
            batch_observers=[batch_tracker]
        )

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender
        )

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING,
            PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network),
            network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip),
            network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(),
            sig_pool)

        # GOSSIP_MESSAGE 3) Verifies batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            structure_verifier.GossipHandlerStructureVerifier(),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Determines if we should broadcast the
        # message to our peers. It is important that this occurs before
        # the message is sent to the completer, since this step relies
        # on whether the gossip message has previously been seen by the
        # validator to decide whether or not it should be forwarded.
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(
                gossip=self._gossip,
                completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 5) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            structure_verifier.GossipBlockResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BATCH_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            structure_verifier.GossipBatchResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            BatchListPermissionVerifier(
                settings_view_factory=SettingsViewFactory(
                    StateViewFactory(merkle_db)),
                current_root_func=self._journal.get_current_root,
            ),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            structure_verifier.BatchListStructureVerifier(),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(
                completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST,
            GetStateDeltaEventsHandler(block_store, state_delta_store),
            thread_pool)
示例#36
0
    def __init__(self, size=3, start='0'):
        super().__init__(DictDatabase(
            indexes=BlockStore.create_index_configuration()))

        for i in range(size):
            self.add_block(_increment_key(start, i))
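
Note: this example calls an _increment_key helper that is not shown in the
listing. A minimal sketch consistent with its usage here (deriving the i-th
key from a starting hex character) might look like the following; the actual
helper in the source may differ.

def _increment_key(key, offset=1):
    # Hypothetical implementation: treat the key as a hex string and
    # advance it by the given offset.
    return hex(int(key, 16) + offset)[2:]

# With size=3 and start='0', the constructor above would add blocks
# under the keys '0', '1' and '2'.
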
Example #37
class BlockTreeManager(object):
    def block_def(self,
                  add_to_store=False,
                  add_to_cache=False,
                  batch_count=0,
                  status=BlockStatus.Unknown,
                  invalid_consensus=False,
                  invalid_batch=False,
                  invalid_signature=False,
                  weight=None):
        return {
            "add_to_cache": add_to_cache,
            "add_to_store": add_to_store,
            "batch_count": batch_count,
            "status": status,
            "invalid_consensus": invalid_consensus,
            "invalid_batch": invalid_batch,
            "invalid_signature": invalid_signature,
            "weight": weight
        }

    def __init__(self):
        self.block_sender = MockBlockSender()
        self.block_store = BlockStore(DictDatabase())
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        self.state_db[_setting_address('sawtooth.consensus.algorithm')] = \
            _setting_entry('sawtooth.consensus.algorithm',
                           'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        self.signing_key = signing.generate_privkey()
        self.public_key = signing.encode_pubkey(
            signing.generate_pubkey(self.signing_key), "hex")

        self.identity_signing_key = signing.generate_privkey()
        self.genesis_block = self._generate_genesis_block()
        self.set_chain_head(self.genesis_block)

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            squash_handler=None,
            chain_head=self.genesis_block,
            identity_signing_key=self.identity_signing_key)

    def _get_block_id(self, block):
        if block is None:
            return None
        if isinstance(block, (Block, BlockWrapper)):
            return block.header_signature
        if isinstance(block, str):
            return block
        return str(block)

    def _get_block(self, block):
        if block is None:
            return None
        if isinstance(block, Block):
            return BlockWrapper(block)
        if isinstance(block, BlockWrapper):
            return block
        if isinstance(block, str):
            return self.block_cache[block]
        # last resort: coerce to a string id and look it up in the cache
        return self.block_cache[str(block)]

    @property
    def chain_head(self):
        return self.block_store.chain_head

    def set_chain_head(self, block):
        self.block_store.update_chain([block], [])

    def generate_block(self,
                       previous_block=None,
                       add_to_store=False,
                       add_to_cache=False,
                       batch_count=0,
                       status=BlockStatus.Unknown,
                       invalid_consensus=False,
                       invalid_batch=False,
                       invalid_signature=False,
                       weight=None):

        previous = self._get_block(previous_block)
        if previous is None:
            previous = self.chain_head

        self.block_publisher.on_chain_updated(previous)

        while self.block_sender.new_block is None:
            self.block_publisher.on_batch_received(self._generate_batch(''))
            self.block_publisher.on_check_publish_block(True)

        block = self.block_sender.new_block
        self.block_sender.new_block = None

        block = BlockWrapper(block)

        if invalid_signature:
            block.block.header_signature = "BAD"

        block.weight = weight
        block.status = status

        if add_to_cache:
            self.block_cache[block.identifier] = block

        if add_to_store:
            self.block_store[block.identifier] = block

        return block

    def _generate_batch(self, payload):
        payload_encoded = payload.encode('utf-8')
        hasher = hashlib.sha512()
        hasher.update(payload_encoded)

        header = TransactionHeader()
        header.batcher_pubkey = self.public_key
        # txn.dependencies is left unset for these test transactions
        header.family_name = 'test'
        header.family_version = '1'
        header.nonce = _generate_id(16)
        header.payload_encoding = "text"
        header.payload_sha512 = hasher.hexdigest().encode()
        header.signer_pubkey = self.public_key

        txn = Transaction()
        header_bytes = header.SerializeToString()
        txn.header = header_bytes
        txn.header_signature = signing.sign(header_bytes, self.signing_key)
        txn.payload = payload_encoded

        batch_header = BatchHeader()
        batch_header.signer_pubkey = self.public_key
        batch_header.transaction_ids.extend([txn.header_signature])

        batch = Batch()
        header_bytes = batch_header.SerializeToString()
        batch.header = header_bytes
        batch.header_signature = signing.sign(header_bytes, self.signing_key)
        batch.transactions.extend([txn])
        return batch

    def _generate_genesis_block(self):
        genesis_header = BlockHeader(previous_block_id=NULL_BLOCK_IDENTIFIER,
                                     signer_pubkey=self.public_key,
                                     block_num=0)

        block = BlockBuilder(genesis_header)
        block.add_batches([self._generate_batch("Genesis")])
        header_bytes = block.block_header.SerializeToString()
        signature = signing.sign(header_bytes, self.identity_signing_key)
        block.set_signature(signature)
        return BlockWrapper(block.build_block())

    def generate_chain(self, root_block, blocks, params=None):
        """
        Generate a new chain based on the root block and place it in the
        block cache.
        """
        if params is None:
            params = {}

        out = []
        if not isinstance(blocks, list):
            blocks = self.generate_chain_definition(int(blocks), params)

        if root_block is None:
            previous = self._generate_genesis_block()
            self.block_store[previous.identifier] = previous
        else:
            previous = self._get_block(root_block)
        for block in blocks:
            new_block = self.generate_block(previous_block=previous, **block)
            out.append(new_block)
            previous = new_block
        return out

    def generate_chain_definition(self, count, params=None):
        if params is None:
            params = {}

        out = []
        for _ in range(0, count):
            out.append(self.block_def(**params))
        return out

    def __str__(self):
        return str(self.block_cache)

    def __repr__(self):
        return repr(self.block_cache)
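
A brief usage sketch for this test harness, based only on the methods defined
above (the chain length and block parameters are arbitrary choices):

btm = BlockTreeManager()

# Build a three-block chain on top of the genesis block, placing each
# block into both the store and the cache.
chain = btm.generate_chain(
    btm.chain_head, 3,
    params={'add_to_store': True, 'add_to_cache': True})

assert len(chain) == 3
assert btm.block_cache[chain[-1].identifier] is chain[-1]
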
Example #38
    def clear(self):
        self._block_store = DictDatabase(
            indexes=BlockStore.create_index_configuration())
Example #39
def make_block_store(data=None):
    return BlockStore(DictDatabase(data))
Example #40
    def __init__(self, network_endpoint, component_endpoint, peer_list,
                 data_dir, identity_signing_key):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            identity_signing_key (str): the validator's identity signing key
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        context_manager = ContextManager(merkle_db)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._process_pool = process_pool

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20)

        executor = TransactionExecutor(service=self._service,
                                       context_manager=context_manager,
                                       config_view_factory=ConfigViewFactory(
                                           StateViewFactory(merkle_db)))
        self._executor = executor

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network with requirements for
        # server endpoint authentication at the network level, this can
        # be augmented with a local lookup service for side-band provided
        # endpoint, public_key pairs and a local configuration option
        # for 'server' side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
            heartbeat=True,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network, initial_peer_endpoints=peer_list)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            data_dir=data_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING, PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network), network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)

        # GOSSIP_MESSAGE 3) Determines whether we should broadcast the
        # message to our peers. It is important that this occur prior to
        # sending the message to the completer, because this step relies
        # on whether the gossip message has previously been seen by the
        # validator to decide whether to forward it. (A standalone sketch
        # of this ordering constraint follows this example.)
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip, completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(), network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(), network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BATCH_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(completer),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(
                self._journal.get_block_store(), completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(self._journal.get_block_store(),
                                               completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)
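
The GOSSIP_MESSAGE comments in this example describe an ordering constraint:
the broadcast decision must run before the completer records the message,
because that decision is based on whether the message has been seen before.
A standalone sketch of the constraint (all names here are invented for
illustration, not taken from the validator code):

completer_cache = set()

def gossip_broadcast(msg_id):
    # Forward only messages the completer has not yet recorded.
    if msg_id not in completer_cache:
        print('forwarding', msg_id)

def completer_receive(msg_id):
    # The completer caches every message it receives.
    completer_cache.add(msg_id)

# Correct order: decide on forwarding first, then record.
for handler in (gossip_broadcast, completer_receive):
    handler('block-1')

# Reversing the two handlers would record 'block-1' before the forwarding
# check runs, so nothing would ever be forwarded.
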
Example #41
    def _respond(self, request):
        head_block = self._get_head_block(request)
        self._validate_ids(request.block_ids)
        blocks = None
        paging_response = None

        if request.block_ids:
            blocks = self._block_store.get_blocks(request.block_ids)
            blocks = itertools.filterfalse(
                lambda block: block.block_num > head_block.block_num, blocks)

            # realize the iterator
            blocks = list(map(lambda blkw: blkw.block, blocks))

            paging_response = client_list_control_pb2.ClientPagingResponse()
        else:
            paging = request.paging
            sort_reverse = BlockListRequest.is_reverse(
                request.sorting, self._status.INVALID_SORT)
            limit = min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE
            iterargs = {'reverse': not sort_reverse}

            if paging.start:
                iterargs['start_block_num'] = paging.start
            elif not sort_reverse:
                iterargs['start_block'] = head_block

            block_iter = None
            try:
                block_iter = self._block_store.get_block_iter(**iterargs)
                blocks = block_iter
                if sort_reverse:
                    blocks = itertools.takewhile(
                        lambda block: block.block_num <= head_block.block_num,
                        blocks)

                blocks = itertools.islice(blocks, limit)
                # realize the result list, which will evaluate the underlying
                # iterator
                blocks = list(map(lambda blkw: blkw.block, blocks))

                next_block = next(block_iter, None)
                if next_block:
                    next_block_num = BlockStore.block_num_to_hex(
                        next_block.block_num)
                else:
                    next_block_num = None

                start = self._block_store.get(
                    blocks[0].header_signature).block_num
            except ValueError:
                if paging.start:
                    return self._status.INVALID_PAGING

                return self._status.NO_ROOT

            paging_response = client_list_control_pb2.ClientPagingResponse(
                next=next_block_num,
                limit=limit,
                start=BlockStore.block_num_to_hex(start))

        if not blocks:
            return self._wrap_response(self._status.NO_RESOURCE,
                                       head_id=head_block.identifier,
                                       paging=paging_response)

        return self._wrap_response(head_id=head_block.identifier,
                                   paging=paging_response,
                                   blocks=blocks)
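
A small illustration of the paging limit computed above:
min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE caps oversized requests
and falls back to the default when the client sends no limit (protobuf
integer fields default to 0, which is falsy). A sketch with assumed constant
values; the real constants live elsewhere in the client handler module:

MAX_PAGE_SIZE = 1000
DEFAULT_PAGE_SIZE = 100

def effective_limit(requested):
    # 0 means "unset" for a protobuf int, so `or` supplies the default.
    return min(requested, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE

assert effective_limit(0) == 100       # no limit requested -> default
assert effective_limit(25) == 25       # modest request honored
assert effective_limit(5000) == 1000   # oversized request capped
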
Example #42
def make_block_store(data=None):
    return BlockStore(
        DictDatabase(data,
                     indexes=BlockStore.create_index_configuration()))
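
Compared with Example #39, this variant passes
BlockStore.create_index_configuration() to the backing DictDatabase. A hedged
usage sketch of why that matters (the lookups named here are the ones
exercised by the profiling snippet at the end of this listing):

store = make_block_store()
# The index configuration creates the secondary (batch and transaction)
# indexes behind lookups such as:
#     store.get_block_by_batch_id(batch_id)
#     store.get_batch_by_transaction(txn_id)
# A store built without these indexes, as in Example #39, would not be
# able to serve such index-backed lookups.
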
Example #43
    def __init__(self, network_endpoint, component_endpoint, peer_list,
                 data_dir, key_dir):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            key_dir (str): path to the key directory
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'n')
        context_manager = ContextManager(merkle_db)
        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'n')
        block_store = BlockStore(block_db)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False)
        executor = TransactionExecutor(service=self._service,
                                       context_manager=context_manager,
                                       config_view_factory=ConfigViewFactory(
                                           StateViewFactory(merkle_db)))

        identity = hashlib.sha512(time.time().hex().encode()).hexdigest()[:23]

        identity_signing_key = Validator.load_identity_signing_key(
            key_dir, DEFAULT_KEY_NAME)

        network_thread_pool = ThreadPoolExecutor(max_workers=10)

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network with requirements for
        # server endpoint authentication at the network level, this can
        # be augmented with a local lookup service for side-band provided
        # endpoint, public_key pairs and a local configuration option
        # for 'server' side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            identity=identity,
            peer_connections=peer_list,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*')

        self._gossip = Gossip(self._network)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            chain_id_manager=chain_id_manager)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        self._network_dispatcher.add_handler(validator_pb2.Message.GOSSIP_PING,
                                             PingHandler(),
                                             network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(self._journal.get_block_store(),
                                               completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)
Example #44
    def __init__(self,
                 bind_network,
                 bind_component,
                 bind_consensus,
                 endpoint,
                 peering,
                 seeds_list,
                 peer_list,
                 data_dir,
                 config_dir,
                 identity_signer,
                 scheduler_type,
                 permissions,
                 minimum_peer_connectivity,
                 maximum_peer_connectivity,
                 state_pruning_block_depth,
                 network_public_key=None,
                 network_private_key=None,
                 roles=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            bind_consensus (str): the consensus endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signer (Signer): cryptographic signer the validator
                uses for signing
        """
        # -- Setup Global State Database and Factory -- #
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('global state database file is %s',
                     global_state_db_filename)
        global_state_db = NativeLmdbDatabase(
            global_state_db_filename,
            indexes=MerkleDatabase.create_index_configuration())
        state_view_factory = StateViewFactory(global_state_db)

        # -- Setup Receipt Store -- #
        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
        receipt_store = TransactionReceiptStore(receipt_db)

        # -- Setup Block Store -- #
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = IndexedDatabase(
            block_db_filename,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            flag='c',
            indexes=BlockStore.create_index_configuration())
        block_store = BlockStore(block_db)
        # The cache keep time for the journal's block cache must be greater
        # than the cache keep time used by the completer; the journal uses
        # 9/8 of the base keep time (1200 s -> 1350 s).
        base_keep_time = 1200
        block_cache = BlockCache(block_store,
                                 keep_time=int(base_keep_time * 9 / 8),
                                 purge_frequency=30)

        # -- Setup Thread Pools -- #
        component_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=10, name='Component')
        network_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                             name='Network')
        client_thread_pool = InstrumentedThreadPoolExecutor(max_workers=5,
                                                            name='Client')
        sig_pool = InstrumentedThreadPoolExecutor(max_workers=3,
                                                  name='Signature')

        # -- Setup Dispatchers -- #
        component_dispatcher = Dispatcher()
        network_dispatcher = Dispatcher()

        # -- Setup Services -- #
        component_service = Interconnect(bind_component,
                                         component_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        network_service = Interconnect(bind_network,
                                       dispatcher=network_dispatcher,
                                       zmq_identity=zmq_identity,
                                       secured=secure,
                                       server_public_key=network_public_key,
                                       server_private_key=network_private_key,
                                       heartbeat=True,
                                       public_endpoint=endpoint,
                                       connection_timeout=120,
                                       max_incoming_connections=100,
                                       max_future_callback_workers=10,
                                       authorize=True,
                                       signer=identity_signer,
                                       roles=roles)

        # -- Setup Transaction Execution Platform -- #
        context_manager = ContextManager(global_state_db)

        batch_tracker = BatchTracker(block_store)

        settings_cache = SettingsCache(
            SettingsViewFactory(state_view_factory))

        transaction_executor = TransactionExecutor(
            service=component_service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        component_service.set_check_connections(
            transaction_executor.check_connections)

        event_broadcaster = EventBroadcaster(component_service, block_store,
                                             receipt_store)

        # -- Setup P2P Networking -- #
        gossip = Gossip(network_service,
                        settings_cache,
                        lambda: block_store.chain_head,
                        block_store.chain_head_state_root,
                        endpoint=endpoint,
                        peering_mode=peering,
                        initial_seed_endpoints=seeds_list,
                        initial_peer_endpoints=peer_list,
                        minimum_peer_connectivity=minimum_peer_connectivity,
                        maximum_peer_connectivity=maximum_peer_connectivity,
                        topology_check_frequency=1)

        completer = Completer(block_store,
                              gossip,
                              cache_keep_time=base_keep_time,
                              cache_purge_frequency=30,
                              requested_keep_time=300)

        block_sender = BroadcastBlockSender(completer, gossip)
        batch_sender = BroadcastBatchSender(completer, gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(global_state_db))

        id_cache = IdentityCache(identity_view_factory)

        # -- Setup Permissioning -- #
        permission_verifier = PermissionVerifier(
            permissions, block_store.chain_head_state_root, id_cache)

        identity_observer = IdentityObserver(to_update=id_cache.invalidate,
                                             forked=id_cache.forked)

        settings_observer = SettingsObserver(
            to_update=settings_cache.invalidate, forked=settings_cache.forked)

        # -- Consensus Engine -- #
        consensus_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=3, name='Consensus')
        consensus_dispatcher = Dispatcher()
        consensus_service = Interconnect(bind_consensus,
                                         consensus_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        consensus_notifier = ConsensusNotifier(consensus_service)

        # -- Setup Journal -- #
        batch_injector_factory = DefaultBatchInjectorFactory(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            signer=identity_signer)

        block_publisher = BlockPublisher(
            transaction_executor=transaction_executor,
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            settings_cache=settings_cache,
            block_sender=block_sender,
            batch_sender=batch_sender,
            chain_head=block_store.chain_head,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            check_publish_block_frequency=0.1,
            batch_observers=[batch_tracker],
            batch_injector_factory=batch_injector_factory)

        block_publisher_batch_sender = block_publisher.batch_sender()

        block_validator = BlockValidator(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            transaction_executor=transaction_executor,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier)

        chain_controller = ChainController(
            block_store=block_store,
            block_cache=block_cache,
            block_validator=block_validator,
            state_database=global_state_db,
            chain_head_lock=block_publisher.chain_head_lock,
            state_pruning_block_depth=state_pruning_block_depth,
            data_dir=data_dir,
            observers=[
                event_broadcaster, receipt_store, batch_tracker,
                identity_observer, settings_observer
            ])

        genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=transaction_executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(block_publisher_batch_sender.send)
        completer.set_on_block_received(chain_controller.queue_block)
        completer.set_chain_has_block(chain_controller.has_block)

        # -- Register Message Handler -- #
        network_handlers.add(network_dispatcher, network_service, gossip,
                             completer, responder, network_thread_pool,
                             sig_pool, chain_controller.has_block,
                             block_publisher.has_batch, permission_verifier,
                             block_publisher, consensus_notifier)

        component_handlers.add(component_dispatcher, gossip, context_manager,
                               transaction_executor, completer, block_store,
                               batch_tracker, global_state_db,
                               self.get_chain_head_state_root_hash,
                               receipt_store, event_broadcaster,
                               permission_verifier, component_thread_pool,
                               client_thread_pool, sig_pool, block_publisher)

        # -- Store Object References -- #
        self._component_dispatcher = component_dispatcher
        self._component_service = component_service
        self._component_thread_pool = component_thread_pool

        self._network_dispatcher = network_dispatcher
        self._network_service = network_service
        self._network_thread_pool = network_thread_pool

        consensus_proxy = ConsensusProxy(
            block_cache=block_cache,
            chain_controller=chain_controller,
            block_publisher=block_publisher,
            gossip=gossip,
            identity_signer=identity_signer,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            state_view_factory=state_view_factory)

        consensus_handlers.add(consensus_dispatcher, consensus_thread_pool,
                               consensus_proxy)

        self._consensus_dispatcher = consensus_dispatcher
        self._consensus_service = consensus_service
        self._consensus_thread_pool = consensus_thread_pool

        self._client_thread_pool = client_thread_pool
        self._sig_pool = sig_pool

        self._context_manager = context_manager
        self._transaction_executor = transaction_executor
        self._genesis_controller = genesis_controller
        self._gossip = gossip

        self._block_publisher = block_publisher
        self._chain_controller = chain_controller
        self._block_validator = block_validator
Example #45
import cProfile

from test_journal.block_tree_manager import BlockTreeManager
from sawtooth_validator.database.dict_database import DictDatabase
from sawtooth_validator.journal.block_store import BlockStore

if __name__ == '__main__':
    print("\n====== cProfile: ./validator/cprof_block_store.py ======\n")
    pr = cProfile.Profile()
    pr.enable()

    block_tree_manager = BlockTreeManager()
    block = block_tree_manager.create_block()
    block_store = BlockStore(
        DictDatabase({
            block.header_signature: block,
        },
                     indexes=BlockStore.create_index_configuration()))
    block_store.update_chain([block])

    batch_id = block.batches[0].header_signature
    stored = block_store.get_block_by_batch_id(batch_id)

    batch = block.batches[0]
    txn_id = batch.transactions[0].header_signature
    stored = block_store.get_batch_by_transaction(txn_id)

    batch_id = batch.header_signature
    stored_batch = block_store.get_batch(batch_id)

    pr.disable()
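
The excerpt stops at pr.disable(); a typical follow-up (not part of the
original snippet) would print the collected profile, for example:

import pstats

stats = pstats.Stats(pr)
stats.sort_stats('cumulative').print_stats(10)  # top ten by cumulative time
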