Example #1
    def setUp(self):
        self.block_store = BlockStore(DictDatabase(
            indexes=BlockStore.create_index_configuration()))
        self.gossip = MockGossip()
        self.completer = Completer(
            block_cache=BlockCache(self.block_store),
            transaction_committed=self.block_store.has_transaction,
            get_committed_batch_by_id=self.block_store.get_batch,
            get_committed_batch_by_txn_id=(
                self.block_store.get_batch_by_transaction
            ),
            get_chain_head=lambda: self.block_store.chain_head,
            gossip=self.gossip)
        self.completer._on_block_received = self._on_block_received
        self.completer._on_batch_received = self._on_batch_received
        self.completer._has_block = self._has_block
        self._has_block_value = True

        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        self.blocks = []
        self.batches = []
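
The setUp above swaps the Completer's internal callbacks for test doubles. A minimal sketch of what the MockGossip test double could look like (the real class lives in the test module and is not shown here): it records the requests the Completer broadcasts, using the same method names the Completer calls in Example #30, so assertions can inspect them.

class MockGossip(object):
    def __init__(self):
        self.requested_blocks = []
        self.requested_batches = []

    def broadcast_block_request(self, block_id):
        # record instead of broadcasting to peers
        self.requested_blocks.append(block_id)

    def broadcast_batch_by_batch_id_request(self, batch_id):
        self.requested_batches.append(batch_id)

    def broadcast_batch_by_transaction_id_request(self, transaction_ids):
        self.requested_batches.extend(transaction_ids)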
Example #2
    def _get_block_publisher(self, state_hash):
        """Returns the block publisher based on the consensus module set by the
        "sawtooth_config" transaction family.

        Args:
            state_hash (str): The current state root hash for reading settings.

        Raises:
            InvalidGenesisStateError: if any errors occur getting the
                BlockPublisher.
        """
        state_view = self._state_view_factory.create_view(state_hash)
        try:

            class BatchPublisher(object):
                def send(self, transactions):
                    # Consensus implementations are expected to have handling
                    # in place for genesis operation. This should include
                    # adding any authorization and registrations required
                    # for the genesis node to the Genesis Batch list and
                    # detecting validation of the Genesis Block and handling
                    # it correctly. Batch publication is not allowed during
                    # genesis operation since there is no network to validate
                    # the batch yet.
                    raise InvalidGenesisConsensusError(
                        'Consensus cannot send transactions during genesis.')

            consensus = ConsensusFactory.get_configured_consensus_module(
                state_view)
            return consensus.BlockPublisher(BlockCache(self._block_store),
                                            state_view=state_view,
                                            batch_publisher=BatchPublisher(),
                                            data_dir=self._data_dir)
        except UnknownConsensusModuleError as e:
            raise InvalidGenesisStateError(e)
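
A self-contained illustration of the guard above: any consensus that tries to publish a batch during genesis fails loudly rather than silently dropping the batch. The exception class is re-declared here only to make the snippet runnable on its own.

class InvalidGenesisConsensusError(Exception):
    pass

class BatchPublisher(object):
    def send(self, transactions):
        raise InvalidGenesisConsensusError(
            'Consensus cannot send transactions during genesis.')

try:
    BatchPublisher().send([])
except InvalidGenesisConsensusError as err:
    print(err)  # Consensus cannot send transactions during genesis.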
Example #3
 def __init__(self,
              block_store,
              gossip,
              cache_keep_time=300,
              cache_purge_frequency=30,
              requested_keep_time=1200):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     :param cache_keep_time (float) Time in seconds to keep values in
         TimedCaches.
     :param cache_purge_frequency (float) Time between purging the
         TimedCaches.
     """
     self.gossip = gossip
     self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
     self.block_cache = BlockCache(block_store, cache_keep_time,
                                   cache_purge_frequency)
     self._block_store = block_store
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
     self._incomplete_batches = TimedCache(cache_keep_time,
                                           cache_purge_frequency)
     self._incomplete_blocks = TimedCache(cache_keep_time,
                                          cache_purge_frequency)
     self._requested = TimedCache(requested_keep_time,
                                  cache_purge_frequency)
     self._on_block_received = None
     self._on_batch_received = None
     self._has_block = None
     self.lock = RLock()
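
The Completer leans heavily on TimedCache. A rough sketch of the contract assumed here, not the real implementation: a dict whose entries carry a last-touched timestamp and can be dropped once they are older than keep_time seconds.

import time

class TimedCache(dict):
    def __init__(self, keep_time=30, purge_frequency=30):
        super(TimedCache, self).__init__()
        self._keep_time = keep_time
        # purge_frequency is unused in this sketch; the real cache uses it
        # to decide how often purge_expired is triggered.
        self._timestamps = {}

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        self._timestamps[key] = time.time()

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        del self._timestamps[key]

    def purge_expired(self):
        cutoff = time.time() - self._keep_time
        for key in [k for k, ts in self._timestamps.items() if ts < cutoff]:
            del self[key]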
Example #4
    def test_block_cache(self):
        block_store = {}
        cache = BlockCache(block_store=block_store, keep_time=1,
                           purge_frequency=1)

        header1 = BlockHeader(previous_block_id="000")
        block1 = BlockWrapper(Block(header=header1.SerializeToString(),
                                    header_signature="ABC"))

        header2 = BlockHeader(previous_block_id="ABC")
        block2 = BlockWrapper(Block(header=header2.SerializeToString(),
                                    header_signature="DEF"))

        header3 = BlockHeader(previous_block_id="BCA")
        block3 = BlockWrapper(Block(header=header3.SerializeToString(),
                                    header_signature="FED"))

        cache[block1.header_signature] = block1
        cache[block2.header_signature] = block2

        # Check that blocks are in the BlockCache
        self.assertIn("ABC", cache)
        self.assertIn("DEF", cache)

        # Wait for purge time to expire
        time.sleep(1)
        # Add "FED"
        cache[block3.header_signature] = block3

        # Check that "ABC" is still in the cache even though the keep time has
        # expired because it has a referecne count of 1 but "DEF" has been
        # removed
        self.assertIn("ABC", cache)
        self.assertNotIn("DEF", cache)
        self.assertIn("FED", cache)
Example #5
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(DictDatabase())
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = ConfigView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        self.signing_key = signing.generate_privkey()
        self.public_key = signing.generate_pubkey(self.signing_key)

        self.identity_signing_key = signing.generate_privkey()
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.block_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signing_key=self.identity_signing_key,
            data_dir=None,
            config_dir=None)
Example #6
    def __init__(
        self,
        consensus_module,
        block_store,
        state_view_factory,
        block_sender,
        transaction_executor,
        squash_handler,
        block_cache=None  # not required; allows tests to inject a
        # prepopulated block cache.
    ):
        self._consensus_module = consensus_module
        self._block_store = BlockStoreAdapter(block_store)
        self._block_cache = block_cache
        if self._block_cache is None:
            self._block_cache = BlockCache(self._block_store)
        self._state_view_factory = state_view_factory

        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._block_sender = block_sender

        self._block_publisher = None
        self._batch_queue = queue.Queue()
        self._publisher_thread = None

        self._chain_controller = None
        self._block_queue = queue.Queue()
        self._chain_thread = None
Example #7
    def __init__(self):
        self.block_sender = MockBlockSender()
        self.block_store = BlockStore(DictDatabase())
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        self.state_db[_setting_address('sawtooth.consensus.algorithm')] = \
            _setting_entry('sawtooth.consensus.algorithm',
                           'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        self.signing_key = signing.generate_privkey()
        self.public_key = signing.encode_pubkey(
            signing.generate_pubkey(self.signing_key), "hex")

        self.identity_signing_key = signing.generate_privkey()
        self.genesis_block = self._generate_genesis_block()
        self.set_chain_head(self.genesis_block)

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            squash_handler=None,
            chain_head=self.genesis_block,
            identity_signing_key=self.identity_signing_key)
Example #8
    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300,
                 metrics_registry=None):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING: this time should always be less
            than cache_keep_time, or the validator can get into a state where
            it fails to make progress because it thinks it has already
            requested something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        if metrics_registry:
            # Tracks how many times an unsatisfied dependency is found
            self._unsatisfied_dependency_count = CounterWrapper(
                metrics_registry.counter(
                    'completer.unsatisfied_dependency_count'))
            # Tracks the length of the completer's _seen_txns
            self._seen_txns_length = GaugeWrapper(
                metrics_registry.gauge('completer.seen_txns_length'))
            # Tracks the length of the completer's _incomplete_blocks
            self._incomplete_blocks_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_blocks_length'))
            # Tracks the length of the completer's _incomplete_batches
            self._incomplete_batches_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_batches_length'))
        else:
            self._unsatisfied_dependency_count = CounterWrapper()
            self._seen_txns_length = GaugeWrapper()
            self._incomplete_blocks_length = GaugeWrapper()
            self._incomplete_batches_length = GaugeWrapper()
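
The WARNING above describes an invariant the constructor does not enforce. A hypothetical guard one might add (the parameter names match the constructor's; the check itself is not part of the original code):

def check_completer_keep_times(cache_keep_time, requested_keep_time):
    # Hypothetical; the original constructor performs no such check.
    if requested_keep_time >= cache_keep_time:
        raise ValueError(
            'requested_keep_time ({}) must be less than cache_keep_time '
            '({}), or the validator may believe it has already requested '
            'an object it is still missing'.format(
                requested_keep_time, cache_keep_time))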
Example #9
    def test_load_from_block_store(self):
        """ Test that misses will load from the block store.
        """
        bs = {}
        bs["test"] = "value"
        bc = BlockCache(bs)

        self.assertTrue("test" in bc)
        self.assertTrue(bc["test"] == "value")
Example #10
 def test_not_in_valid_block_publisher_list(self):
     factory = self.create_state_view_factory({})
     dev_mode = \
         BlockPublisher(
             block_cache=BlockCache(block_store=MockBlockStore()),
             state_view_factory=factory,
             batch_publisher=None,
             data_dir=None)
     block_header = self.create_block_header("name")
     self.assertTrue(dev_mode.check_publish_block(block_header))
Example #11
 def __init__(self, block_store, gossip, cache_purge_frequency=30):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     :param cache_purge_frequency (int) The time between purging the
             TimedCaches.
     """
     self.gossip = gossip
     self.batch_cache = TimedCache(cache_purge_frequency)
     self.block_cache = BlockCache(block_store, cache_purge_frequency)
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._seen_txns = TimedCache(cache_purge_frequency)
     self._incomplete_batches = TimedCache(cache_purge_frequency)
     self._incomplete_blocks = TimedCache(cache_purge_frequency)
     self._on_block_received = None
     self._on_batch_received = None
     self.lock = RLock()
     self._cache_purge_frequency = cache_purge_frequency
     self._purge_time = time.time() + self._cache_purge_frequency
Example #12
 def test_in_valid_block_publisher_list(self):
     factory = self.create_state_view_factory(
         {"sawtooth.consensus.valid_block_publisher": ["name"]})
     dev_mode = \
         BlockPublisher(
             block_cache=BlockCache(block_store=MockBlockStore()),
             state_view_factory=factory,
             batch_publisher=None,
             data_dir=None,
             validator_id='Validator_001')
     block_header = self.create_block_header("name")
     self.assertTrue(dev_mode.check_publish_block(block_header))
Example #13
    def __init__(self,
                 block_store,
                 state_view_factory,
                 block_sender,
                 batch_sender,
                 transaction_executor,
                 squash_handler,
                 identity_signing_key,
                 chain_id_manager,
                 data_dir,
                 block_cache=None):
        """
        Creates a Journal instance.

        Args:
            block_store (:obj:): The block store.
            state_view_factory (:obj:`StateViewFactory`): StateViewFactory for
                read-only state views.
            block_sender (:obj:`BlockSender`): The BlockSender instance.
            batch_sender (:obj:`BatchSender`): The BatchSender instance.
            transaction_executor (:obj:`TransactionExecutor`): A
                TransactionExecutor instance.
            squash_handler (function): Squash handler function for merging
                contexts.
            identity_signing_key (str): Private key for signing blocks
            chain_id_manager (:obj:`ChainIdManager`) The ChainIdManager
                instance.
            data_dir (str): directory for data storage.
            block_cache (:obj:`BlockCache`, optional): A BlockCache to use in
                place of an internally created instance. Defaults to None.
        """
        self._block_store = block_store
        self._block_cache = block_cache
        if self._block_cache is None:
            self._block_cache = BlockCache(self._block_store)
        self._state_view_factory = state_view_factory

        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signing_key = identity_signing_key
        self._block_sender = block_sender
        self._batch_sender = batch_sender

        self._block_publisher = None
        self._batch_queue = queue.Queue()
        self._publisher_thread = None

        self._chain_controller = None
        self._block_queue = queue.Queue()
        self._chain_thread = None
        self._chain_id_manager = chain_id_manager
        self._data_dir = data_dir
Example #14
 def __init__(self, block_store, gossip):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     """
     self.gossip = gossip
     self.batch_cache = TimedCache()
     self.block_cache = BlockCache(block_store)
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._on_block_received = None
     self._on_batch_received = None
Example #15
    def test_load_from_block_store(self):
        """ Test that misses will load from the block store.
        """
        bs = {}
        bs["test"] = "value"
        bs["test2"] = "value"
        bc = BlockCache(bs)

        self.assertTrue("test" in bc)
        self.assertTrue(bc["test2"] == "value")

        with self.assertRaises(KeyError):
            bc["test-missing"]
Example #16
    def test_default_settings(self):
        factory = self.create_state_view_factory(values=None)

        dev_mode = \
            BlockPublisher(
                block_cache=BlockCache(block_store=MockBlockStore()),
                state_view_factory=factory,
                batch_publisher=None,
                data_dir=None)

        block_header = self.create_block_header()

        self.assertTrue(dev_mode.check_publish_block(block_header))
Example #17
 def do_load_from_block_store():
     bs = {}
     block1 = Block(
         header=BlockHeader(previous_block_id="000").SerializeToString(),
         header_signature="test")
     bs["test"] = BlockWrapper(block1)
     block2 = Block(
         header=BlockHeader(previous_block_id="000").SerializeToString(),
         header_signature="test2")
     blkw2 = BlockWrapper(block2)
     bs["test2"] = blkw2
     bc = BlockCache(bs)
     return bc, blkw2
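
A hypothetical unittest-style caller for the helper above, mirroring the assertions of Example #25:

import unittest

class TestBlockCacheLoad(unittest.TestCase):
    def test_load_from_block_store(self):
        bc, blkw2 = do_load_from_block_store()
        self.assertIn("test", bc)
        self.assertEqual(bc["test2"], blkw2)
        with self.assertRaises(KeyError):
            bc["test-missing"]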
Example #18
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        self.block_manager = BlockManager()
        self.block_manager.add_store("commit_store", self.block_store)

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        identity_private_key = context.new_random_private_key()
        self.identity_signer = crypto_factory.new_signer(identity_private_key)
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            chain_head = self.genesis_block
            self.block_manager.put([chain_head.block])
            self.block_manager.persist(chain_head.block.header_signature,
                                       "commit_store")

        self.block_publisher = BlockPublisher(
            block_manager=self.block_manager,
            transaction_executor=MockTransactionExecutor(),
            transaction_committed=self.block_store.has_transaction,
            batch_committed=self.block_store.has_batch,
            state_view_factory=self.state_view_factory,
            settings_cache=SettingsCache(
                SettingsViewFactory(self.state_view_factory), ),
            block_sender=self.block_sender,
            batch_sender=self.block_sender,
            chain_head=chain_head.block,
            identity_signer=self.identity_signer,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            batch_observers=[])
Example #19
    def create_chain_commit_state(
        self,
        committed_blocks,
        uncommitted_blocks,
        head_id,
    ):
        block_store = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        block_store.update_chain(committed_blocks)

        block_cache = BlockCache(block_store=block_store)

        for block in uncommitted_blocks:
            block_cache[block.header_signature] = block

        return ChainCommitState(head_id, block_cache, block_store)
Example #20
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.dir = tempfile.mkdtemp()
        self.block_db = NativeLmdbDatabase(
            os.path.join(self.dir, 'block.lmdb'),
            BlockStore.create_index_configuration())
        self.block_store = BlockStore(self.block_db)
        self.block_cache = BlockCache(self.block_store)
        self.state_db = NativeLmdbDatabase(
            os.path.join(self.dir, "merkle.lmdb"),
            MerkleDatabase.create_index_configuration())

        self.state_view_factory = NativeStateViewFactory(self.state_db)

        self.block_manager = BlockManager()
        self.block_manager.add_commit_store(self.block_store)

        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        identity_private_key = context.new_random_private_key()
        self.identity_signer = crypto_factory.new_signer(identity_private_key)
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            chain_head = self.genesis_block
            self.block_manager.put([chain_head.block])
            self.block_manager.persist(chain_head.block.header_signature,
                                       "commit_store")

        self.block_publisher = BlockPublisher(
            block_manager=self.block_manager,
            transaction_executor=MockTransactionExecutor(),
            transaction_committed=self.block_store.has_transaction,
            batch_committed=self.block_store.has_batch,
            state_view_factory=self.state_view_factory,
            block_sender=self.block_sender,
            batch_sender=self.block_sender,
            chain_head=chain_head.block,
            identity_signer=self.identity_signer,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            batch_observers=[])
Example #21
    def test_min_wait_time(self):
        # non zero value of min wait time
        factory = self.create_state_view_factory(
            {"sawtooth.consensus.min_wait_time": 1})
        dev_mode = \
            BlockPublisher(
                block_cache=BlockCache(block_store=MockBlockStore()),
                state_view_factory=factory,
                batch_publisher=None,
                data_dir=None)
        block_header = self.create_block_header()

        dev_mode.initialize_block(block_header)

        self.assertFalse(dev_mode.check_publish_block(block_header))
        time.sleep(1)
        self.assertTrue(dev_mode.check_publish_block(block_header))
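
The sleep(1) works because check_publish_block is gated on the time elapsed since initialize_block. A sketch of that gate with hypothetical attribute names (the real dev_mode implementation is not shown):

import time

class MinWaitGate(object):
    def __init__(self, min_wait_time):
        self._min_wait_time = min_wait_time
        self._block_start = None

    def initialize_block(self):
        self._block_start = time.time()

    def check_publish_block(self):
        # publishable only after min_wait_time seconds have elapsed
        return time.time() - self._block_start >= self._min_wait_time

gate = MinWaitGate(min_wait_time=1)
gate.initialize_block()
assert not gate.check_publish_block()
time.sleep(1)
assert gate.check_publish_block()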
Example #22
    def __init__(self, with_genesis=True):
        self.block_sender = MockBlockSender()
        self.batch_sender = MockBatchSender()
        self.block_store = BlockStore(DictDatabase(
            indexes=BlockStore.create_index_configuration()))
        self.block_cache = BlockCache(self.block_store)
        self.state_db = {}

        # add the mock reference to the consensus
        consensus_setting_addr = SettingsView.setting_address(
            'sawtooth.consensus.algorithm')
        self.state_db[consensus_setting_addr] = _setting_entry(
            'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

        self.state_view_factory = MockStateViewFactory(self.state_db)
        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        identity_private_key = context.new_random_private_key()
        self.identity_signer = crypto_factory.new_signer(identity_private_key)
        chain_head = None
        if with_genesis:
            self.genesis_block = self.generate_genesis_block()
            self.set_chain_head(self.genesis_block)
            chain_head = self.genesis_block

        self.block_publisher = BlockPublisher(
            transaction_executor=MockTransactionExecutor(),
            block_cache=self.block_cache,
            state_view_factory=self.state_view_factory,
            settings_cache=SettingsCache(
                SettingsViewFactory(self.state_view_factory),
            ),
            block_sender=self.block_sender,
            batch_sender=self.block_sender,
            squash_handler=None,
            chain_head=chain_head,
            identity_signer=self.identity_signer,
            data_dir=None,
            config_dir=None,
            permission_verifier=MockPermissionVerifier(),
            check_publish_block_frequency=0.1,
            batch_observers=[])
Example #23
    def __init__(self):
        self.block_sender = MockBlockSender()
        self.block_store = BlockStoreAdapter({})
        self.block_cache = BlockCache(self.block_store)

        self.signing_key = signing.generate_privkey()
        self.public_key = signing.encode_pubkey(
            signing.generate_pubkey(self.signing_key), "hex")
        self.genesis_block = self._generate_genesis_block()
        self.block_store[self.genesis_block.identifier] = self.genesis_block
        self.set_chain_head(self.genesis_block)

        self.block_publisher = BlockPublisher(
            consensus=TestModePublisher(),
            transaction_executor=MockTransactionExecutor(),
            block_sender=self.block_sender,
            squash_handler=None,
            chain_head=self.genesis_block)
Example #24
    def _get_block_publisher(self, state_hash):
        """Returns the block publisher based on the consensus module set by the
        "sawtooth_config" transaction family.

        Args:
            state_hash (str): The current state root hash for reading settings.

        Raises:
            InvalidGenesisStateError: if any errors occur getting the
                BlockPublisher.
        """
        state_view = self._state_view_factory.create_view(state_hash)
        try:
            consensus = ConsensusFactory.get_configured_consensus_module(
                state_view)
            return consensus.BlockPublisher(BlockCache(self._block_store),
                                            state_view=state_view)
        except UnknownConsensusModuleError as e:
            raise InvalidGenesisStateError(e)
Example #25
    def test_load_from_block_store(self):
        """ Test that misses will load from the block store.
        """
        bs = {}
        block1 = Block(
            header=BlockHeader(previous_block_id="000").SerializeToString(),
            header_signature="test")
        bs["test"] = BlockWrapper(block1)
        block2 = Block(
            header=BlockHeader(previous_block_id="000").SerializeToString(),
            header_signature="test2")
        blkw2 = BlockWrapper(block2)
        bs["test2"] = blkw2
        bc = BlockCache(bs)

        self.assertTrue("test" in bc)
        self.assertTrue(bc["test2"] == blkw2)

        with self.assertRaises(KeyError):
            bc["test-missing"]
Example #26
def do_block_cache():
    block_store = {}
    cache = BlockCache(block_store=block_store, keep_time=1,
                       purge_frequency=1)

    header1 = BlockHeader(previous_block_id="000")
    block1 = BlockWrapper(Block(header=header1.SerializeToString(),
                                header_signature="ABC"))

    header2 = BlockHeader(previous_block_id="ABC")
    block2 = BlockWrapper(Block(header=header2.SerializeToString(),
                                header_signature="DEF"))

    header3 = BlockHeader(previous_block_id="BCA")
    block3 = BlockWrapper(Block(header=header3.SerializeToString(),
                                header_signature="FED"))

    cache[block1.header_signature] = block1
    cache[block2.header_signature] = block2

    return cache
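
A hypothetical caller for the helper above, replaying the purge scenario from Example #4; block3 is rebuilt here because the helper does not return it, and the import paths are assumptions:

import time

from sawtooth_validator.protobuf.block_pb2 import Block, BlockHeader
from sawtooth_validator.journal.block_wrapper import BlockWrapper

cache = do_block_cache()
time.sleep(1)  # let keep_time expire so the next insert triggers a purge

header3 = BlockHeader(previous_block_id="BCA")
block3 = BlockWrapper(Block(header=header3.SerializeToString(),
                            header_signature="FED"))
cache[block3.header_signature] = block3

assert "ABC" in cache       # kept: "DEF" still references it
assert "DEF" not in cache   # expired and unreferenced
assert "FED" in cache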
Example #27
 def __init__(self, block_store, gossip, cache_purge_frequency=30):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     :param cache_purge_frequency (int) The time between purging the
             TimedCaches.
     """
     self.gossip = gossip
     self.batch_cache = TimedCache(cache_purge_frequency)
     self.block_cache = BlockCache(block_store, cache_purge_frequency)
     self._block_store = block_store
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._seen_txns = TimedCache(cache_purge_frequency)
     self._incomplete_batches = TimedCache(cache_purge_frequency)
     self._incomplete_blocks = TimedCache(cache_purge_frequency)
     self._on_block_received = None
     self._on_batch_received = None
     self.lock = RLock()
     self._cache_purge_frequency = cache_purge_frequency
     self._purge_time = time.time() + self._cache_purge_frequency
Example #28
    def __init__(self,
                 bind_network,
                 bind_component,
                 bind_consensus,
                 endpoint,
                 peering,
                 seeds_list,
                 peer_list,
                 data_dir,
                 config_dir,
                 identity_signer,
                 scheduler_type,
                 permissions,
                 minimum_peer_connectivity,
                 maximum_peer_connectivity,
                 state_pruning_block_depth,
                 network_public_key=None,
                 network_private_key=None,
                 roles=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signer (str): cryptographic signer the validator uses for
                signing
        """
        # -- Setup Global State Database and Factory -- #
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('global state database file is %s',
                     global_state_db_filename)
        global_state_db = NativeLmdbDatabase(
            global_state_db_filename,
            indexes=MerkleDatabase.create_index_configuration())
        state_view_factory = StateViewFactory(global_state_db)

        # -- Setup Receipt Store -- #
        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
        receipt_store = TransactionReceiptStore(receipt_db)

        # -- Setup Block Store -- #
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = IndexedDatabase(
            block_db_filename,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            flag='c',
            indexes=BlockStore.create_index_configuration())
        block_store = BlockStore(block_db)
        # The cache keep time for the journal's block cache must be greater
        # than the cache keep time used by the completer.
        base_keep_time = 1200
        block_cache = BlockCache(block_store,
                                 keep_time=int(base_keep_time * 9 / 8),
                                 purge_frequency=30)
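        # With base_keep_time = 1200, blocks stay in this cache for
        # int(1200 * 9 / 8) = 1350 seconds, 150 seconds longer than the
        # completer below keeps them.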

        # -- Setup Thread Pools -- #
        component_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=10, name='Component')
        network_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                             name='Network')
        client_thread_pool = InstrumentedThreadPoolExecutor(max_workers=5,
                                                            name='Client')
        sig_pool = InstrumentedThreadPoolExecutor(max_workers=3,
                                                  name='Signature')

        # -- Setup Dispatchers -- #
        component_dispatcher = Dispatcher()
        network_dispatcher = Dispatcher()

        # -- Setup Services -- #
        component_service = Interconnect(bind_component,
                                         component_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        network_service = Interconnect(bind_network,
                                       dispatcher=network_dispatcher,
                                       zmq_identity=zmq_identity,
                                       secured=secure,
                                       server_public_key=network_public_key,
                                       server_private_key=network_private_key,
                                       heartbeat=True,
                                       public_endpoint=endpoint,
                                       connection_timeout=120,
                                       max_incoming_connections=100,
                                       max_future_callback_workers=10,
                                       authorize=True,
                                       signer=identity_signer,
                                       roles=roles)

        # -- Setup Transaction Execution Platform -- #
        context_manager = ContextManager(global_state_db)

        batch_tracker = BatchTracker(block_store)

        settings_cache = SettingsCache(
            SettingsViewFactory(state_view_factory), )

        transaction_executor = TransactionExecutor(
            service=component_service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        component_service.set_check_connections(
            transaction_executor.check_connections)

        event_broadcaster = EventBroadcaster(component_service, block_store,
                                             receipt_store)

        # -- Setup P2P Networking -- #
        gossip = Gossip(network_service,
                        settings_cache,
                        lambda: block_store.chain_head,
                        block_store.chain_head_state_root,
                        endpoint=endpoint,
                        peering_mode=peering,
                        initial_seed_endpoints=seeds_list,
                        initial_peer_endpoints=peer_list,
                        minimum_peer_connectivity=minimum_peer_connectivity,
                        maximum_peer_connectivity=maximum_peer_connectivity,
                        topology_check_frequency=1)

        completer = Completer(block_store,
                              gossip,
                              cache_keep_time=base_keep_time,
                              cache_purge_frequency=30,
                              requested_keep_time=300)

        block_sender = BroadcastBlockSender(completer, gossip)
        batch_sender = BroadcastBatchSender(completer, gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(global_state_db))

        id_cache = IdentityCache(identity_view_factory)

        # -- Setup Permissioning -- #
        permission_verifier = PermissionVerifier(
            permissions, block_store.chain_head_state_root, id_cache)

        identity_observer = IdentityObserver(to_update=id_cache.invalidate,
                                             forked=id_cache.forked)

        settings_observer = SettingsObserver(
            to_update=settings_cache.invalidate, forked=settings_cache.forked)

        # -- Consensus Engine -- #
        consensus_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=3, name='Consensus')
        consensus_dispatcher = Dispatcher()
        consensus_service = Interconnect(bind_consensus,
                                         consensus_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        consensus_notifier = ConsensusNotifier(consensus_service)

        # -- Setup Journal -- #
        batch_injector_factory = DefaultBatchInjectorFactory(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            signer=identity_signer)

        block_publisher = BlockPublisher(
            transaction_executor=transaction_executor,
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            settings_cache=settings_cache,
            block_sender=block_sender,
            batch_sender=batch_sender,
            chain_head=block_store.chain_head,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            check_publish_block_frequency=0.1,
            batch_observers=[batch_tracker],
            batch_injector_factory=batch_injector_factory)

        block_publisher_batch_sender = block_publisher.batch_sender()

        block_validator = BlockValidator(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            transaction_executor=transaction_executor,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier)

        chain_controller = ChainController(
            block_store=block_store,
            block_cache=block_cache,
            block_validator=block_validator,
            state_database=global_state_db,
            chain_head_lock=block_publisher.chain_head_lock,
            state_pruning_block_depth=state_pruning_block_depth,
            data_dir=data_dir,
            observers=[
                event_broadcaster, receipt_store, batch_tracker,
                identity_observer, settings_observer
            ])

        genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=transaction_executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(block_publisher_batch_sender.send)
        completer.set_on_block_received(chain_controller.queue_block)
        completer.set_chain_has_block(chain_controller.has_block)

        # -- Register Message Handler -- #
        network_handlers.add(network_dispatcher, network_service, gossip,
                             completer, responder, network_thread_pool,
                             sig_pool, chain_controller.has_block,
                             block_publisher.has_batch, permission_verifier,
                             block_publisher, consensus_notifier)

        component_handlers.add(component_dispatcher, gossip, context_manager,
                               transaction_executor, completer, block_store,
                               batch_tracker, global_state_db,
                               self.get_chain_head_state_root_hash,
                               receipt_store, event_broadcaster,
                               permission_verifier, component_thread_pool,
                               client_thread_pool, sig_pool, block_publisher)

        # -- Store Object References -- #
        self._component_dispatcher = component_dispatcher
        self._component_service = component_service
        self._component_thread_pool = component_thread_pool

        self._network_dispatcher = network_dispatcher
        self._network_service = network_service
        self._network_thread_pool = network_thread_pool

        consensus_proxy = ConsensusProxy(
            block_cache=block_cache,
            chain_controller=chain_controller,
            block_publisher=block_publisher,
            gossip=gossip,
            identity_signer=identity_signer,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            state_view_factory=state_view_factory)

        consensus_handlers.add(consensus_dispatcher, consensus_thread_pool,
                               consensus_proxy)

        self._consensus_dispatcher = consensus_dispatcher
        self._consensus_service = consensus_service
        self._consensus_thread_pool = consensus_thread_pool

        self._client_thread_pool = client_thread_pool
        self._sig_pool = sig_pool

        self._context_manager = context_manager
        self._transaction_executor = transaction_executor
        self._genesis_controller = genesis_controller
        self._gossip = gossip

        self._block_publisher = block_publisher
        self._chain_controller = chain_controller
        self._block_validator = block_validator
Example #29
    def __init__(self,
                 block_store,
                 state_view_factory,
                 block_sender,
                 batch_sender,
                 transaction_executor,
                 squash_handler,
                 identity_signing_key,
                 chain_id_manager,
                 state_delta_processor,
                 data_dir,
                 config_dir,
                 check_publish_block_frequency=0.1,
                 block_cache_purge_frequency=30,
                 block_cache_keep_time=300,
                 batch_observers=None,
                 block_cache=None):
        """
        Creates a Journal instance.

        Args:
            block_store (:obj:): The block store.
            state_view_factory (:obj:`StateViewFactory`): StateViewFactory for
                read-only state views.
            block_sender (:obj:`BlockSender`): The BlockSender instance.
            batch_sender (:obj:`BatchSender`): The BatchSender instance.
            transaction_executor (:obj:`TransactionExecutor`): A
                TransactionExecutor instance.
            squash_handler (function): Squash handler function for merging
                contexts.
            identity_signing_key (str): Private key for signing blocks
            chain_id_manager (:obj:`ChainIdManager`) The ChainIdManager
                instance.
            state_delta_processor (:obj:`StateDeltaProcessor`): The state
                delta processor.
            data_dir (str): directory for data storage.
            config_dir (str): directory for configuration.
            check_publish_block_frequency (float): delay in seconds between
                checks whether a block should be claimed.
            block_cache_purge_frequency (float): delay in seconds between
                purges of the BlockCache.
            block_cache_keep_time (float): time in seconds to hold unaccessed
                blocks in the BlockCache.
            block_cache (:obj:`BlockCache`, optional): A BlockCache to use in
                place of an internally created instance. Defaults to None.
        """
        self._block_store = block_store
        self._block_cache = block_cache
        if self._block_cache is None:
            self._block_cache = BlockCache(self._block_store,
                                           keep_time=block_cache_keep_time)
        self._block_cache_purge_frequency = block_cache_purge_frequency
        self._state_view_factory = state_view_factory

        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signing_key = identity_signing_key
        self._block_sender = block_sender
        self._batch_sender = batch_sender

        self._block_publisher = None
        self._check_publish_block_frequency = check_publish_block_frequency
        self._batch_queue = queue.Queue()
        self._batch_obs = [] if batch_observers is None else batch_observers
        self._publisher_thread = None

        self._executor_threadpool = ThreadPoolExecutor(1)
        self._chain_controller = None
        self._block_queue = queue.Queue()
        self._chain_thread = None
        self._chain_id_manager = chain_id_manager
        self._state_delta_processor = state_delta_processor
        self._data_dir = data_dir
        self._config_dir = config_dir
Example #30
class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and
    whose batches are all present, in the order specified by the block
    header. If the predecessor or a batch is missing, a request message is
    sent out over the gossip network. The Completer also checks that all
    batches have their dependencies satisfied; otherwise it requests the
    batch that contains the missing transaction.
    """
    def __init__(self, block_store, gossip, cache_purge_frequency=30):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_purge_frequency (int) The time between purging the
                TimedCaches.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self.lock = RLock()
        self._cache_purge_frequency = cache_purge_frequency
        self._purge_time = time.time() + self._cache_purge_frequency

    def _complete_block(self, block):
        """ Check the block to see if it is complete and if it can be passed to
            the journal. If the block's predecessor is not in the block_cache
            the predecessor is requested and the current block is added to
            the incomplete_block cache. If the block.batches and
            block.header.batch_ids are not the same length, the batch_id list
            is checked against the batch_cache to see if the batch_list can be
            built. If any batches are missing from the block and we do not have
            the batches in the batch_cache, they are requested. The block is
            then added to the incomplete_block cache. If we can complete the
            block, a new batch list is created in the correct order and added
            to the block. The block is now considered complete and is returned.
            If block.batches and block.header.batch_ids are the same length,
            the block's batch list needs to be in the same order as the
            block.header.batch_ids list. If the block has all of its expected
            batches but are not in the correct order, the batch list is rebuilt
            and added to the block. Once a block has the correct batch list it
            is added to the block_cache and is returned.

        """

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None

        if block.previous_block_id not in self.block_cache:
            LOGGER.debug("Request missing predecessor: %s",
                         block.previous_block_id)
            if block.previous_block_id not in self._incomplete_blocks:
                self._incomplete_blocks[block.previous_block_id] = [block]
            elif block not in self._incomplete_blocks[block.previous_block_id]:
                self._incomplete_blocks[block.previous_block_id] += [block]

            self.gossip.broadcast_block_request(block.previous_block_id)
            return None

        # Check for the same number of batch_ids and batches. If they
        # differ, start building the batch list; if there are more batches
        # than batch_ids, a batch does not belong and the block should be
        # dropped.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # used to supplement batch_cache, contains batches already in block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            del block.batches[:]
            # reset batches with full list batches
            block.batches.extend(batches)
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                # Clear batches from block
                del block.batches[:]
                # reset batches with full list batches
                if batches is not None:
                    block.batches.extend(batches)
                else:
                    return None

                return block
            else:
                LOGGER.debug("Block.header.batch_ids does not match set of "
                             "batches in block.batches Dropping %s", block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                        dependency):
                    LOGGER.debug("Transaction %s in batch %s has "
                                 "unsatisfied dependency: %s",
                                 txn.header_signature,
                                 batch.header_signature,
                                 dependency)

                    dependencies.append(dependency)
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(
                dependencies)

        return valid

    def _add_seen_txns(self, batch):
        for txn in batch.transactions:
            if txn.header_signature in self._seen_txns and \
                    self._seen_txns[txn.header_signature] == \
                    batch.header_signature:
                break
            self._seen_txns[txn.header_signature] = batch.header_signature

    def _process_incomplete_batches(self, key):
        # Keys are transaction_id
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key):
        # Keys are either a block_id or batch_id
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)

            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[inc_block.header_signature] = \
                                inc_block
                            self._on_block_received(inc_block)
                            to_complete.append(inc_block.header_signature)
                    del self._incomplete_blocks[my_key]

    def _purge_caches(self):
        if self._purge_time < time.time():
            LOGGER.debug("Purges caches of expired entries.")
            self._seen_txns.purge_expired()
            self._incomplete_batches.purge_expired()
            self._incomplete_blocks.purge_expired()
            self.batch_cache.purge_expired()
            self.block_cache.purge_expired()
            self._purge_time = time.time() + self._cache_purge_frequency

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        self._on_batch_received = on_batch_received_func

    def add_block(self, block):
        with self.lock:
            blkw = BlockWrapper(block)
            block = self._complete_block(blkw)
            if block is not None:
                self.block_cache[block.header_signature] = blkw
                self._on_block_received(blkw)
                self._process_incomplete_blocks(block.header_signature)
                self._purge_caches()

    def add_batch(self, batch):
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(batch)
                self._process_incomplete_blocks(batch.header_signature)
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        self._process_incomplete_batches(txn.header_signature)

    def get_chain_head(self):
        """Returns the block which is the current head of the chain.

        Returns:
            BlockWrapper: The head of the chain.
        """
        with self.lock:
            return self._block_store.chain_head

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(transaction_id)
                except ValueError:
                    return None
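
A minimal usage sketch for this Completer, assuming the sawtooth_validator
modules used above are importable. RecordingGossip is a hypothetical stand-in
for gossip.Gossip that records requests instead of broadcasting them, and
block_store, batch, and block are placeholders for a journal block store and
signed protobuf Batch/Block messages supplied by the surrounding application:

class RecordingGossip(object):
    """Hypothetical gossip stub: records requests instead of broadcasting."""
    def __init__(self):
        self.block_requests = []
        self.batch_requests = []
        self.txn_dep_requests = []

    def broadcast_block_request(self, block_id):
        self.block_requests.append(block_id)

    def broadcast_batch_by_batch_id_request(self, batch_id):
        self.batch_requests.append(batch_id)

    def broadcast_batch_by_transaction_id_request(self, txn_ids):
        self.txn_dep_requests.extend(txn_ids)


completer = Completer(block_store, RecordingGossip())
completer.set_on_block_received(lambda blk: print("block complete:", blk))
completer.set_on_batch_received(lambda b: print("batch complete:", b))

# Deliver the batch before the block that references it; a block that
# arrives first is parked in the incomplete-blocks cache while the gossip
# stub records the resulting batch requests.
completer.add_batch(batch)
completer.add_block(block)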
Example #31
0
class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and
    whose batches are all present, in the order specified by the block
    header. If the predecessor or a batch is missing, a request message is
    sent out over the gossip network. The Completer also checks that all
    batches have their dependencies satisfied; otherwise it requests the
    batch that contains the missing transaction.
    """
    def __init__(self, block_store, gossip, cache_purge_frequency=30):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_purge_frequency (int) The time between purging the
                TimedCaches.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_purge_frequency)
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self.lock = RLock()
        self._cache_purge_frequency = cache_purge_frequency
        self._purge_time = time.time() + self._cache_purge_frequency

    def _complete_block(self, block):
        """ Check the block to see if it is complete and if it can be passed to
            the journal. If the block's predecessor is not in the block_cache,
            the predecessor is requested and the current block is added to
            the incomplete_block cache. If the block.batches and
            block.header.batch_ids are not the same length, the batch_id list
            is checked against the batch_cache to see if the batch_list can be
            built. If any batches are missing from the block and we do not have
            the batches in the batch_cache, they are requested. The block is
            then added to the incomplete_block cache. If we can complete the
            block, a new batch list is created in the correct order and added
            to the block. The block is now considered complete and is returned.
            If block.batches and block.header.batch_ids are the same length,
            the block's batch list needs to be in the same order as the
            block.header.batch_ids list. If the block has all of its expected
            batches but they are not in the correct order, the batch list is
            rebuilt and added to the block. Once a block has the correct batch
            list it is added to the block_cache and is returned.

        """

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None

        if block.previous_block_id not in self.block_cache:
            LOGGER.debug("Request missing predecessor: %s",
                         block.previous_block_id)
            if block.previous_block_id not in self._incomplete_blocks:
                self._incomplete_blocks[block.previous_block_id] = [block]
            elif block not in self._incomplete_blocks[block.previous_block_id]:
                self._incomplete_blocks[block.previous_block_id] += [block]

            self.gossip.broadcast_block_request(block.previous_block_id)
            return None

        # Check for the same number of batch_ids and batches.
        # If the block is missing batches, try to build the batch list;
        # if it has extra batches that do not belong, drop the block.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # Supplements the batch_cache; holds batches already in the block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            if batches is None:
                return None
            # Clear the block's batches and reset them with the full,
            # ordered batch list
            del block.batches[:]
            block.batches.extend(batches)
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if the batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                if batches is None:
                    return None
                # Clear the block's batches and reset them with the full,
                # ordered batch list
                del block.batches[:]
                block.batches.extend(batches)
                return block
            else:
                LOGGER.debug(
                    "Block.header.batch_ids does not match set of "
                    "batches in block.batches Dropping %s", block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                        dependency):
                    LOGGER.debug(
                        "Transaction %s in batch %s has "
                        "unsatisfied dependency: %s", txn.header_signature,
                        batch.header_signature, dependency)

                    dependencies.append(dependency)
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(dependencies)

        return valid

    def _add_seen_txns(self, batch):
        for txn in batch.transactions:
            if txn.header_signature in self._seen_txns and \
                    self._seen_txns[txn.header_signature] == \
                    batch.header_signature:
                break
            self._seen_txns[txn.header_signature] = batch.header_signature

    def _process_incomplete_batches(self, key):
        # Keys are transaction ids
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key):
        # Keys are either a block_id or batch_id
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)

            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[inc_block.header_signature] = \
                                inc_block
                            self._on_block_received(inc_block)
                            to_complete.append(inc_block.header_signature)
                    del self._incomplete_blocks[my_key]

    def _purge_caches(self):
        if self._purge_time < time.time():
            LOGGER.debug("Purges caches of expired entries.")
            self._seen_txns.purge_expired()
            self._incomplete_batches.purge_expired()
            self._incomplete_blocks.purge_expired()
            self.batch_cache.purge_expired()
            self.block_cache.purge_expired()
            self._purge_time = time.time() + self._cache_purge_frequency

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        self._on_batch_received = on_batch_received_func

    def add_block(self, block):
        with self.lock:
            blkw = BlockWrapper(block)
            block = self._complete_block(blkw)
            if block is not None:
                self.block_cache[block.header_signature] = blkw
                self._on_block_received(blkw)
                self._process_incomplete_blocks(block.header_signature)
                self._purge_caches()

    def add_batch(self, batch):
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(batch)
                self._process_incomplete_blocks(batch.header_signature)
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        self._process_incomplete_batches(txn.header_signature)

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(transaction_id)
                except ValueError:
                    return None
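
The snippets above lean on TimedCache behaving like a dict with expiring
entries: membership tests, item get/set/delete, and an explicit
purge_expired(). A minimal sketch of that interface, reconstructed only from
the calls the Completer makes (the real sawtooth_validator TimedCache differs
in details such as keep-time and purge-frequency handling):

import time


class TimedCacheSketch(object):
    """Dict-like cache whose entries expire keep_time seconds after use."""

    def __init__(self, keep_time=300):
        self._keep_time = keep_time
        self._store = {}  # key -> (value, last-touched timestamp)

    def __contains__(self, key):
        return key in self._store

    def __getitem__(self, key):
        value, _ = self._store[key]
        # Assumption: reading an entry refreshes its timestamp.
        self._store[key] = (value, time.time())
        return value

    def __setitem__(self, key, value):
        self._store[key] = (value, time.time())

    def __delitem__(self, key):
        del self._store[key]

    def purge_expired(self):
        cutoff = time.time() - self._keep_time
        for key in [k for k, (_, ts) in self._store.items() if ts < cutoff]:
            del self._store[key]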
Example #32
0
    def __init__(self, block_store):
        self.batch_cache = TimedCache()
        self.block_cache = BlockCache(block_store)
        self._on_block_received = None
        self._on_batch_received = None
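
One detail worth calling out from _process_incomplete_blocks in the examples
above: completing a block can unblock children parked waiting on it, which
can in turn unblock grandchildren, so the method drains a deque breadth-first.
A self-contained sketch of that pattern, with hypothetical block ids standing
in for BlockWrapper objects and a plain dict standing in for the
_incomplete_blocks TimedCache:

from collections import deque

# waiters maps a key (block id or batch id) to the blocks parked on it.
waiters = {
    "blk-1": ["blk-2"],           # blk-2 waits on its predecessor blk-1
    "blk-2": ["blk-3", "blk-4"],  # two children wait on blk-2
}


def drain(key, waiters, is_complete, deliver):
    """Deliver every block transitively unblocked by `key` arriving."""
    to_complete = deque([key])
    while to_complete:
        current = to_complete.popleft()
        for blocked in waiters.pop(current, []):
            if is_complete(blocked):
                deliver(blocked)
                # The delivered block may itself have waiters keyed on it.
                to_complete.append(blocked)


drain("blk-1", waiters, is_complete=lambda b: True, deliver=print)
# Prints blk-2, then blk-3 and blk-4: breadth-first, one level at a time.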