Example #1
class IncomingBatchQueue:
    """This queue keeps track of the batch ids so that components on the edge
    can filter out duplicates early. However, there is still an opportunity for
    duplicates to make it into this queue, which is intentional to avoid
    blocking threads trying to put/get from the queue. Any duplicates
    introduced by this must be filtered out later.
    """
    def __init__(self):
        self._queue = queue.Queue()
        self._ids = ConcurrentSet()

    def put(self, batch):
        if batch.header_signature not in self._ids:
            self._ids.add(batch.header_signature)
            self._queue.put(batch)

    def get(self, timeout=None):
        batch = self._queue.get(timeout=timeout)
        try:
            self._ids.remove(batch.header_signature)
        except KeyError:
            pass
        return batch

    def __contains__(self, batch_id):
        return batch_id in self._ids
Example #2
class IncomingBatchQueue:
    """This queue keeps track of the batch ids so that components on the edge
    can filter out duplicates early. However, there is still an opportunity for
    duplicates to make it into this queue, which is intentional to avoid
    blocking threads trying to put/get from the queue. Any duplicates
    introduced by this must be filtered out later.
    """
    def __init__(self):
        self._queue = queue.Queue()
        self._ids = ConcurrentSet()

    def put(self, batch):
        if batch.header_signature not in self._ids:
            self._ids.add(batch.header_signature)
            self._queue.put(batch)

    def get(self, timeout=None, and_then=None):
        """Get a batch from the queue, blocking until a batch is available or
        timeout has occurred. If 'and_then' is passed, it is called with the
        batch before the batch is fully removed from the queue. This prevents
        a concurrent client from observing that a batch is not present while
        it is transferred from one component to another.
        """
        batch = self._queue.get(timeout=timeout)
        if and_then is not None:
            and_then(batch)
        try:
            self._ids.remove(batch.header_signature)
        except KeyError:
            pass
        return batch

    def __contains__(self, batch_id):
        return batch_id in self._ids
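A minimal usage sketch of the and_then hook from Example #2; incoming_queue, pending_ids, and track are hypothetical names, not part of the source. The callback records the batch id in a second structure before get() drops it from _ids, so a concurrent __contains__ check never observes the batch as absent during the handoff.

# Hypothetical consumer: hand a batch to a local pending set without a
# visibility gap; incoming_queue is an IncomingBatchQueue instance.
pending_ids = ConcurrentSet()

def track(batch):
    # Runs inside get(), before the id is removed from the queue's _ids.
    pending_ids.add(batch.header_signature)

batch = incoming_queue.get(timeout=1, and_then=track)
# At every instant the id was visible via incoming_queue or pending_ids.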
Example #3
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._public_key = identity_signer.get_public_key().as_hex()
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._settings_view_factory = SettingsViewFactory(state_view_factory)

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        self._moved_to_fork_count = COLLECTOR.counter(
            'chain_head_moved_to_fork_count', instance=self)

        # Blocks that are currently being processed
        self._blocks_processing = ConcurrentSet()

        # Descendant blocks that are waiting for an in process block
        # to complete
        self._blocks_pending = ConcurrentMultiMap()
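These constructors fall back to InstrumentedThreadPoolExecutor(1) when no pool is injected. The real class is Sawtooth's metrics-reporting wrapper; for reading these examples a plain single-worker executor behaves the same apart from the instrumentation. A stand-in sketch, not the actual implementation:

from concurrent.futures import ThreadPoolExecutor

class InstrumentedThreadPoolExecutor(ThreadPoolExecutor):
    """Hypothetical stand-in: the real Sawtooth class also records task
    metrics; only submit() and shutdown() matter in these examples."""

    def __init__(self, max_workers=1):
        super().__init__(max_workers=max_workers)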
Example #4
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._settings_view_factory = SettingsViewFactory(state_view_factory)

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        self._moved_to_fork_count = COLLECTOR.counter(
            'chain_head_moved_to_fork_count', instance=self)

        # Blocks that are currently being processed
        self._blocks_processing = ConcurrentSet()

        # Descendant blocks that are waiting for an in process block
        # to complete
        self._blocks_pending = ConcurrentMultiMap()
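Examples #3 and #4 (and the full validators below) index pending descendant blocks in a ConcurrentMultiMap via append(key, value), pop(key, default), and membership tests. A minimal sketch consistent with that usage, assuming a lock-guarded dict of lists (the real Sawtooth class may differ):

import threading
from collections import defaultdict

class ConcurrentMultiMap:
    """Minimal sketch: a lock-guarded str -> list multimap."""

    def __init__(self):
        self._lock = threading.Lock()
        self._map = defaultdict(list)

    def append(self, key, value):
        # Append value to the list stored under key, creating it if needed.
        with self._lock:
            self._map[key].append(value)

    def pop(self, key, default=None):
        # Remove and return the list under key, or default if absent.
        with self._lock:
            return self._map.pop(key, default)

    def __contains__(self, key):
        with self._lock:
            return key in self._map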
Example #5
    def __init__(self):
        self._queue = queue.Queue()
        self._ids = ConcurrentSet()
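ConcurrentSet appears in every example here. A minimal sketch, assuming it is simply a lock-guarded set (the real Sawtooth class may differ). It covers the operations the examples rely on: add, remove raising KeyError when absent (Examples #1 and #2 catch exactly that), membership tests, and truthiness (Example #7 checks `if self._registered_engines`).

import threading

class ConcurrentSet:
    """Minimal sketch: a set guarded by a lock."""

    def __init__(self):
        self._lock = threading.Lock()
        self._set = set()

    def add(self, item):
        with self._lock:
            self._set.add(item)

    def remove(self, item):
        # Raises KeyError if the item is absent, like set.remove.
        with self._lock:
            self._set.remove(item)

    def __contains__(self, item):
        with self._lock:
            return item in self._set

    def __bool__(self):
        with self._lock:
            return bool(self._set)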
Example #6
    def __init__(self, consensus_service):
        self._service = consensus_service
        self._registered_engines = ConcurrentSet()
Example #7
class ConsensusNotifier:
    """Handles sending notifications to the consensus engine using the provided
    interconnect service."""
    def __init__(self, consensus_service):
        self._service = consensus_service
        self._registered_engines = ConcurrentSet()

    def _notify(self, message_type, message):
        if self._registered_engines:
            futures = self._service.send_all(message_type,
                                             message.SerializeToString())
            for future in futures:
                future.result()

    def notify_peer_connected(self, peer_id):
        """A new peer was added"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            consensus_pb2.ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id))))

    def notify_peer_disconnected(self, peer_id):
        """An existing peer was dropped"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_DISCONNECTED,
            consensus_pb2.ConsensusNotifyPeerDisconnected(
                peer_id=bytes.fromhex(peer_id)))

    def notify_peer_message(self, message, sender_id):
        """A new message was received from a peer"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_MESSAGE,
            consensus_pb2.ConsensusNotifyPeerMessage(message=message,
                                                     sender_id=sender_id))

    def notify_block_new(self, block):
        """A new block was received and passed initial consensus validation"""
        summary = hashlib.sha256()
        for batch in block.batches:
            summary.update(batch.header_signature.encode())
        block_header = BlockHeader()
        block_header.ParseFromString(block.header)
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_NEW,
            consensus_pb2.ConsensusNotifyBlockNew(
                block=consensus_pb2.ConsensusBlock(
                    block_id=bytes.fromhex(block.header_signature),
                    previous_id=bytes.fromhex(block_header.previous_block_id),
                    signer_id=bytes.fromhex(block_header.signer_public_key),
                    block_num=block_header.block_num,
                    payload=block_header.consensus,
                    summary=summary.digest())))

    def notify_block_valid(self, block_id):
        """This block can be committed successfully"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_VALID,
            consensus_pb2.ConsensusNotifyBlockValid(
                block_id=bytes.fromhex(block_id)))

    def notify_block_invalid(self, block_id):
        """This block cannot be committed successfully"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_INVALID,
            consensus_pb2.ConsensusNotifyBlockInvalid(
                block_id=bytes.fromhex(block_id)))

    def notify_block_commit(self, block_id):
        """This block has been committed"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_COMMIT,
            consensus_pb2.ConsensusNotifyBlockCommit(
                block_id=bytes.fromhex(block_id)))

    def add_registered_engine(self, engine_name, engine_version):
        """Add to list of registered consensus engines"""
        self._registered_engines.add((engine_name, engine_version))
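A small usage sketch for the notifier in Example #7. _StubService is a hypothetical stand-in for the validator's interconnect; it only needs a send_all(message_type, payload) method that returns futures:

from concurrent.futures import Future

class _StubService:
    """Hypothetical stand-in: records outgoing messages and returns
    already-completed futures."""

    def __init__(self):
        self.sent = []

    def send_all(self, message_type, payload):
        self.sent.append((message_type, payload))
        future = Future()
        future.set_result(None)
        return [future]

service = _StubService()
notifier = ConsensusNotifier(service)

# No engine registered yet: the ConcurrentSet is falsy, so _notify()
# sends nothing.
notifier.notify_peer_connected('abcd1234')
assert not service.sent

# After registration, notifications are broadcast; _notify() blocks on
# each future's result, so delivery failures surface to the caller.
notifier.add_registered_engine('devmode', '0.1')
notifier.notify_peer_connected('abcd1234')
assert len(service.sent) == 1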
Example #8
class BlockValidator(object):
    """
    Responsible for validating a block. Handles both chain extensions and
    forks, determines whether the new block should become the head of the
    chain, and returns the information necessary to make the switch if so.
    """

    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._settings_view_factory = SettingsViewFactory(state_view_factory)

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        self._moved_to_fork_count = COLLECTOR.counter(
            'chain_head_moved_to_fork_count', instance=self)

        # Blocks that are currently being processed
        self._blocks_processing = ConcurrentSet()

        # Descendant blocks that are waiting for an in process block
        # to complete
        self._blocks_pending = ConcurrentMultiMap()

    def stop(self):
        self._thread_pool.shutdown(wait=True)

    def _get_previous_block_state_root(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    def _validate_batches_in_block(self, blkw, prev_state_root):
        """
        Validate all batches in the block. This includes:
            - Validating all transaction dependencies are met
            - Validating there are no duplicate batches or transactions
            - Validating execution of all batches in the block produces the
              correct state root hash

        Args:
            blkw: the block of batches to validate
            prev_state_root: the state root to execute transactions on top of

        Raises:
            BlockValidationFailure:
                If validation fails, raises this error with the reason.
            MissingDependency:
                Validation failed because of a missing dependency.
            DuplicateTransaction:
                Validation failed because of a duplicate transaction.
            DuplicateBatch:
                Validation failed because of a duplicate batch.
        """
        if not blkw.block.batches:
            return

        try:
            chain_commit_state = ChainCommitState(
                blkw.previous_block_id,
                self._block_cache,
                self._block_cache.block_store)

            scheduler = self._transaction_executor.create_scheduler(
                prev_state_root)

            chain_commit_state.check_for_duplicate_batches(
                blkw.block.batches)

            transactions = []
            for batch in blkw.block.batches:
                transactions.extend(batch.transactions)

            chain_commit_state.check_for_duplicate_transactions(
                transactions)

            chain_commit_state.check_for_transaction_dependencies(
                transactions)

            for batch, has_more in look_ahead(blkw.block.batches):
                if has_more:
                    scheduler.add_batch(batch)
                else:
                    scheduler.add_batch(batch, blkw.state_root_hash)

        except (DuplicateBatch,
                DuplicateTransaction,
                MissingDependency) as err:
            scheduler.cancel()
            raise BlockValidationFailure(
                "Block {} failed validation: {}".format(blkw, err))

        except Exception:
            scheduler.cancel()
            raise

        scheduler.finalize()
        scheduler.complete(block=True)
        state_hash = None

        for batch in blkw.batches:
            batch_result = scheduler.get_batch_execution_result(
                batch.header_signature)
            if batch_result is not None and batch_result.is_valid:
                txn_results = \
                    scheduler.get_transaction_execution_results(
                        batch.header_signature)
                blkw.execution_results.extend(txn_results)
                state_hash = batch_result.state_hash
                blkw.num_transactions += len(batch.transactions)
            else:
                raise BlockValidationFailure(
                    "Block {} failed validation: Invalid batch "
                    "{}".format(blkw, batch))

        if blkw.state_root_hash != state_hash:
            raise BlockValidationFailure(
                "Block {} failed state root hash validation. Expected {}"
                " but got {}".format(
                    blkw, blkw.state_root_hash, state_hash))

    def _validate_permissions(self, blkw, prev_state_root):
        """
        Validate that all of the batch signers and transaction signer for the
        batches in the block are permitted by the transactor permissioning
        roles stored in state as of the previous block. If a transactor is
        found to not be permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, prev_state_root, from_state=True):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw, prev_state_root):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            return enforce_validation_rules(
                self._settings_view_factory.create_settings_view(
                    prev_state_root),
                blkw.header.signer_public_key,
                blkw.batches)
        return True

    def validate_block(self, blkw, chain_head=None):
        if blkw.status == BlockStatus.Valid:
            return
        elif blkw.status == BlockStatus.Invalid:
            raise BlockValidationFailure(
                'Block {} is already invalid'.format(blkw))

        # pylint: disable=broad-except
        try:
            if chain_head is None:
                # Try to get the chain head from the block store; note that the
                # block store may also return None for the chain head if a
                # genesis block hasn't been committed yet.
                chain_head = self._block_cache.block_store.chain_head

            try:
                prev_state_root = self._get_previous_block_state_root(blkw)
            except KeyError:
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor'.format(
                        blkw))

            if not self._validate_permissions(blkw, prev_state_root):
                raise BlockValidationFailure(
                    'Block {} failed permission validation'.format(blkw))

            try:
                prev_block = self._block_cache[blkw.previous_block_id]
            except KeyError:
                prev_block = None

            consensus = self._load_consensus(prev_block)
            public_key = \
                self._identity_signer.get_public_key().as_hex()
            consensus_block_verifier = consensus.BlockVerifier(
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                validator_id=public_key)

            if not consensus_block_verifier.verify_block(blkw):
                raise BlockValidationFailure(
                    'Block {} failed {} consensus validation'.format(
                        blkw, consensus))

            if not self._validate_on_chain_rules(blkw, prev_state_root):
                raise BlockValidationFailure(
                    'Block {} failed on-chain validation rules'.format(
                        blkw))

            self._validate_batches_in_block(blkw, prev_state_root)

            # since changes to the chain-head can change the state of the
            # blocks in BlockStore we have to revalidate this block.
            block_store = self._block_cache.block_store

            # The chain_head is None when this is the genesis block or if the
            # block store has no chain_head.
            if chain_head is not None:
                if chain_head.identifier != block_store.chain_head.identifier:
                    raise ChainHeadUpdated()

            blkw.status = BlockStatus.Valid

        except BlockValidationFailure as err:
            blkw.status = BlockStatus.Invalid
            raise err

        except BlockValidationError as err:
            blkw.status = BlockStatus.Unknown
            raise err

        except ChainHeadUpdated as e:
            raise e

        except Exception as e:
            LOGGER.exception(
                "Unhandled exception BlockValidator.validate_block()")
            raise e

    @staticmethod
    def _compare_chain_height(head_a, head_b):
        """Returns True if head_a is taller, False if head_b is taller, and
        True if the heights are the same."""
        return head_a.block_num - head_b.block_num >= 0

    def _build_fork_diff_to_common_height(self, head_long, head_short):
        """Returns a list of blocks on the longer chain since the greatest
        common height between the two chains. Note that the chains may not
        have the same block id at the greatest common height.

        Args:
            head_long (BlockWrapper)
            head_short (BlockWrapper)

        Returns:
            (BlockWrapper, list of BlockWrapper) The block on the longer
            chain at the common height, and all blocks in the longer chain
            above that height, ordered newest to oldest.

        Raises:
            BlockValidationError
                The block is missing a predecessor. Note that normally this
                shouldn't happen because of the completer."""
        fork_diff = []

        last = head_short.block_num
        blk = head_long

        while blk.block_num > last:
            if blk.previous_block_id == NULL_BLOCK_IDENTIFIER:
                break

            fork_diff.append(blk)
            try:
                blk = self._block_cache[blk.previous_block_id]
            except KeyError:
                raise BlockValidationError(
                    'Failed to build fork diff: block {} missing predecessor'
                    .format(blk))

        return blk, fork_diff

    def _extend_fork_diff_to_common_ancestor(
        self, new_blkw, cur_blkw, new_chain, cur_chain
    ):
        """ Finds a common ancestor of the two chains. new_blkw and cur_blkw
        must be at the same height, or this will always fail.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationFailure(
                    'Block {} rejected due to wrong genesis {}'.format(
                        cur_blkw, new_blkw))

            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor {}'.format(
                        new_blkw, new_blkw.previous_block_id))

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _compare_forks_consensus(self, chain_head, new_block):
        """Ask the consensus module which fork to choose.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        consensus = self._load_consensus(chain_head)
        fork_resolver = consensus.ForkResolver(
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=public_key)

        return fork_resolver.compare_forks(chain_head, new_block)

    def _load_consensus(self, block):
        """Load the consensus module using the state as of the given block."""
        if block is not None:
            return ConsensusFactory.get_configured_consensus_module(
                block.header_signature,
                BlockWrapper.state_view_for_block(
                    block,
                    self._state_view_factory))
        return ConsensusFactory.get_consensus_module('genesis')

    @staticmethod
    def _get_batch_commit_changes(new_chain, cur_chain):
        """
        Get all the batches that should be committed from the new chain and
        all the batches that should be uncommitted from the current chain.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def submit_blocks_for_verification(self, blocks, callback):
        for block in blocks:
            if self.in_process(block.header_signature):
                LOGGER.debug("Block already in process: %s", block)
                continue

            if self.in_process(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' in process,"
                    " adding '%s' pending",
                    block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            if self.in_pending(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' is pending,"
                    " adding '%s' pending",
                    block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            LOGGER.debug(
                "Adding block %s for processing", block.identifier)

            # Add the block to the set of blocks being processed
            self._blocks_processing.add(block.identifier)

            # Schedule the block for processing
            self._thread_pool.submit(
                self.process_block_verification, block,
                self._wrap_callback(block, callback))

    def _wrap_callback(self, block, callback):
        # Internal cleanup after verification
        def wrapper(commit_new_block, result, chain_head_updated=False):
            block = result.block
            LOGGER.debug("Removing block from processing %s", block.identifier)
            try:
                self._blocks_processing.remove(block.identifier)
            except KeyError:
                LOGGER.warning(
                    "Tried to remove block from in process but it"
                    " wasn't in processes: %s",
                    block.identifier)

            # If the block was valid, submit all pending blocks for validation
            if block.status == BlockStatus.Valid:
                blocks_now_ready = self._blocks_pending.pop(
                    block.identifier, [])
                self.submit_blocks_for_verification(blocks_now_ready, callback)

            elif block.status == BlockStatus.Invalid:
                # If the block was invalid, mark all pending blocks as invalid
                blocks_now_invalid = self._blocks_pending.pop(
                    block.identifier, [])

                while blocks_now_invalid:
                    invalid_block = blocks_now_invalid.pop()
                    invalid_block.status = BlockStatus.Invalid

                    LOGGER.debug(
                        'Marking descendant block invalid: %s',
                        invalid_block)

                    # Get descendants of the descendant
                    blocks_now_invalid.extend(
                        self._blocks_pending.pop(invalid_block.identifier, []))

            elif not chain_head_updated:
                # If an error occurred during validation, something is wrong
                # internally and we need to abort validation of this block
                # and all its children without marking them as invalid.
                blocks_to_remove = self._blocks_pending.pop(
                    block.identifier, [])

                while blocks_to_remove:
                    block = blocks_to_remove.pop()

                    LOGGER.error(
                        'Removing block from cache and pending due to error '
                        'during validation: %s; status: %s',
                        block, block.status)

                    del self._block_cache[block.identifier]

                    # Get descendants of the descendant
                    blocks_to_remove.extend(
                        self._blocks_pending.pop(block.identifier, []))

            callback(commit_new_block, result)

        return wrapper

    def in_process(self, block_id):
        return block_id in self._blocks_processing

    def in_pending(self, block_id):
        return block_id in self._blocks_pending

    def _add_block_to_pending(self, block):
        previous = block.previous_block_id
        self._blocks_pending.append(previous, block)

    def process_block_verification(self, block, callback):
        """
        Main entry point for block validation. Takes a candidate block,
        decides whether it is valid, and if so determines whether it should
        become the new chain head. Returns the results to the ChainController
        so that the changeover can be made if necessary.
        """
        try:
            result = BlockValidationResult(block)
            LOGGER.info("Starting block validation of : %s", block)

            # Get the current chain_head and store it in the result
            chain_head = self._block_cache.block_store.chain_head
            result.chain_head = chain_head

            # Create new local variables for current and new block, since
            # these variables get modified later
            current_block = chain_head
            new_block = block

            try:
                # Get all the blocks since the greatest common height from the
                # longer chain.
                if self._compare_chain_height(current_block, new_block):
                    current_block, result.current_chain =\
                        self._build_fork_diff_to_common_height(
                            current_block, new_block)
                else:
                    new_block, result.new_chain =\
                        self._build_fork_diff_to_common_height(
                            new_block, current_block)

                # Add blocks to the two chains until a common ancestor is found
                # or raise an exception if no common ancestor is found
                self._extend_fork_diff_to_common_ancestor(
                    new_block, current_block,
                    result.new_chain, result.current_chain)
            except BlockValidationFailure as err:
                LOGGER.warning(
                    'Block %s failed validation: %s',
                    block, err)
                block.status = BlockStatus.Invalid
            except BlockValidationError as err:
                LOGGER.error(
                    'Encountered an error while validating %s: %s',
                    block, err)
                callback(False, result)
                return

            valid = True
            for blk in reversed(result.new_chain):
                if valid:
                    try:
                        self.validate_block(
                            blk, chain_head)
                    except BlockValidationFailure as err:
                        LOGGER.warning(
                            'Block %s failed validation: %s',
                            blk, err)
                        valid = False
                    except BlockValidationError as err:
                        LOGGER.error(
                            'Encountered an error while validating %s: %s',
                            blk, err)
                        callback(False, result)
                        return
                    result.transaction_count += blk.num_transactions
                else:
                    LOGGER.info(
                        "Block marked invalid (invalid predecessor): %s", blk)
                    blk.status = BlockStatus.Invalid

            if not valid:
                callback(False, result)
                return

            # Ask consensus if the new chain should be committed
            LOGGER.info(
                "Comparing current chain head '%s' against new block '%s'",
                chain_head, new_block)
            for i in range(max(
                len(result.new_chain), len(result.current_chain)
            )):
                cur = new = num = "-"
                if i < len(result.current_chain):
                    cur = result.current_chain[i].header_signature[:8]
                    num = result.current_chain[i].block_num
                if i < len(result.new_chain):
                    new = result.new_chain[i].header_signature[:8]
                    num = result.new_chain[i].block_num
                LOGGER.info(
                    "Fork comparison at height %s is between %s and %s",
                    num, cur, new)

            commit_new_chain = self._compare_forks_consensus(chain_head, block)

            # If committing the new chain, get the list of committed batches
            # from the current chain that need to be uncommitted and the list
            # of uncommitted batches from the new chain that need to be
            # committed.
            if commit_new_chain:
                commit, uncommit =\
                    self._get_batch_commit_changes(
                        result.new_chain, result.current_chain)
                result.committed_batches = commit
                result.uncommitted_batches = uncommit

                if result.new_chain[0].previous_block_id \
                        != chain_head.identifier:
                    self._moved_to_fork_count.inc()

            # Pass the results to the callback function
            callback(commit_new_chain, result)
            LOGGER.info("Finished block validation of: %s", block)

        except ChainHeadUpdated:
            LOGGER.debug(
                "Block validation failed due to chain head update: %s", block)
            callback(False, result, chain_head_updated=True)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s", block)
            # callback to clean up the block out of the processing list.
            callback(False, result)
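_validate_batches_in_block iterates look_ahead(blkw.block.batches) so that only the final batch is scheduled together with the expected state root. A minimal sketch of such a helper, assuming it simply pairs each item with a has-more flag:

def look_ahead(iterable):
    """Minimal sketch: yields (item, has_more) pairs so the caller can
    treat the last item specially (above: attaching the expected state
    root hash to the final batch)."""
    it = iter(iterable)
    try:
        current = next(it)
    except StopIteration:
        return
    for nxt in it:
        yield current, True
        current = nxt
    yield current, False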
Example #9
class BlockValidator(object):
    """
    Responsible for validating a block. Handles both chain extensions and
    forks, determines whether the new block should become the head of the
    chain, and returns the information necessary to make the switch if so.
    """
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = ValidationRuleEnforcer(
            SettingsViewFactory(state_view_factory))

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()

        # Blocks that are currently being processed
        self._blocks_processing = ConcurrentSet()

        # Descendant blocks that are waiting for an in process block
        # to complete
        self._blocks_pending = ConcurrentMultiMap()

    def stop(self):
        self._thread_pool.shutdown(wait=True)

    def _get_previous_block_state_root(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    def _validate_batches_in_block(self, blkw, prev_state_root):
        """
        Validate all batches in the block. This includes:
            - Validating all transaction dependencies are met
            - Validating there are no duplicate batches or transactions
            - Validating execution of all batches in the block produces the
              correct state root hash

        Args:
            blkw: the block of batches to validate
            prev_state_root: the state root to execute transactions on top of

        Raises:
            BlockValidationError:
                If validation fails, raises this error with the reason.
            MissingDependency:
                Validation failed because of a missing dependency.
            DuplicateTransaction:
                Validation failed because of a duplicate transaction.
            DuplicateBatch:
                Validation failed because of a duplicate batch.
        """
        if not blkw.block.batches:
            return

        try:
            chain_commit_state = ChainCommitState(
                blkw.previous_block_id, self._block_cache,
                self._block_cache.block_store)

            scheduler = self._transaction_executor.create_scheduler(
                self._squash_handler, prev_state_root)
            self._transaction_executor.execute(scheduler)

            chain_commit_state.check_for_duplicate_batches(blkw.block.batches)

            transactions = []
            for batch in blkw.block.batches:
                transactions.extend(batch.transactions)

            chain_commit_state.check_for_duplicate_transactions(transactions)

            chain_commit_state.check_for_transaction_dependencies(transactions)

            for batch, has_more in look_ahead(blkw.block.batches):
                if has_more:
                    scheduler.add_batch(batch)
                else:
                    scheduler.add_batch(batch, blkw.state_root_hash)

        except (DuplicateBatch, DuplicateTransaction,
                MissingDependency) as err:
            scheduler.cancel()
            raise BlockValidationError("Block {} failed validation: {}".format(
                blkw, err))

        except Exception:
            scheduler.cancel()
            raise

        scheduler.finalize()
        scheduler.complete(block=True)
        state_hash = None

        for batch in blkw.batches:
            batch_result = scheduler.get_batch_execution_result(
                batch.header_signature)
            if batch_result is not None and batch_result.is_valid:
                txn_results = \
                    scheduler.get_transaction_execution_results(
                        batch.header_signature)
                blkw.execution_results.extend(txn_results)
                state_hash = batch_result.state_hash
                blkw.num_transactions += len(batch.transactions)
            else:
                raise BlockValidationError(
                    "Block {} failed validation: Invalid batch "
                    "{}".format(blkw, batch))

        if blkw.state_root_hash != state_hash:
            raise BlockValidationError(
                "Block {} failed state root hash validation. Expected {}"
                " but got {}".format(blkw, blkw.state_root_hash, state_hash))

    def _validate_permissions(self, blkw, prev_state_root):
        """
        Validate that all of the batch signers and transaction signer for the
        batches in the block are permitted by the transactor permissioning
        roles stored in state as of the previous block. If a transactor is
        found to not be permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, prev_state_root, from_state=True):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw, prev_state_root):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            return self._validation_rule_enforcer.validate(
                blkw, prev_state_root)
        return True

    def validate_block(self, blkw, chain_head=None):
        if blkw.status == BlockStatus.Valid:
            return
        elif blkw.status == BlockStatus.Invalid:
            raise BlockValidationError(
                'Block {} is already invalid'.format(blkw))

        # pylint: disable=broad-except
        try:
            if chain_head is None:
                # Try to get the chain head from the block store; note that the
                # block store may also return None for the chain head if a
                # genesis block hasn't been committed yet.
                chain_head = self._block_cache.block_store.chain_head

            try:
                prev_state_root = self._get_previous_block_state_root(blkw)
            except KeyError:
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor'.format(
                        blkw))

            if not self._validate_permissions(blkw, prev_state_root):
                raise BlockValidationError(
                    'Block {} failed permission validation'.format(blkw))

            try:
                prev_block = self._block_cache[blkw.previous_block_id]
            except KeyError:
                prev_block = None

            consensus = self._load_consensus(prev_block)
            public_key = \
                self._identity_signer.get_public_key().as_hex()
            consensus_block_verifier = consensus.BlockVerifier(
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                validator_id=public_key)

            if not consensus_block_verifier.verify_block(blkw):
                raise BlockValidationError(
                    'Block {} failed {} consensus validation'.format(
                        blkw, consensus))

            if not self._validate_on_chain_rules(blkw, prev_state_root):
                raise BlockValidationError(
                    'Block {} failed on-chain validation rules'.format(blkw))

            self._validate_batches_in_block(blkw, prev_state_root)

            # since changes to the chain-head can change the state of the
            # blocks in BlockStore we have to revalidate this block.
            block_store = self._block_cache.block_store

            # The chain_head is None when this is the genesis block or if the
            # block store has no chain_head.
            if chain_head is not None:
                if chain_head.identifier != block_store.chain_head.identifier:
                    raise ChainHeadUpdated()

            blkw.status = BlockStatus.Valid

        except BlockValidationError as err:
            blkw.status = BlockStatus.Invalid
            raise err

        except ChainHeadUpdated as e:
            raise e

        except Exception as e:
            LOGGER.exception(
                "Unhandled exception BlockValidator.validate_block()")
            raise e

    @staticmethod
    def _compare_chain_height(head_a, head_b):
        """Returns True if head_a is taller, False if head_b is taller, and
        True if the heights are the same."""
        return head_a.block_num - head_b.block_num >= 0

    def _build_fork_diff_to_common_height(self, head_long, head_short):
        """Returns a list of blocks on the longer chain since the greatest
        common height between the two chains. Note that the chains may not
        have the same block id at the greatest common height.

        Args:
            head_long (BlockWrapper)
            head_short (BlockWrapper)

        Returns:
            (BlockWrapper, list of BlockWrapper) The block on the longer
            chain at the common height, and all blocks in the longer chain
            above that height, ordered newest to oldest.

        Raises:
            BlockValidationError
                The block is missing a predecessor. Note that normally this
                shouldn't happen because of the completer."""
        fork_diff = []

        last = head_short.block_num
        blk = head_long

        while blk.block_num > last:
            if blk.previous_block_id == NULL_BLOCK_IDENTIFIER:
                break

            fork_diff.append(blk)
            try:
                blk = self._block_cache[blk.previous_block_id]
            except KeyError:
                LOGGER.debug(
                    "Failed to build fork diff due to missing predecessor: %s",
                    blk)

                # Mark all blocks in the longer chain since the invalid block
                # as invalid.
                for fd_blk in fork_diff:
                    fd_blk.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Failed to build fork diff: block {} missing predecessor'.
                    format(blk))

        return blk, fork_diff

    def _extend_fork_diff_to_common_ancestor(self, new_blkw, cur_blkw,
                                             new_chain, cur_chain):
        """ Finds a common ancestor of the two chains. new_blkw and cur_blkw
        must be at the same height, or this will always fail.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Block {} rejected due to wrong genesis {}'.format(
                        cur_blkw, new_blkw))

            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor {}'.format(
                        new_blkw, new_blkw.previous_block_id))

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _compare_forks_consensus(self, chain_head, new_block):
        """Ask the consensus module which fork to choose.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        consensus = self._load_consensus(chain_head)
        fork_resolver = consensus.ForkResolver(
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=public_key)

        return fork_resolver.compare_forks(chain_head, new_block)

    def _load_consensus(self, block):
        """Load the consensus module using the state as of the given block."""
        if block is not None:
            return ConsensusFactory.get_configured_consensus_module(
                block.header_signature,
                BlockWrapper.state_view_for_block(block,
                                                  self._state_view_factory))
        return ConsensusFactory.get_consensus_module('genesis')

    @staticmethod
    def _get_batch_commit_changes(new_chain, cur_chain):
        """
        Get all the batches that should be committed from the new chain and
        all the batches that should be uncommitted from the current chain.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def submit_blocks_for_verification(self, blocks, callback):
        for block in blocks:
            if self.in_process(block.header_signature):
                LOGGER.debug("Block already in process: %s", block)
                continue

            if self.in_process(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' in process,"
                    " adding '%s' pending", block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            if self.in_pending(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' is pending,"
                    " adding '%s' pending", block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            LOGGER.debug("Adding block %s for processing", block.identifier)

            # Add the block to the set of blocks being processed
            self._blocks_processing.add(block.identifier)

            # Schedule the block for processing
            self._thread_pool.submit(self.process_block_verification, block,
                                     self._wrap_callback(block, callback))

    def _wrap_callback(self, block, callback):
        # Internal cleanup after verification
        def wrapper(commit_new_block, result):
            LOGGER.debug("Removing block from processing %s",
                         block.identifier[:6])
            try:
                self._blocks_processing.remove(block.identifier)
            except KeyError:
                LOGGER.warning(
                    "Tried to remove block from in process but it"
                    " wasn't in processes: %s", block.identifier)

            # If the block is valid, submit its pending descendants for
            # verification; otherwise mark them invalid and remove them
            # from pending.
            if block.status == BlockStatus.Valid:
                blocks_now_ready = self._blocks_pending.pop(
                    block.identifier, [])
                self.submit_blocks_for_verification(blocks_now_ready, callback)

            else:
                # Collect the pending descendants, which are now invalid too
                blocks_now_invalid = self._blocks_pending.pop(
                    block.identifier, [])

                while blocks_now_invalid:
                    invalid_block = blocks_now_invalid.pop()
                    invalid_block.status = BlockStatus.Invalid

                    LOGGER.debug('Marking descendant block invalid: %s',
                                 invalid_block)

                    # Get descendants of the descendant
                    blocks_now_invalid.extend(
                        self._blocks_pending.pop(invalid_block.identifier, []))

            callback(commit_new_block, result)

        return wrapper

    def in_process(self, block_id):
        return block_id in self._blocks_processing

    def in_pending(self, block_id):
        return block_id in self._blocks_pending

    def _add_block_to_pending(self, block):
        previous = block.previous_block_id
        self._blocks_pending.append(previous, block)

    def process_block_verification(self, block, callback):
        """
        Main entry point for block validation. Takes a candidate block,
        decides whether it is valid, and if so determines whether it should
        become the new chain head. Returns the results to the ChainController
        so that the changeover can be made if necessary.
        """
        try:
            result = BlockValidationResult(block)
            LOGGER.info("Starting block validation of : %s", block)

            # Get the current chain_head and store it in the result
            chain_head = self._block_cache.block_store.chain_head
            result.chain_head = chain_head

            # Create new local variables for current and new block, since
            # these variables get modified later
            current_block = chain_head
            new_block = block

            try:
                # Get all the blocks since the greatest common height from the
                # longer chain.
                if self._compare_chain_height(current_block, new_block):
                    current_block, result.current_chain =\
                        self._build_fork_diff_to_common_height(
                            current_block, new_block)
                else:
                    new_block, result.new_chain =\
                        self._build_fork_diff_to_common_height(
                            new_block, current_block)

                # Add blocks to the two chains until a common ancestor is found
                # or raise an exception if no common ancestor is found
                self._extend_fork_diff_to_common_ancestor(
                    new_block, current_block, result.new_chain,
                    result.current_chain)
            except BlockValidationError as err:
                LOGGER.warning('%s', err)
                callback(False, result)
                return

            valid = True
            for blk in reversed(result.new_chain):
                if valid:
                    try:
                        self.validate_block(blk, chain_head)
                    except BlockValidationError as err:
                        LOGGER.warning('Block %s failed validation: %s', blk,
                                       err)
                        valid = False
                    result.transaction_count += blk.num_transactions
                else:
                    LOGGER.info(
                        "Block marked invalid(invalid predecessor): %s", blk)
                    blk.status = BlockStatus.Invalid

            if not valid:
                callback(False, result)
                return

            # Ask consensus if the new chain should be committed
            LOGGER.info(
                "Comparing current chain head '%s' against new block '%s'",
                chain_head, new_block)
            for i in range(
                    max(len(result.new_chain), len(result.current_chain))):
                cur = new = num = "-"
                if i < len(result.current_chain):
                    cur = result.current_chain[i].header_signature[:8]
                    num = result.current_chain[i].block_num
                if i < len(result.new_chain):
                    new = result.new_chain[i].header_signature[:8]
                    num = result.new_chain[i].block_num
                LOGGER.info(
                    "Fork comparison at height %s is between %s and %s", num,
                    cur, new)

            commit_new_chain = self._compare_forks_consensus(chain_head, block)

            # If committing the new chain, get the list of committed batches
            # from the current chain that need to be uncommitted and the list
            # of uncommitted batches from the new chain that need to be
            # committed.
            if commit_new_chain:
                commit, uncommit =\
                    self._get_batch_commit_changes(
                        result.new_chain, result.current_chain)
                result.committed_batches = commit
                result.uncommitted_batches = uncommit

                if result.new_chain[0].previous_block_id \
                        != chain_head.identifier:
                    self._moved_to_fork_count.inc()

            # Pass the results to the callback function
            callback(commit_new_chain, result)
            LOGGER.info("Finished block validation of: %s", block)

        except ChainHeadUpdated:
            callback(False, result)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s", block)
            # Call back so the block is cleaned out of the processing list.
            callback(False, result)
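
Note: the example above assumes self._blocks_pending behaves like a
thread-safe multimap keyed by predecessor block id, with append(key, value)
and pop(key, default) semantics. A minimal sketch of such a structure (the
class name and locking strategy are assumptions for illustration, not the
project's actual implementation):

import threading
from collections import defaultdict

class ConcurrentMultiMap:
    """Hypothetical minimal stand-in for the pending-blocks structure."""

    def __init__(self):
        self._lock = threading.Lock()
        self._map = defaultdict(list)

    def append(self, key, value):
        # Park a block under its predecessor's id
        with self._lock:
            self._map[key].append(value)

    def pop(self, key, default=None):
        # Remove and return all blocks waiting on 'key', or 'default'
        with self._lock:
            if key in self._map:
                return self._map.pop(key)
            return default

    def __contains__(self, key):
        with self._lock:
            return key in self._map

With this shape, _add_block_to_pending parks a block under its predecessor,
and the callback wrapper later pops every waiting descendant at once.
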
Example #10
class ConsensusNotifier:
    """
    Handles sending notifications to the consensus engine using the provided
    interconnect service.
    """
    def __init__(self, consensus_service):
        self._service = consensus_service
        self._registered_engines = ConcurrentSet()
        self._cluster = None

    """
    def set_cluster(self,cluster):
        self._cluster = cluster
        LOGGER.debug('ConsensusNotifier: set cluster=%s',cluster)
        self._service.set_cluster(self._cluster)
    """

    def _notify(self, message_type, message):
        """
        For cluster topology we should isolate other clusters from our
        messages; the cluster list could be taken from the topology and set
        on self._service, but in this case we work only with our own
        consensus engine.
        """
        if self._registered_engines:
            LOGGER.debug('ConsensusNotifier: _notify peer=%s',
                         self._service.connections_info)
            futures = self._service.send_all(
                message_type, message.SerializeToString())
            # Block until every peer has acknowledged the message
            for future in futures:
                future.result()
        else:
            LOGGER.debug(
                'ConsensusNotifier: cannot _notify - no registered engines')

    def notify_peer_param_update(self, peer_id, cname):
        """
        peer change role or became arbiter
        """
        LOGGER.debug(
            'ConsensusNotifier: notify_peer_param_update peer_id=%s PARAM=%s',
            peer_id[:10], cname)
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id)),
                status=ConsensusNotifyPeerConnected.PARAM_UPDATE,
                mode=ConsensusNotifyPeerConnected.NORMAL,
                info=cname))

    def notify_peer_join_cluster(self, peer_id, cname):
        """
        peer change role or became arbiter
        """
        LOGGER.debug(
            'ConsensusNotifier: notify_peer_join_cluster peer_id=%s PARAM=%s',
            peer_id[:10], cname)
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id)),
                status=ConsensusNotifyPeerConnected.JOIN_CLUSTER,
                mode=ConsensusNotifyPeerConnected.NORMAL,
                info=cname))

    def notify_peer_change_role(self, peer_id, cname, is_arbiter=False):
        """
        peer change role or became arbiter
        """
        LOGGER.debug(
            'ConsensusNotifier: notify_peer_change_role peer_id=%s CLUSTER=%s ARBITER=%s',
            peer_id[:10], cname, is_arbiter)
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id)),
                status=ConsensusNotifyPeerConnected.ARBITER_CHANGE
                if is_arbiter else ConsensusNotifyPeerConnected.ROLE_CHANGE,
                mode=ConsensusNotifyPeerConnected.NORMAL,
                info=cname))

    def notify_topology_cluster(self, peer_id, clusters=None):
        """
        A cluster was added to or removed from the topology
        """
        LOGGER.debug('ConsensusNotifier: notify_topology_cluster peer_id=%s',
                     peer_id[:10])
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id)),
                status=(ConsensusNotifyPeerConnected.DEL_CLUSTER
                        if clusters is None
                        else ConsensusNotifyPeerConnected.ADD_CLUSTER),
                mode=ConsensusNotifyPeerConnected.NORMAL,
                info=clusters))

    def notify_topology_peer(self, peer_id, peers, is_new=True):
        """
        A peer was added to or removed from the topology
        """
        LOGGER.debug('ConsensusNotifier: notify_topology_peer peer_id=%s',
                     peer_id[:10])
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id)),
                status=(ConsensusNotifyPeerConnected.ADD_PEER
                        if is_new else ConsensusNotifyPeerConnected.DEL_PEER),
                mode=ConsensusNotifyPeerConnected.NORMAL,
                info=peers))

    def notify_peer_connected(self,
                              peer_id,
                              assemble=True,
                              mode=ConsensusNotifyPeerConnected.NORMAL):
        """
        A new peer was added
        """
        LOGGER.debug(
            'ConsensusNotifier: notify_peer_connected peer_id=%s assemble=%s',
            peer_id[:10], assemble)
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id)),
                status=ConsensusNotifyPeerConnected.OK
                if assemble else ConsensusNotifyPeerConnected.NOT_READY,
                mode=mode,
            ))

    def notify_peer_disconnected(self, peer_id):
        """An existing peer was dropped"""
        LOGGER.debug('ConsensusNotifier: notify_peer_disconnected peer_id=%s',
                     peer_id[:10])
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_DISCONNECTED,
            consensus_pb2.ConsensusNotifyPeerDisconnected(
                peer_id=bytes.fromhex(peer_id)))

    def notify_peer_message(self, message, sender_id, message_type):
        """
        A new message was received from a peer; before sending, check the
        peer key against the topology
        """
        LOGGER.debug('ConsensusNotifier: notify_peer_message=%s sender_id=%s',
                     message_type,
                     sender_id.hex()[:8])
        if message_type == 'Arbitration':
            # Before sending Arbitration we should be sure that this
            # validator (sender_id) knows about the block, so we can send
            # the block right now along with the arbitration, or the
            # arbiter can request the block after receiving this message.
            LOGGER.debug(
                'ConsensusNotifier: check block for arbitration before '
                'sending message to consensus engine')
        elif message_type == 'ArbitrationDone':
            LOGGER.debug(
                'ConsensusNotifier: ArbitrationDone - send block to arbiters')

        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_MESSAGE,
            consensus_pb2.ConsensusNotifyPeerMessage(message=message,
                                                     sender_id=sender_id))

    def notify_block_new(self, block):
        """
        A new block was received and passed initial consensus validation
        in federation mode - send only own cluster's nodes
        """

        summary = hashlib.sha256()
        for batch in block.batches:
            summary.update(batch.header_signature.encode())

        LOGGER.debug(
            'ConsensusNotifier: notify_block_new BLOCK=%s SUMMARY=%s\n',
            block.header_signature[:8],
            summary.digest().hex()[:10])
        block_header = BlockHeader()
        block_header.ParseFromString(block.header)
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_NEW,
            consensus_pb2.ConsensusNotifyBlockNew(
                block=consensus_pb2.ConsensusBlock(
                    block_id=bytes.fromhex(block.header_signature),
                    previous_id=bytes.fromhex(block_header.previous_block_id),
                    signer_id=bytes.fromhex(block_header.signer_public_key),
                    block_num=block_header.block_num,
                    payload=block_header.consensus,
                    summary=summary.digest())))

    def notify_block_valid(self, block_id):
        """This block can be committed successfully"""
        LOGGER.debug('ConsensusNotifier: notify_block_valid BLOCK=%s\n',
                     block_id[:8])
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_VALID,
            consensus_pb2.ConsensusNotifyBlockValid(
                block_id=bytes.fromhex(block_id)))

    def notify_block_invalid(self, block_id):
        """This block cannot be committed successfully"""
        LOGGER.debug('ConsensusNotifier: notify_block_invalid block=%s\n',
                     block_id[:8])
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_INVALID,
            consensus_pb2.ConsensusNotifyBlockInvalid(
                block_id=bytes.fromhex(block_id)))

    def notify_block_commit(self, block_id):
        """This block has been committed"""
        LOGGER.debug('ConsensusNotifier: notify_block_commit block=%s\n',
                     block_id[:8])
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_COMMIT,
            consensus_pb2.ConsensusNotifyBlockCommit(
                block_id=bytes.fromhex(block_id)))

    def was_registered_engine(self, engine_name):
        # The set stores (name, version) pairs, so a bare name matches
        # only if it was added in that form
        return engine_name in self._registered_engines

    def add_registered_engine(self, engine_name, engine_version):
        """Add to list of registered consensus engines"""

        engine = (engine_name, engine_version)
        if engine in self._registered_engines:
            LOGGER.debug(
                'ConsensusNotifier: already registered consensus engine %s',
                engine_name)
        else:
            LOGGER.debug(
                'ConsensusNotifier: add registered consensus engine %s',
                engine_name)
            self._registered_engines.add(engine)
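
For reference, _notify only requires that the service's send_all return
future-like objects whose result() blocks until delivery. A hedged sketch of
that contract with a stub service (StubConsensusService and its behavior are
assumptions for demonstration, not the real interconnect service):

from concurrent.futures import Future

class StubConsensusService:
    """Hypothetical stand-in for the interconnect service."""

    connections_info = 'stub-connections'

    def send_all(self, message_type, payload):
        # Resolve immediately, as if every peer acknowledged the message
        future = Future()
        future.set_result((message_type, payload))
        return [future]

# Usage (assuming the ConsensusNotifier above and a registered engine):
#     notifier = ConsensusNotifier(StubConsensusService())
#     notifier.add_registered_engine('pbft', '1.0')
#     notifier.notify_block_valid('ab' * 32)  # block ids are hex strings
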
Example #11
    def __init__(self, consensus_service):
        self._service = consensus_service
        self._registered_engines = ConcurrentSet()
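
These constructors assume a ConcurrentSet type with thread-safe add, remove,
and membership tests, plus a truthiness check (used by _notify's
'if self._registered_engines:'). A minimal sketch under those assumptions
(not the project's actual class):

import threading

class ConcurrentSet:
    """Hypothetical minimal thread-safe set, for illustration only."""

    def __init__(self):
        self._lock = threading.Lock()
        self._set = set()

    def add(self, item):
        with self._lock:
            self._set.add(item)

    def remove(self, item):
        # Raises KeyError when the item is absent, matching set.remove()
        with self._lock:
            self._set.remove(item)

    def __contains__(self, item):
        with self._lock:
            return item in self._set

    def __len__(self):
        # Makes 'if engines:' evaluate False on an empty set
        with self._lock:
            return len(self._set)
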
Example #12
class ConsensusNotifier:
    """Handles sending notifications to the consensus engine using the provided
    interconnect service."""

    def __init__(self, consensus_service):
        self._service = consensus_service
        self._registered_engines = ConcurrentSet()

    def _notify(self, message_type, message):
        if self._registered_engines:
            futures = self._service.send_all(
                message_type,
                message.SerializeToString())
            for future in futures:
                future.result()

    def notify_peer_connected(self, peer_id):
        """A new peer was added"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_CONNECTED,
            consensus_pb2.ConsensusNotifyPeerConnected(
                peer_info=consensus_pb2.ConsensusPeerInfo(
                    peer_id=bytes.fromhex(peer_id))))

    def notify_peer_disconnected(self, peer_id):
        """An existing peer was dropped"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_DISCONNECTED,
            consensus_pb2.ConsensusNotifyPeerDisconnected(
                peer_id=bytes.fromhex(peer_id)))

    def notify_peer_message(self, message, sender_id):
        """A new message was received from a peer"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_PEER_MESSAGE,
            consensus_pb2.ConsensusNotifyPeerMessage(
                message=message,
                sender_id=sender_id))

    def notify_block_new(self, block):
        """A new block was received and passed initial consensus validation"""
        summary = hashlib.sha256()
        for batch in block.batches:
            summary.update(batch.header_signature.encode())
        block_header = BlockHeader()
        block_header.ParseFromString(block.header)
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_NEW,
            consensus_pb2.ConsensusNotifyBlockNew(
                block=consensus_pb2.ConsensusBlock(
                    block_id=bytes.fromhex(block.header_signature),
                    previous_id=bytes.fromhex(block_header.previous_block_id),
                    signer_id=bytes.fromhex(block_header.signer_public_key),
                    block_num=block_header.block_num,
                    payload=block_header.consensus,
                    summary=summary.digest())))

    def notify_block_valid(self, block_id):
        """This block can be committed successfully"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_VALID,
            consensus_pb2.ConsensusNotifyBlockValid(
                block_id=bytes.fromhex(block_id)))

    def notify_block_invalid(self, block_id):
        """This block cannot be committed successfully"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_INVALID,
            consensus_pb2.ConsensusNotifyBlockInvalid(
                block_id=bytes.fromhex(block_id)))

    def notify_block_commit(self, block_id):
        """This block has been committed"""
        self._notify(
            validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_COMMIT,
            consensus_pb2.ConsensusNotifyBlockCommit(
                block_id=bytes.fromhex(block_id)))

    def add_registered_engine(self, engine_name, engine_version):
        """Add to list of registered consensus engines"""
        self._registered_engines.add((engine_name, engine_version))
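
A short usage sketch of the simplified notifier above; _EchoService and the
hex ids are assumptions for illustration only. It shows that notifications
are silently dropped until an engine registers:

class _EchoService:
    """Hypothetical stand-in that records what would be sent to peers."""

    def __init__(self):
        self.sent = []

    def send_all(self, message_type, payload):
        self.sent.append((message_type, payload))
        return []  # nothing to wait on

service = _EchoService()
notifier = ConsensusNotifier(service)

notifier.notify_block_commit('cd' * 32)           # no engine yet: dropped
notifier.add_registered_engine('devmode', '0.1')
notifier.notify_block_commit('cd' * 32)           # delivered to the stub
assert len(service.sent) == 1
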