class GossipBatchResponseSignatureVerifier(Handler):
    def __init__(self, metrics_registry=None):
        self._seen_cache = TimedCache()
        if metrics_registry:
            self._batch_dropped_count = CounterWrapper(
                metrics_registry.counter(
                    'already_validated_batch_dropped_count'))
        else:
            self._batch_dropped_count = CounterWrapper()

    def handle(self, connection_id, message_content):
        batch_response_message = GossipBatchResponse()
        batch_response_message.ParseFromString(message_content)

        batch = Batch()
        batch.ParseFromString(batch_response_message.content)
        if batch.header_signature in self._seen_cache:
            self._batch_dropped_count.inc()
            return HandlerResult(status=HandlerStatus.DROP)

        if not is_valid_batch(batch):
            LOGGER.debug("requested batch's signature is invalid: %s",
                         batch.header_signature)
            return HandlerResult(status=HandlerStatus.DROP)

        self._seen_cache[batch.header_signature] = None
        return HandlerResult(status=HandlerStatus.PASS)
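The CounterWrapper fallback above is a null-object pattern: with no metrics_registry, the handler still gets an object whose inc() silently does nothing, so the counting code never has to branch. A minimal standalone sketch of that idea (illustrative stand-ins, not Sawtooth's actual wrapper classes):

# Illustrative stand-ins for the CounterWrapper/GaugeWrapper pattern above:
# wrap a real metric when one is provided, otherwise make every call a no-op.
class CounterWrapper:
    def __init__(self, counter=None):
        self._counter = counter

    def inc(self, val=1):
        if self._counter is not None:
            self._counter.inc(val)


class GaugeWrapper:
    def __init__(self, gauge=None):
        self._gauge = gauge

    def set_value(self, val):
        if self._gauge is not None:
            self._gauge.set_value(val)

    def get_value(self):
        if self._gauge is not None:
            return self._gauge.get_value()
        return 0


dropped = CounterWrapper()   # no registry: the call below is a harmless no-op
dropped.inc()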
Example #3
    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300,
                 metrics_registry=None):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING: this time should always be less
            than cache_keep_time, or the validator can get into a state where
            it fails to make progress because it thinks it has already
            requested something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        if metrics_registry:
            # Tracks how many times an unsatisfied dependency is found
            self._unsatisfied_dependency_count = CounterWrapper(
                metrics_registry.counter(
                    'completer.unsatisfied_dependency_count'))
            # Tracks the length of the completer's _seen_txns
            self._seen_txns_length = GaugeWrapper(
                metrics_registry.gauge('completer.seen_txns_length'))
            # Tracks the length of the completer's _incomplete_blocks
            self._incomplete_blocks_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_blocks_length'))
            # Tracks the length of the completer's _incomplete_batches
            self._incomplete_batches_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_batches_length'))
        else:
            self._unsatisfied_dependency_count = CounterWrapper()
            self._seen_txns_length = GaugeWrapper()
            self._incomplete_blocks_length = GaugeWrapper()
            self._incomplete_batches_length = GaugeWrapper()
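Most of these structures are TimedCaches. A minimal TTL-mapping sketch (illustrative only, not Sawtooth's TimedCache, which also takes a purge frequency) shows the semantics the WARNING above relies on: the "already requested" marker must expire soon enough that a still-missing object can be requested again.

# Minimal TTL-mapping sketch: keys older than keep_time are treated as absent,
# so a shorter requested_keep_time lets a still-missing object be re-requested
# while the longer-lived caches keep their entries.
import time


class TTLCache:
    def __init__(self, keep_time):
        self._keep_time = keep_time
        self._store = {}  # key -> (value, insertion time)

    def __setitem__(self, key, value):
        self._store[key] = (value, time.time())

    def __contains__(self, key):
        entry = self._store.get(key)
        if entry is None:
            return False
        if time.time() - entry[1] > self._keep_time:
            del self._store[key]
            return False
        return True


requested = TTLCache(keep_time=300)     # forget requested ids after 5 minutes
requested['batch-id'] = None
assert 'batch-id' in requested          # recently requested: don't ask again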
 def __init__(self, metrics_registry=None):
     self._seen_cache = TimedCache()
     if metrics_registry:
         self._batch_dropped_count = CounterWrapper(
             metrics_registry.counter(
                 'already_validated_batch_dropped_count'))
     else:
         self._batch_dropped_count = CounterWrapper()
Example #6
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = ValidationRuleEnforcer(
            SettingsViewFactory(state_view_factory))

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()
Example #7
 def _get_received_message_counter(self, tag):
     if tag not in self._received_message_counters:
         if self._metrics_registry:
             self._received_message_counters[tag] = CounterWrapper(
                 self._metrics_registry.counter(
                     'interconnect_received_message_count',
                     tags=['message_type={}'.format(tag)]))
         else:
             self._received_message_counters[tag] = CounterWrapper()
     return self._received_message_counters[tag]
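The helper above lazily creates and memoizes one counter per message-type tag. A rough standalone sketch of that pattern, using a plain dict and a no-op counter stand-in (the real code uses a pyformance-style registry and CounterWrapper):

# Sketch of the lazy, per-tag counter pattern (stand-ins, not Sawtooth's code).
class _NullCounter:
    def inc(self, val=1):
        pass


class TaggedCounters:
    def __init__(self, registry=None):
        self._registry = registry
        self._counters = {}   # (metric name, tag) -> counter

    def get(self, name, tag):
        key = (name, tag)
        if key not in self._counters:
            if self._registry is not None:
                # Assumes a registry with a pyformance-like counter() method
                # that accepts a tags argument, as in the example above.
                self._counters[key] = self._registry.counter(
                    name, tags=['message_type={}'.format(tag)])
            else:
                self._counters[key] = _NullCounter()
        return self._counters[key]


counters = TaggedCounters()   # no registry: every counter is a no-op
counters.get('interconnect_received_message_count', 'PING').inc()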
Example #8
 def _get_tp_process_response_counter(self, tag):
     if tag not in self._tp_process_response_counters:
         if self._metrics_registry:
             self._tp_process_response_counters[tag] = CounterWrapper(
                 self._metrics_registry.counter(
                     'executor.TransactionExecutorThread.tp_process_response_count',
                     tags=['response_type={}'.format(tag)]))
         else:
             self._tp_process_response_counters[tag] = CounterWrapper()
     return self._tp_process_response_counters[tag]
class ClientBatchSubmitBackpressureHandler(Handler):
    """This handler receives a batch list, and accepts it if the system is
    able.  Otherwise it returns a QUEUE_FULL response.
    """

    def __init__(self, can_accept_fn, queue_info_fn, metrics_registry=None):
        self._can_accept = can_accept_fn
        self._queue_info = queue_info_fn
        self._applying_backpressure = False

        if metrics_registry:
            self._batches_rejected_count = CounterWrapper(
                metrics_registry.counter(
                    'backpressure_batches_rejected_count'))
            self._batches_rejected_gauge = GaugeWrapper(
                metrics_registry.gauge(
                    'backpressure_batches_rejected_gauge', default=0))
        else:
            self._batches_rejected_count = CounterWrapper()
            self._batches_rejected_gauge = GaugeWrapper()

    def handle(self, connection_id, message_content):
        if not self._can_accept():
            if not self._applying_backpressure:
                self._applying_backpressure = True
                LOGGER.info(
                    'Applying back pressure on client submitted batches: '
                    'current depth: %s, limit: %s',
                    *self._queue_info())

            self._batches_rejected_count.inc()
            self._batches_rejected_gauge.set_value(
                self._batches_rejected_gauge.get_value() + 1)

            response = ClientBatchSubmitResponse(
                status=ClientBatchSubmitResponse.QUEUE_FULL)
            return HandlerResult(
                status=HandlerStatus.RETURN,
                message_out=response,
                message_type=Message.CLIENT_BATCH_SUBMIT_RESPONSE
            )
        else:
            if self._applying_backpressure:
                self._applying_backpressure = False
                self._batches_rejected_gauge.set_value(0)
                LOGGER.info(
                    'Ending back pressure on client submitted batches: '
                    'current depth: %s, limit: %s',
                    *self._queue_info())

        return HandlerResult(status=HandlerStatus.PASS)
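The handler's behavior hinges on two injected callables. A hypothetical sketch of what they are expected to provide, inferred only from how they are used above (can_accept_fn returns a boolean; queue_info_fn returns a (current depth, limit) pair for the log messages):

# Hypothetical stand-ins for the two callables this handler expects.
import collections

pending_batches = collections.deque()
QUEUE_LIMIT = 100


def can_accept():
    # Accept new client batches only while the pending queue has room.
    return len(pending_batches) < QUEUE_LIMIT


def queue_info():
    # (current depth, limit), matching the 'current depth: %s, limit: %s' logs.
    return len(pending_batches), QUEUE_LIMIT


# handler = ClientBatchSubmitBackpressureHandler(can_accept, queue_info)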
class GossipMessageSignatureVerifier(Handler):
    def __init__(self, metrics_registry=None):
        self._seen_cache = TimedCache()
        if metrics_registry:
            self._batch_dropped_count = CounterWrapper(
                metrics_registry.counter(
                    'already_validated_batch_dropped_count'))
            self._block_dropped_count = CounterWrapper(
                metrics_registry.counter(
                    'already_validated_block_dropped_count'))
        else:
            self._batch_dropped_count = CounterWrapper()
            self._block_dropped_count = CounterWrapper()

    def handle(self, connection_id, message_content):
        gossip_message = GossipMessage()
        gossip_message.ParseFromString(message_content)

        if gossip_message.content_type == GossipMessage.BLOCK:
            block = Block()
            block.ParseFromString(gossip_message.content)
            if block.header_signature in self._seen_cache:
                self._block_dropped_count.inc()
                return HandlerResult(status=HandlerStatus.DROP)

            if not is_valid_block(block):
                LOGGER.debug("block signature is invalid: %s",
                             block.header_signature)
                return HandlerResult(status=HandlerStatus.DROP)

            self._seen_cache[block.header_signature] = None
            return HandlerResult(status=HandlerStatus.PASS)

        elif gossip_message.content_type == GossipMessage.BATCH:
            batch = Batch()
            batch.ParseFromString(gossip_message.content)
            if batch.header_signature in self._seen_cache:
                self._batch_dropped_count.inc()
                return HandlerResult(status=HandlerStatus.DROP)

            if not is_valid_batch(batch):
                LOGGER.debug("batch signature is invalid: %s",
                             batch.header_signature)
                return HandlerResult(status=HandlerStatus.DROP)

            self._seen_cache[batch.header_signature] = None
            return HandlerResult(status=HandlerStatus.PASS)

        # should drop the message if it does not have a valid content_type
        return HandlerResult(status=HandlerStatus.DROP)
Example #12
class ClientBatchSubmitBackpressureHandler(Handler):
    """This handler receives a batch list, and accepts it if the system is
    able.  Otherwise it returns a QUEUE_FULL response.
    """
    def __init__(self, can_accept_fn, queue_info_fn, metrics_registry=None):
        self._can_accept = can_accept_fn
        self._queue_info = queue_info_fn
        self._applying_backpressure = False

        if metrics_registry:
            self._batches_rejected_count = CounterWrapper(
                metrics_registry.counter(
                    'back_pressure_handlers.ClientBatchSubmitBackpressureHandler.backpressure_batches_rejected_count'
                ))
            self._batches_rejected_gauge = GaugeWrapper(
                metrics_registry.gauge(
                    'back_pressure_handlers.ClientBatchSubmitBackpressureHandler.backpressure_batches_rejected_gauge',
                    default=0))
        else:
            self._batches_rejected_count = CounterWrapper()
            self._batches_rejected_gauge = GaugeWrapper()

    def handle(self, connection_id, message_content):
        if not self._can_accept():
            if not self._applying_backpressure:
                self._applying_backpressure = True
                LOGGER.info(
                    'Applying back pressure on client submitted batches: current depth: %s, limit: %s',
                    *self._queue_info())

            self._batches_rejected_count.inc()
            self._batches_rejected_gauge.set_value(
                self._batches_rejected_gauge.get_value() + 1)

            response = ClientBatchSubmitResponse(
                status=ClientBatchSubmitResponse.QUEUE_FULL)
            return HandlerResult(
                status=HandlerStatus.RETURN,
                message_out=response,
                message_type=Message.CLIENT_BATCH_SUBMIT_RESPONSE)
        else:
            if self._applying_backpressure:
                self._applying_backpressure = False
                self._batches_rejected_gauge.set_value(0)
                LOGGER.info(
                    'Ending back pressure on client submitted batches: current depth: %s, limit: %s',
                    *self._queue_info())

        return HandlerResult(status=HandlerStatus.PASS)
    def __init__(self, can_accept_fn, queue_info_fn, metrics_registry=None):
        self._can_accept = can_accept_fn
        self._queue_info = queue_info_fn
        self._applying_backpressure = False

        if metrics_registry:
            self._batches_rejected_count = CounterWrapper(
                metrics_registry.counter(
                    'backpressure_batches_rejected_count'))
            self._batches_rejected_gauge = GaugeWrapper(
                metrics_registry.gauge('backpressure_batches_rejected_gauge',
                                       default=0))
        else:
            self._batches_rejected_count = CounterWrapper()
            self._batches_rejected_gauge = GaugeWrapper()
Example #15
    def __init__(self,
                 service,
                 context_manager,
                 scheduler,
                 processors,
                 waiting_threadpool,
                 settings_view_factory,
                 invalid_observers,
                 metrics_registry=None):
        """
        Args:
            service (Interconnect): The zmq internal interface
            context_manager (ContextManager): The cached state for tps
            scheduler (scheduler.Scheduler): Provides the order of txns to
                execute.
            processors (ProcessorIteratorCollection): Provides the next
                transaction processor to send to.
            waiting_threadpool (ThreadPoolExecutor): A thread pool to run
                indefinite waiting functions in.
            settings_view_factory (SettingsViewFactory): Read the configuration
                state
            invalid_observers: Observers notified when a transaction is found
                to be invalid.
            metrics_registry: (Optional) Pyformance metrics registry handle
                for creating new metrics.
        Attributes:
            _tp_settings_key (str): the key used to reference the part of state
                where the list of required transaction processors are.
            _waiters_by_type (_WaitersByType): Queues up transactions based on
                processor type.
        """
        super(TransactionExecutorThread, self).__init__()
        self._service = service
        self._context_manager = context_manager
        self._scheduler = scheduler
        self._processors = processors
        self._settings_view_factory = settings_view_factory
        self._tp_settings_key = "sawtooth.validator.transaction_families"
        self._waiters_by_type = _WaitersByType()
        self._waiting_threadpool = waiting_threadpool
        self._done = False
        self._invalid_observers = invalid_observers
        self._open_futures = {}
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._transaction_execution_count = CounterWrapper(
                metrics_registry.counter('transaction_execution_count'))
        else:
            self._transaction_execution_count = CounterWrapper()
Example #17
class TransactionExecutorThread(object):
    """A thread of execution controlled by the TransactionExecutor.
    Allows the journal to process several schedulers at once.
    """
    def __init__(self,
                 service,
                 context_manager,
                 scheduler,
                 processors,
                 waiting_threadpool,
                 settings_view_factory,
                 invalid_observers,
                 metrics_registry=None):
        """
        Args:
            service (Interconnect): The zmq internal interface
            context_manager (ContextManager): The cached state for tps
            scheduler (scheduler.Scheduler): Provides the order of txns to
                execute.
            processors (ProcessorIteratorCollection): Provides the next
                transaction processor to send to.
            waiting_threadpool (ThreadPoolExecutor): A thread pool to run
                indefinite waiting functions in.
            settings_view_factory (SettingsViewFactory): Read the configuration
                state
            invalid_observers: Observers notified when a transaction is found
                to be invalid.
            metrics_registry: (Optional) Pyformance metrics registry handle
                for creating new metrics.
        Attributes:
            _tp_settings_key (str): the key used to reference the part of state
                where the list of required transaction processors are.
            _waiters_by_type (_WaitersByType): Queues up transactions based on
                processor type.
        """
        super(TransactionExecutorThread, self).__init__()
        self._service = service
        self._context_manager = context_manager
        self._scheduler = scheduler
        self._processors = processors
        self._settings_view_factory = settings_view_factory
        self._tp_settings_key = "sawtooth.validator.transaction_families"
        self._waiters_by_type = _WaitersByType()
        self._waiting_threadpool = waiting_threadpool
        self._done = False
        self._invalid_observers = invalid_observers
        self._open_futures = {}
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._transaction_execution_count = CounterWrapper(
                metrics_registry.counter('transaction_execution_count'))
        else:
            self._transaction_execution_count = CounterWrapper()

    def _future_done_callback(self, request, result):
        """
        :param request (bytes): the serialized request
        :param result (FutureResult):
        """
        req = processor_pb2.TpProcessRequest()
        req.ParseFromString(request)
        response = processor_pb2.TpProcessResponse()
        response.ParseFromString(result.content)

        if result.connection_id in self._open_futures and \
                req.signature in self._open_futures[result.connection_id]:
            del self._open_futures[result.connection_id][req.signature]

        if response.status == processor_pb2.TpProcessResponse.OK:
            state_sets, state_deletes, events, data = \
                self._context_manager.get_execution_results(req.context_id)

            state_changes = [
                transaction_receipt_pb2.StateChange(
                    address=addr,
                    value=value,
                    type=transaction_receipt_pb2.StateChange.SET)
                for addr, value in state_sets.items()
            ] + [
                transaction_receipt_pb2.StateChange(
                    address=addr,
                    type=transaction_receipt_pb2.StateChange.DELETE)
                for addr in state_deletes
            ]

            self._scheduler.set_transaction_execution_result(
                txn_signature=req.signature,
                is_valid=True,
                context_id=req.context_id,
                state_changes=state_changes,
                events=events,
                data=data)

        elif response.status == processor_pb2.TpProcessResponse.INTERNAL_ERROR:
            header = transaction_pb2.TransactionHeader()
            header.ParseFromString(req.header)

            processor_type = processor_iterator.ProcessorType(
                header.family_name, header.family_version)

            self._execute_or_wait_for_processor_type(processor_type, request,
                                                     req.signature)

        else:
            self._context_manager.delete_contexts(
                context_id_list=[req.context_id])

            self._scheduler.set_transaction_execution_result(
                txn_signature=req.signature,
                is_valid=False,
                context_id=req.context_id,
                error_message=response.message,
                error_data=response.extended_data)

            for observer in self._invalid_observers:
                observer.notify_txn_invalid(req.signature, response.message,
                                            response.extended_data)

    def execute_thread(self):
        try:
            self._execute_schedule()
        except Exception as exc:  # pylint: disable=broad-except
            LOGGER.exception(
                "Unhandled exception while executing schedule: %s", exc)

    def _execute_schedule(self):
        for txn_info in self._scheduler:
            self._transaction_execution_count.inc()

            txn = txn_info.txn
            header = transaction_pb2.TransactionHeader()
            header.ParseFromString(txn.header)

            processor_type = processor_iterator.ProcessorType(
                header.family_name, header.family_version)

            config = self._settings_view_factory.create_settings_view(
                txn_info.state_hash)
            transaction_families = config.get_setting(
                key=self._tp_settings_key, default_value="[]")

            # After reading the transaction families required by the
            # configuration, try to json.loads them into a Python object.
            # If there is a misconfiguration, proceed as if there is no
            # configuration.
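            # A well-formed setting value would look something like this
            # (hypothetical example): '[{"family": "intkey", "version": "1.0"}]'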
            try:
                transaction_families = json.loads(transaction_families)
                required_transaction_processors = [
                    processor_iterator.ProcessorType(d.get('family'),
                                                     d.get('version'))
                    for d in transaction_families
                ]
            except ValueError:
                LOGGER.warning(
                    "sawtooth.validator.transaction_families "
                    "misconfigured. Expecting a json array, found"
                    " %s", transaction_families)
                required_transaction_processors = []

            # First check if the transaction should be failed
            # based on configuration
            if required_transaction_processors and \
                    processor_type not in required_transaction_processors:
                # The txn processor type is not in the required
                # transaction processors, so fail the transaction right away.
                LOGGER.debug(
                    "failing transaction %s of type (name=%s,"
                    "version=%s) since it isn't"
                    " required in the configuration", txn.header_signature,
                    processor_type.name, processor_type.version)

                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue
            try:
                context_id = self._context_manager.create_context(
                    state_hash=txn_info.state_hash,
                    base_contexts=txn_info.base_context_ids,
                    inputs=list(header.inputs),
                    outputs=list(header.outputs))
            except KeyError:
                LOGGER.warning(
                    "Error creating context for transaction %s, "
                    "scheduler provided a base context that was not "
                    "in the context manager.", txn.header_signature)
                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue
            except CreateContextException as cce:
                LOGGER.info("Exception creating context: %s", cce)
                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue
            content = processor_pb2.TpProcessRequest(
                header=header,
                payload=txn.payload,
                signature=txn.header_signature,
                context_id=context_id).SerializeToString()

            # Since we have already checked whether the transaction should be
            # failed, all other cases should either be executed or waited for.
            self._execute_or_wait_for_processor_type(
                processor_type=processor_type,
                content=content,
                signature=txn.header_signature)

        self._done = True

    def _execute_or_wait_for_processor_type(self, processor_type, content,
                                            signature):
        processor = self._processors.get_next_of_type(
            processor_type=processor_type)
        if processor is None:
            LOGGER.debug(
                "no transaction processors registered for "
                "processor type %s", processor_type)
            if processor_type not in self._waiters_by_type:
                in_queue = queue.Queue()
                in_queue.put_nowait((content, signature))
                waiter = _Waiter(self._send_and_process_result,
                                 processor_type=processor_type,
                                 processors=self._processors,
                                 in_queue=in_queue,
                                 waiters_by_type=self._waiters_by_type)
                self._waiters_by_type[processor_type] = waiter
                self._waiting_threadpool.submit(waiter.run_in_threadpool)
            else:
                self._waiters_by_type[processor_type].add_to_in_queue(
                    (content, signature))
        else:
            connection_id = processor.connection_id
            self._send_and_process_result(content, connection_id, signature)

    def _send_and_process_result(self, content, connection_id, signature):
        fut = self._service.send(validator_pb2.Message.TP_PROCESS_REQUEST,
                                 content,
                                 connection_id=connection_id,
                                 callback=self._future_done_callback)
        if connection_id in self._open_futures:
            self._open_futures[connection_id].update({signature: fut})
        else:
            self._open_futures[connection_id] = \
                {signature: fut}

    def remove_broken_connection(self, connection_id):
        if connection_id not in self._open_futures:
            # Connection has already been removed.
            return
        self._processors.remove(connection_id)
        futures_to_set = [
            self._open_futures[connection_id][key]
            for key in self._open_futures[connection_id]
        ]

        response = processor_pb2.TpProcessResponse(
            status=processor_pb2.TpProcessResponse.INTERNAL_ERROR)
        result = FutureResult(
            message_type=validator_pb2.Message.TP_PROCESS_RESPONSE,
            content=response.SerializeToString(),
            connection_id=connection_id)
        for fut in futures_to_set:
            fut.set_result(result)
            self._future_done_callback(fut.request, result)

    def is_done(self):
        return self._done and len(self._waiters_by_type) == 0

    def cancel(self):
        for waiter in self._waiters_by_type.values():
            waiter.cancel()
        self._scheduler.cancel()
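The _execute_or_wait_for_processor_type and _Waiter machinery above parks work for processor types that have no registered processor and drains it once one connects. A compact standalone sketch of that idea (toy names, single-threaded, not Sawtooth's APIs):

# Toy dispatcher sketch: submit() parks payloads per processor type until a
# handler is registered, then register() drains the backlog.
import queue


class ToyDispatcher:
    def __init__(self):
        self._handlers = {}   # processor type -> callable(payload)
        self._waiting = {}    # processor type -> queue.Queue of payloads

    def register(self, processor_type, handler):
        self._handlers[processor_type] = handler
        backlog = self._waiting.pop(processor_type, None)
        while backlog is not None and not backlog.empty():
            handler(backlog.get_nowait())

    def submit(self, processor_type, payload):
        handler = self._handlers.get(processor_type)
        if handler is None:
            self._waiting.setdefault(
                processor_type, queue.Queue()).put_nowait(payload)
        else:
            handler(payload)


dispatcher = ToyDispatcher()
dispatcher.submit('intkey/1.0', 'txn-1')    # parked: no processor registered
dispatcher.register('intkey/1.0', print)    # drains the backlog ('txn-1')
dispatcher.submit('intkey/1.0', 'txn-2')    # handled immediately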
Example #18
class BlockValidator(object):
    """
    Responsible for validating a block. Handles both chain extensions and
    forks, determines whether the new block should become the head of the
    chain, and returns the information necessary to make the switch if needed.
    """
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = ValidationRuleEnforcer(
            SettingsViewFactory(state_view_factory))

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()

        # Blocks that are currently being processed
        self._blocks_processing = ConcurrentSet()

        # Descendant blocks that are waiting for an in process block
        # to complete
        self._blocks_pending = ConcurrentMultiMap()

    def stop(self):
        self._thread_pool.shutdown(wait=True)

    def _get_previous_block_state_root(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    def _validate_batches_in_block(self, blkw, prev_state_root):
        """
        Validate all batches in the block. This includes:
            - Validating all transaction dependencies are met
            - Validating there are no duplicate batches or transactions
            - Validating execution of all batches in the block produces the
              correct state root hash

        Args:
            blkw: the block of batches to validate
            prev_state_root: the state root to execute transactions on top of

        Raises:
            BlockValidationError:
                If validation fails, raises this error with the reason.
            MissingDependency:
                Validation failed because of a missing dependency.
            DuplicateTransaction:
                Validation failed because of a duplicate transaction.
            DuplicateBatch:
                Validation failed because of a duplicate batch.
        """
        if not blkw.block.batches:
            return

        try:
            chain_commit_state = ChainCommitState(
                blkw.previous_block_id, self._block_cache,
                self._block_cache.block_store)

            scheduler = self._transaction_executor.create_scheduler(
                self._squash_handler, prev_state_root)
            self._transaction_executor.execute(scheduler)

            chain_commit_state.check_for_duplicate_batches(blkw.block.batches)

            transactions = []
            for batch in blkw.block.batches:
                transactions.extend(batch.transactions)

            chain_commit_state.check_for_duplicate_transactions(transactions)

            chain_commit_state.check_for_transaction_dependencies(transactions)

            for batch, has_more in look_ahead(blkw.block.batches):
                if has_more:
                    scheduler.add_batch(batch)
                else:
                    scheduler.add_batch(batch, blkw.state_root_hash)

        except (DuplicateBatch, DuplicateTransaction,
                MissingDependency) as err:
            scheduler.cancel()
            raise BlockValidationError("Block {} failed validation: {}".format(
                blkw, err))

        except Exception:
            scheduler.cancel()
            raise

        scheduler.finalize()
        scheduler.complete(block=True)
        state_hash = None

        for batch in blkw.batches:
            batch_result = scheduler.get_batch_execution_result(
                batch.header_signature)
            if batch_result is not None and batch_result.is_valid:
                txn_results = \
                    scheduler.get_transaction_execution_results(
                        batch.header_signature)
                blkw.execution_results.extend(txn_results)
                state_hash = batch_result.state_hash
                blkw.num_transactions += len(batch.transactions)
            else:
                raise BlockValidationError(
                    "Block {} failed validation: Invalid batch "
                    "{}".format(blkw, batch))

        if blkw.state_root_hash != state_hash:
            raise BlockValidationError(
                "Block {} failed state root hash validation. Expected {}"
                " but got {}".format(blkw, blkw.state_root_hash, state_hash))

    def _validate_permissions(self, blkw, prev_state_root):
        """
        Validate that all of the batch signers and transaction signers for the
        batches in the block are permitted by the transactor permissioning
        roles stored in state as of the previous block. If a transactor is
        found to not be permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, prev_state_root, from_state=True):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw, prev_state_root):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            return self._validation_rule_enforcer.validate(
                blkw, prev_state_root)
        return True

    def validate_block(self, blkw, chain_head=None):
        if blkw.status == BlockStatus.Valid:
            return
        elif blkw.status == BlockStatus.Invalid:
            raise BlockValidationError(
                'Block {} is already invalid'.format(blkw))

        # pylint: disable=broad-except
        try:
            if chain_head is None:
                # Try to get the chain head from the block store; note that the
                # block store may also return None for the chain head if a
                # genesis block hasn't been committed yet.
                chain_head = self._block_cache.block_store.chain_head

            try:
                prev_state_root = self._get_previous_block_state_root(blkw)
            except KeyError:
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor'.format(
                        blkw))

            if not self._validate_permissions(blkw, prev_state_root):
                raise BlockValidationError(
                    'Block {} failed permission validation'.format(blkw))

            try:
                prev_block = self._block_cache[blkw.previous_block_id]
            except KeyError:
                prev_block = None

            consensus = self._load_consensus(prev_block)
            public_key = \
                self._identity_signer.get_public_key().as_hex()
            consensus_block_verifier = consensus.BlockVerifier(
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                validator_id=public_key)

            if not consensus_block_verifier.verify_block(blkw):
                raise BlockValidationError(
                    'Block {} failed {} consensus validation'.format(
                        blkw, consensus))

            if not self._validate_on_chain_rules(blkw, prev_state_root):
                raise BlockValidationError(
                    'Block {} failed on-chain validation rules'.format(blkw))

            self._validate_batches_in_block(blkw, prev_state_root)

            # Since changes to the chain head can change the state of the
            # blocks in the BlockStore, we have to revalidate this block if
            # the chain head has moved.
            block_store = self._block_cache.block_store

            # The chain_head is None when this is the genesis block or if the
            # block store has no chain_head.
            if chain_head is not None:
                if chain_head.identifier != block_store.chain_head.identifier:
                    raise ChainHeadUpdated()

            blkw.status = BlockStatus.Valid

        except BlockValidationError as err:
            blkw.status = BlockStatus.Invalid
            raise err

        except ChainHeadUpdated as e:
            raise e

        except Exception as e:
            LOGGER.exception(
                "Unhandled exception BlockValidator.validate_block()")
            raise e

    @staticmethod
    def _compare_chain_height(head_a, head_b):
        """Returns True if head_a is taller, False if head_b is taller, and
        True if the heights are the same."""
        return head_a.block_num - head_b.block_num >= 0

    def _build_fork_diff_to_common_height(self, head_long, head_short):
        """Returns a list of blocks on the longer chain since the greatest
        common height between the two chains. Note that the chains may not
        have the same block id at the greatest common height.

        Args:
            head_long (BlockWrapper)
            head_short (BlockWrapper)

        Returns:
            (list of BlockWrapper) All blocks in the longer chain since the
            last block in the shorter chain. Ordered newest to oldest.

        Raises:
            BlockValidationError
                The block is missing a predecessor. Note that normally this
                shouldn't happen because of the completer."""
        fork_diff = []

        last = head_short.block_num
        blk = head_long

        while blk.block_num > last:
            if blk.previous_block_id == NULL_BLOCK_IDENTIFIER:
                break

            fork_diff.append(blk)
            try:
                blk = self._block_cache[blk.previous_block_id]
            except KeyError:
                LOGGER.debug(
                    "Failed to build fork diff due to missing predecessor: %s",
                    blk)

                # Mark all blocks in the longer chain since the invalid block
                # as invalid.
                for blk in fork_diff:
                    blk.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Failed to build fork diff: block {} missing predecessor'.
                    format(blk))

        return blk, fork_diff

    def _extend_fork_diff_to_common_ancestor(self, new_blkw, cur_blkw,
                                             new_chain, cur_chain):
        """ Finds a common ancestor of the two chains. new_blkw and cur_blkw
        must be at the same height, or this will always fail.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Block {} rejected due to wrong genesis {}'.format(
                        cur_blkw, new_blkw))

            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor {}'.format(
                        new_blkw, new_blkw.previous_block_id))

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _compare_forks_consensus(self, chain_head, new_block):
        """Ask the consensus module which fork to choose.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        consensus = self._load_consensus(chain_head)
        fork_resolver = consensus.ForkResolver(
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=public_key)

        return fork_resolver.compare_forks(chain_head, new_block)

    def _load_consensus(self, block):
        """Load the consensus module using the state as of the given block."""
        if block is not None:
            return ConsensusFactory.get_configured_consensus_module(
                block.header_signature,
                BlockWrapper.state_view_for_block(block,
                                                  self._state_view_factory))
        return ConsensusFactory.get_consensus_module('genesis')

    @staticmethod
    def _get_batch_commit_changes(new_chain, cur_chain):
        """
        Get all the batches that should be committed from the new chain and
        all the batches that should be uncommitted from the current chain.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def submit_blocks_for_verification(self, blocks, callback):
        for block in blocks:
            if self.in_process(block.header_signature):
                LOGGER.debug("Block already in process: %s", block)
                continue

            if self.in_process(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' in process,"
                    " adding '%s' pending", block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            if self.in_pending(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' is pending,"
                    " adding '%s' pending", block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            LOGGER.debug("Adding block %s for processing", block.identifier)

            # Add the block to the set of blocks being processed
            self._blocks_processing.add(block.identifier)

            # Schedule the block for processing
            self._thread_pool.submit(self.process_block_verification, block,
                                     self._wrap_callback(block, callback))

    def _wrap_callback(self, block, callback):
        # Internal cleanup after verification
        def wrapper(commit_new_block, result):
            LOGGER.debug("Removing block from processing %s",
                         block.identifier[:6])
            try:
                self._blocks_processing.remove(block.identifier)
            except KeyError:
                LOGGER.warning(
                    "Tried to remove block from in process but it"
                    " wasn't in processes: %s", block.identifier)

            # If the block is valid, submit any descendant blocks that were
            # waiting on it; if it is invalid, mark all descendant blocks as
            # invalid and remove them from pending.
            if block.status == BlockStatus.Valid:
                blocks_now_ready = self._blocks_pending.pop(
                    block.identifier, [])
                self.submit_blocks_for_verification(blocks_now_ready, callback)

            else:
                # Get all the pending blocks that descend from the invalid
                # block
                blocks_now_invalid = self._blocks_pending.pop(
                    block.identifier, [])

                while blocks_now_invalid:
                    invalid_block = blocks_now_invalid.pop()
                    invalid_block.status = BlockStatus.Invalid

                    LOGGER.debug('Marking descendant block invalid: %s',
                                 invalid_block)

                    # Get descendants of the descendant
                    blocks_now_invalid.extend(
                        self._blocks_pending.pop(invalid_block.identifier, []))

            callback(commit_new_block, result)

        return wrapper

    def in_process(self, block_id):
        return block_id in self._blocks_processing

    def in_pending(self, block_id):
        return block_id in self._blocks_pending

    def _add_block_to_pending(self, block):
        previous = block.previous_block_id
        self._blocks_pending.append(previous, block)

    def process_block_verification(self, block, callback):
        """
        Main entry point for block validation. Takes a candidate block,
        decides whether it is valid, and, if it is valid, determines whether
        it should become the new chain head. Returns the results to the
        ChainController so that the changeover can be made if necessary.
        """
        try:
            result = BlockValidationResult(block)
            LOGGER.info("Starting block validation of : %s", block)

            # Get the current chain_head and store it in the result
            chain_head = self._block_cache.block_store.chain_head
            result.chain_head = chain_head

            # Create new local variables for current and new block, since
            # these variables get modified later
            current_block = chain_head
            new_block = block

            try:
                # Get all the blocks since the greatest common height from the
                # longer chain.
                if self._compare_chain_height(current_block, new_block):
                    current_block, result.current_chain =\
                        self._build_fork_diff_to_common_height(
                            current_block, new_block)
                else:
                    new_block, result.new_chain =\
                        self._build_fork_diff_to_common_height(
                            new_block, current_block)

                # Add blocks to the two chains until a common ancestor is found
                # or raise an exception if no common ancestor is found
                self._extend_fork_diff_to_common_ancestor(
                    new_block, current_block, result.new_chain,
                    result.current_chain)
            except BlockValidationError as err:
                LOGGER.warning('%s', err)
                callback(False, result)
                return

            valid = True
            for blk in reversed(result.new_chain):
                if valid:
                    try:
                        self.validate_block(blk, chain_head)
                    except BlockValidationError as err:
                        LOGGER.warning('Block %s failed validation: %s', blk,
                                       err)
                        valid = False
                    result.transaction_count += block.num_transactions
                else:
                    LOGGER.info(
                        "Block marked invalid(invalid predecessor): %s", blk)
                    blk.status = BlockStatus.Invalid

            if not valid:
                callback(False, result)
                return

            # Ask consensus if the new chain should be committed
            LOGGER.info(
                "Comparing current chain head '%s' against new block '%s'",
                chain_head, new_block)
            for i in range(
                    max(len(result.new_chain), len(result.current_chain))):
                cur = new = num = "-"
                if i < len(result.current_chain):
                    cur = result.current_chain[i].header_signature[:8]
                    num = result.current_chain[i].block_num
                if i < len(result.new_chain):
                    new = result.new_chain[i].header_signature[:8]
                    num = result.new_chain[i].block_num
                LOGGER.info(
                    "Fork comparison at height %s is between %s and %s", num,
                    cur, new)

            commit_new_chain = self._compare_forks_consensus(chain_head, block)

            # If committing the new chain, get the list of committed batches
            # from the current chain that need to be uncommitted and the list
            # of uncommitted batches from the new chain that need to be
            # committed.
            if commit_new_chain:
                commit, uncommit =\
                    self._get_batch_commit_changes(
                        result.new_chain, result.current_chain)
                result.committed_batches = commit
                result.uncommitted_batches = uncommit

                if result.new_chain[0].previous_block_id \
                        != chain_head.identifier:
                    self._moved_to_fork_count.inc()

            # Pass the results to the callback function
            callback(commit_new_chain, result)
            LOGGER.info("Finished block validation of: %s", block)

        except ChainHeadUpdated:
            callback(False, result)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s", block)
            # callback to clean up the block out of the processing list.
            callback(False, result)
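
# A minimal sketch (not from the source) of a callback compatible with the
# process_block_verification() shown above: the validator invokes it with the
# commit decision and the BlockValidationResult it assembled.
def on_validation_done(commit_new_chain, result):
    if commit_new_chain:
        LOGGER.info("Switching to new chain head; %s transactions counted",
                    result.transaction_count)
    else:
        LOGGER.info("Keeping current chain head: %s", result.chain_head)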
Example #19
    def __init__(self,
                 block_cache,
                 block_sender,
                 state_view_factory,
                 transaction_executor,
                 chain_head_lock,
                 on_chain_updated,
                 squash_handler,
                 chain_id_manager,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 chain_observers,
                 thread_pool=None,
                 metrics_registry=None):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_sender: an interface object used to send blocks to the
                network.
            state_view_factory: The factory object used to create read-only
                views of state.
            transaction_executor: The TransactionExecutor used to produce
                schedulers for batch validation.
            chain_head_lock: Lock to hold while the chain head is being
                updated, this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                system that the head block of the chain has changed.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            chain_id_manager: The ChainIdManager instance.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._block_sender = block_sender
        self._transaction_executor = transaction_executor
        self._notify_on_chain_updated = on_chain_updated
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._blocks_processing = {}  # blocks currently being validated,
        # keyed by block id.
        self._blocks_pending = {}  # maps a block id to the list of blocks
        # waiting on it (their previous block). Once that block completes,
        # the waiting blocks are scheduled for validation.
        self._chain_id_manager = chain_id_manager

        self._chain_head = None

        self._permission_verifier = permission_verifier
        self._chain_observers = chain_observers

        if metrics_registry:
            self._chain_head_gauge = GaugeWrapper(
                metrics_registry.gauge('chain_head', default='no chain head'))
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
        else:
            self._chain_head_gauge = GaugeWrapper()
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()

        self._block_queue = queue.Queue()
        self._thread_pool = \
            InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool
        self._chain_thread = None

        # Only run this after all member variables have been bound
        self._set_chain_head_from_block_store()
Example #20
class ChainController(object):
    """
    Evaluates new blocks to determine whether they should extend or replace
    the current chain. If a block is valid, the chain is extended.
    """
    def __init__(self,
                 block_cache,
                 block_sender,
                 state_view_factory,
                 transaction_executor,
                 chain_head_lock,
                 on_chain_updated,
                 squash_handler,
                 chain_id_manager,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 chain_observers,
                 thread_pool=None,
                 metrics_registry=None):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_sender: an interface object used to send blocks to the
                network.
            state_view_factory: The factory object used to create read-only
                views of state.
            transaction_executor: The TransactionExecutor used to produce
                schedulers for batch validation.
            chain_head_lock: Lock to hold while the chain head is being
                updated, this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                system that the head block of the chain has changed.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            chain_id_manager: The ChainIdManager instance.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._block_sender = block_sender
        self._transaction_executor = transaction_executor
        self._notify_on_chain_updated = on_chain_updated
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._blocks_processing = {}  # blocks currently being validated,
        # keyed by block id.
        self._blocks_pending = {}  # maps a block id to the list of blocks
        # waiting on it (their previous block). Once that block completes,
        # the waiting blocks are scheduled for validation.
        self._chain_id_manager = chain_id_manager

        self._chain_head = None

        self._permission_verifier = permission_verifier
        self._chain_observers = chain_observers

        if metrics_registry:
            self._chain_head_gauge = GaugeWrapper(
                metrics_registry.gauge('chain_head', default='no chain head'))
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
        else:
            self._chain_head_gauge = GaugeWrapper()
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()

        self._block_queue = queue.Queue()
        self._thread_pool = \
            InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool
        self._chain_thread = None

        # Only run this after all member variables have been bound
        self._set_chain_head_from_block_store()

    def _set_chain_head_from_block_store(self):
        try:
            self._chain_head = self._block_store.chain_head
            if self._chain_head is not None:
                LOGGER.info("Chain controller initialized with chain head: %s",
                            self._chain_head)
                self._chain_head_gauge.set_value(
                    self._chain_head.identifier[:8])
        except Exception:
            LOGGER.exception(
                "Invalid block store. Head of the block chain cannot be"
                " determined")
            raise

    def start(self):
        self._set_chain_head_from_block_store()
        self._notify_on_chain_updated(self._chain_head)

        self._chain_thread = _ChainThread(chain_controller=self,
                                          block_queue=self._block_queue,
                                          block_cache=self._block_cache)
        self._chain_thread.start()

    def stop(self):
        if self._chain_thread is not None:
            self._chain_thread.stop()
            self._chain_thread = None

        if self._thread_pool is not None:
            self._thread_pool.shutdown(wait=True)

    def queue_block(self, block):
        """
        New block has been received, queue it with the chain controller
        for processing.
        """
        self._block_queue.put(block)

    @property
    def chain_head(self):
        return self._chain_head

    def _submit_blocks_for_verification(self, blocks):
        for blkw in blocks:
            state_view = BlockWrapper.state_view_for_block(
                self.chain_head, self._state_view_factory)
            consensus_module = \
                ConsensusFactory.get_configured_consensus_module(
                    self.chain_head.header_signature,
                    state_view)

            validator = BlockValidator(
                consensus_module=consensus_module,
                new_block=blkw,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                done_cb=self.on_block_validated,
                executor=self._transaction_executor,
                squash_handler=self._squash_handler,
                identity_signer=self._identity_signer,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                permission_verifier=self._permission_verifier)
            self._blocks_processing[blkw.block.header_signature] = validator
            self._thread_pool.submit(validator.run)

    def on_block_validated(self, commit_new_block, result):
        """Message back from the block validator, that the validation is
        complete
        Args:
        commit_new_block (Boolean): whether the new block should become the
        chain head or not.
        result (Dict): Map of the results of the fork resolution.
        Returns:
            None
        """
        try:
            with self._lock:
                new_block = result["new_block"]
                LOGGER.info("on_block_validated: %s", new_block)

                # remove from the processing list
                del self._blocks_processing[new_block.identifier]

                # Remove this block from the pending queue, obtaining any
                # immediate descendants of this block in the process.
                descendant_blocks = \
                    self._blocks_pending.pop(new_block.identifier, [])

                # If the chain head has changed since we started this work.
                if result["chain_head"].identifier !=\
                        self._chain_head.identifier:
                    LOGGER.info(
                        'Chain head updated from %s to %s while processing '
                        'block: %s', result["chain_head"], self._chain_head,
                        new_block)

                    # If any immediate descendant blocks arrived while this
                    # block was being processed, then submit them for
                    # verification.  Otherwise, add this block back to the
                    # pending queue and resubmit it for verification.
                    if descendant_blocks:
                        LOGGER.debug('Verify descendant blocks: %s (%s)',
                                     new_block, [
                                         block.identifier[:8]
                                         for block in descendant_blocks
                                     ])
                        self._submit_blocks_for_verification(descendant_blocks)
                    else:
                        LOGGER.debug('Verify block again: %s ', new_block)
                        self._blocks_pending[new_block.identifier] = []
                        self._submit_blocks_for_verification([new_block])

                # If the head is to be updated to the new block.
                elif commit_new_block:
                    with self._chain_head_lock:
                        self._chain_head = new_block

                        # update the block store to the new chain
                        self._block_store.update_chain(result["new_chain"],
                                                       result["cur_chain"])

                        # make sure old chain is in the block_caches
                        for block in result["cur_chain"]:
                            if block.header_signature not in self._block_cache:
                                self._block_cache[block.header_signature] = \
                                    block

                        LOGGER.info("Chain head updated to: %s",
                                    self._chain_head)

                        self._chain_head_gauge.set_value(
                            self._chain_head.identifier[:8])

                        self._committed_transactions_count.inc(
                            result["num_transactions"])

                        self._block_num_gauge.set_value(
                            self._chain_head.block_num)

                        # tell the BlockPublisher and the rest of the system
                        # that the chain has been updated
                        self._notify_on_chain_updated(
                            self._chain_head, result["committed_batches"],
                            result["uncommitted_batches"])

                        for batch in new_block.batches:
                            if batch.trace:
                                LOGGER.debug("TRACE %s: %s",
                                             batch.header_signature,
                                             self.__class__.__name__)

                    # Submit any immediate descendant blocks for verification
                    LOGGER.debug(
                        'Verify descendant blocks: %s (%s)', new_block,
                        [block.identifier[:8] for block in descendant_blocks])
                    self._submit_blocks_for_verification(descendant_blocks)

                    receipts = self._make_receipts(result["execution_results"])
                    # Update all chain observers
                    for observer in self._chain_observers:
                        observer.chain_update(new_block, receipts)

                # If the block was determined to be invalid.
                elif new_block.status == BlockStatus.Invalid:
                    # Since the block is invalid, we will never accept any
                    # blocks that are descendants of this block.  We are going
                    # to go through the pending blocks and remove all
                    # descendants we find and mark the corresponding block
                    # as invalid.
                    while descendant_blocks:
                        pending_block = descendant_blocks.pop()
                        pending_block.status = BlockStatus.Invalid

                        LOGGER.debug('Marking descendant block invalid: %s',
                                     pending_block)

                        descendant_blocks.extend(
                            self._blocks_pending.pop(pending_block.identifier,
                                                     []))

                # The block is otherwise valid, but we have determined we
                # don't want it as the chain head.
                else:
                    LOGGER.info('Rejected new chain head: %s', new_block)

                    # Submit for verification any immediate descendant blocks
                    # that arrived while we were processing this block.
                    LOGGER.debug(
                        'Verify descendant blocks: %s (%s)', new_block,
                        [block.identifier[:8] for block in descendant_blocks])
                    self._submit_blocks_for_verification(descendant_blocks)

        # pylint: disable=broad-except
        except Exception:
            LOGGER.exception(
                "Unhandled exception in ChainController.on_block_validated()")

    def on_block_received(self, block):
        try:
            with self._lock:
                if self.has_block(block.header_signature):
                    # We already have this block; nothing more to do.
                    return

                if self.chain_head is None:
                    self._set_genesis(block)
                    return

                # If we are already currently processing this block, then
                # don't bother trying to schedule it again.
                if block.identifier in self._blocks_processing:
                    return

                self._block_cache[block.identifier] = block
                self._blocks_pending[block.identifier] = []
                LOGGER.debug("Block received: %s", block)
                if block.previous_block_id in self._blocks_processing or \
                        block.previous_block_id in self._blocks_pending:
                    LOGGER.debug('Block pending: %s', block)
                    # The previous block is being processed (or is itself
                    # waiting), so put this block in the wait queue keyed by
                    # its previous block id.
                    pending_blocks = \
                        self._blocks_pending.get(block.previous_block_id,
                                                 [])
                    # Though rare, the block may already be in the
                    # pending_block list and should not be re-added.
                    if block not in pending_blocks:
                        pending_blocks.append(block)

                    self._blocks_pending[block.previous_block_id] = \
                        pending_blocks
                else:
                    # schedule this block for validation.
                    self._submit_blocks_for_verification([block])
        # pylint: disable=broad-except
        except Exception:
            LOGGER.exception(
                "Unhandled exception in ChainController.on_block_received()")

    def has_block(self, block_id):
        with self._lock:
            if block_id in self._block_cache:
                return True

            if block_id in self._blocks_processing:
                return True

            if block_id in self._blocks_pending:
                return True

            return False

    def _set_genesis(self, block):
        # This is used by a non-genesis journal when it has received the
        # genesis block from the genesis validator
        if block.previous_block_id == NULL_BLOCK_IDENTIFIER:
            chain_id = self._chain_id_manager.get_block_chain_id()
            if chain_id is not None and chain_id != block.identifier:
                LOGGER.warning(
                    "Block id does not match block chain id %s. "
                    "Cannot set initial chain head.: %s", chain_id[:8],
                    block.identifier[:8])
            else:
                state_view = self._state_view_factory.create_view()
                consensus_module = \
                    ConsensusFactory.get_configured_consensus_module(
                        NULL_BLOCK_IDENTIFIER,
                        state_view)

                validator = BlockValidator(
                    consensus_module=consensus_module,
                    new_block=block,
                    block_cache=self._block_cache,
                    state_view_factory=self._state_view_factory,
                    done_cb=self.on_block_validated,
                    executor=self._transaction_executor,
                    squash_handler=self._squash_handler,
                    identity_signer=self._identity_signer,
                    data_dir=self._data_dir,
                    config_dir=self._config_dir,
                    permission_verifier=self._permission_verifier)

                valid = validator.validate_block(block)
                if valid:
                    if chain_id is None:
                        self._chain_id_manager.save_block_chain_id(
                            block.identifier)
                    self._block_store.update_chain([block])
                    self._chain_head = block
                    self._notify_on_chain_updated(self._chain_head)
                else:
                    LOGGER.warning(
                        "The genesis block is not valid. Cannot "
                        "set chain head: %s", block)

        else:
            LOGGER.warning(
                "Cannot set initial chain head, this is not a "
                "genesis block: %s", block)

    def _make_receipts(self, results):
        receipts = []
        for result in results:
            receipt = TransactionReceipt()
            receipt.data.extend([data for data in result.data])
            receipt.state_changes.extend(result.state_changes)
            receipt.events.extend(result.events)
            receipt.transaction_id = result.signature
            receipts.append(receipt)
        return receipts
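
# Hedged usage sketch (assumption, not from the source): feeding a newly
# received BlockWrapper to a running ChainController.
def feed_block(chain_controller, block_wrapper):
    # has_block() checks the block cache, the in-process map and the pending
    # map; queue_block() hands the block to the controller's _ChainThread,
    # which calls on_block_received() on the controller's own thread.
    if not chain_controller.has_block(block_wrapper.header_signature):
        chain_controller.queue_block(block_wrapper)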
Example #21
class TransactionExecutorThread(object):
    """A thread of execution controlled by the TransactionExecutor.
    Provides the functionality that lets the journal process several
    schedulers at once.
    """

    def __init__(self,
                 service,
                 context_manager,
                 scheduler,
                 processors,
                 waiting_threadpool,
                 settings_view_factory,
                 invalid_observers,
                 metrics_registry=None):
        """
        Args:
            service (Interconnect): The zmq internal interface
            context_manager (ContextManager): The cached state for
                transaction processors.
            scheduler (scheduler.Scheduler): Provides the order of txns to
                execute.
            processors (ProcessorIteratorCollection): Provides the next
                transaction processor to send to.
            waiting_threadpool (ThreadPoolExecutor): A thread pool to run
                indefinite waiting functions in.
            settings_view_factory (SettingsViewFactory): Reads the
                configuration state.
            invalid_observers (list): Observers to notify, via
                notify_txn_invalid(), when a transaction is found invalid.
        Attributes:
            _tp_settings_key (str): the key used to reference the part of state
                where the list of required transaction processors is stored.
            _waiters_by_type (_WaitersByType): Queues up transactions based on
                processor type.
        """
        super(TransactionExecutorThread, self).__init__()
        self._service = service
        self._context_manager = context_manager
        self._scheduler = scheduler
        self._processors = processors
        self._settings_view_factory = settings_view_factory
        self._tp_settings_key = "sawtooth.validator.transaction_families"
        self._waiters_by_type = _WaitersByType()
        self._waiting_threadpool = waiting_threadpool
        self._done = False
        self._invalid_observers = invalid_observers
        self._open_futures = {}
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._transaction_execution_count = CounterWrapper(
                metrics_registry.counter('transaction_execution_count'))
        else:
            self._transaction_execution_count = CounterWrapper()

    def _future_done_callback(self, request, result):
        """
        :param request (bytes):the serialized request
        :param result (FutureResult):
        """
        req = processor_pb2.TpProcessRequest()
        req.ParseFromString(request)
        response = processor_pb2.TpProcessResponse()
        response.ParseFromString(result.content)

        if result.connection_id in self._open_futures and \
                req.signature in self._open_futures[result.connection_id]:
            del self._open_futures[result.connection_id][req.signature]

        if response.status == processor_pb2.TpProcessResponse.OK:
            state_sets, state_deletes, events, data = \
                self._context_manager.get_execution_results(req.context_id)

            state_changes = [
                transaction_receipt_pb2.StateChange(
                    address=addr,
                    value=value,
                    type=transaction_receipt_pb2.StateChange.SET)
                for addr, value in state_sets.items()
            ] + [
                transaction_receipt_pb2.StateChange(
                    address=addr,
                    type=transaction_receipt_pb2.StateChange.DELETE)
                for addr in state_deletes
            ]

            self._scheduler.set_transaction_execution_result(
                txn_signature=req.signature,
                is_valid=True,
                context_id=req.context_id,
                state_changes=state_changes,
                events=events,
                data=data)

        elif response.status == processor_pb2.TpProcessResponse.INTERNAL_ERROR:
            LOGGER.error(
                "Transaction processor internal error: %s "
                "(transaction: %s, name: %s, version: %s)",
                response.message,
                req.signature,
                req.header.family_name,
                req.header.family_version)

            processor_type = processor_iterator.ProcessorType(
                req.header.family_name,
                req.header.family_version)

            self._execute_or_wait_for_processor_type(
                processor_type, request, req.signature)

        else:
            self._context_manager.delete_contexts(
                context_id_list=[req.context_id])

            self._scheduler.set_transaction_execution_result(
                txn_signature=req.signature,
                is_valid=False,
                context_id=req.context_id,
                error_message=response.message,
                error_data=response.extended_data)

            for observer in self._invalid_observers:
                observer.notify_txn_invalid(
                    req.signature,
                    response.message,
                    response.extended_data)

    def execute_thread(self):
        try:
            self._execute_schedule()
        except Exception as exc:  # pylint: disable=broad-except
            LOGGER.exception(
                "Unhandled exception while executing schedule: %s", exc)

    def _execute_schedule(self):
        for txn_info in self._scheduler:
            self._transaction_execution_count.inc()

            txn = txn_info.txn
            header = transaction_pb2.TransactionHeader()
            header.ParseFromString(txn.header)

            processor_type = processor_iterator.ProcessorType(
                header.family_name,
                header.family_version)

            config = self._settings_view_factory.create_settings_view(
                txn_info.state_hash)

            transaction_families = config.get_setting(
                key=self._tp_settings_key,
                default_value="[]")

            # After reading the transaction families required by the
            # configuration, try to json.loads them into a Python object.
            # If there is a misconfiguration, proceed as if there were no
            # configuration.
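            # Example (assumption, for illustration only) of a well-formed
            # setting value:
            #   [{"family": "intkey", "version": "1.0", "namespaces": ["1cf126"]}]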
            try:
                transaction_families = json.loads(transaction_families)
                required_transaction_processors = [
                    processor_iterator.ProcessorType(
                        d.get('family'),
                        d.get('version')) for d in transaction_families]
            except ValueError:
                LOGGER.error("sawtooth.validator.transaction_families "
                             "misconfigured. Expecting a json array, found"
                             " %s", transaction_families)
                required_transaction_processors = []

            # First check if the transaction should be failed
            # based on configuration
            if required_transaction_processors and \
                    processor_type not in required_transaction_processors:
                # The txn processor type is not in the required transaction
                # processors, so fail the transaction right away.
                LOGGER.debug("failing transaction %s of type (name=%s, "
                             "version=%s) since it isn't "
                             "required in the configuration",
                             txn.header_signature,
                             processor_type.name,
                             processor_type.version)

                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue

            if processor_type in required_transaction_processors:
                # The txn processor type is in the required
                # transaction processors: check all the outputs of
                # the transaction match one namespace listed
                transaction_family = \
                    next(t for t in transaction_families
                         if t.get('family') == header.family_name and
                         t.get('version') == header.family_version)

                # if no namespaces are indicated, then the empty prefix is
                # inserted by default
                namespaces = transaction_family.get('namespaces', [''])
                if not isinstance(namespaces, list):
                    LOGGER.error("namespaces should be a list for "
                                 "transaction family (name=%s, version=%s)",
                                 processor_type.name,
                                 processor_type.version)
                prefixes = header.outputs
                bad_prefixes = [
                    prefix for prefix in prefixes
                    if not any(prefix.startswith(n) for n in namespaces)
                ]
                for prefix in bad_prefixes:
                    # log each
                    LOGGER.debug("failing transaction %s of type (name=%s,"
                                 "version=%s) because of no namespace listed "
                                 "in %s from the configuration settings can "
                                 "match the prefix %s",
                                 txn.header_signature,
                                 processor_type.name,
                                 processor_type.version,
                                 namespaces,
                                 prefix)

                if bad_prefixes:
                    self._scheduler.set_transaction_execution_result(
                        txn_signature=txn.header_signature,
                        is_valid=False,
                        context_id=None)
                    continue

            try:
                context_id = self._context_manager.create_context(
                    state_hash=txn_info.state_hash,
                    base_contexts=txn_info.base_context_ids,
                    inputs=list(header.inputs),
                    outputs=list(header.outputs))
            except KeyError:
                LOGGER.error(
                    "Error creating context for transaction %s, "
                    "scheduler provided a base context that was not "
                    "in the context manager.", txn.header_signature)
                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue
            except CreateContextException:
                LOGGER.exception("Exception creating context")
                self._scheduler.set_transaction_execution_result(
                    txn_signature=txn.header_signature,
                    is_valid=False,
                    context_id=None)
                continue
            content = processor_pb2.TpProcessRequest(
                header=header,
                payload=txn.payload,
                signature=txn.header_signature,
                context_id=context_id).SerializeToString()

            # Since we have already checked whether the transaction should be
            # failed, all other cases should either be executed or waited for.
            self._execute_or_wait_for_processor_type(
                processor_type=processor_type,
                content=content,
                signature=txn.header_signature)

        self._done = True

    def _execute_or_wait_for_processor_type(
            self, processor_type, content, signature):
        processor = self._processors.get_next_of_type(
            processor_type=processor_type)
        if processor is None:
            LOGGER.debug("no transaction processors registered for "
                         "processor type %s", processor_type)
            if processor_type not in self._waiters_by_type:
                in_queue = queue.Queue()
                in_queue.put_nowait((content, signature))
                waiter = _Waiter(
                    self._send_and_process_result,
                    processor_type=processor_type,
                    processors=self._processors,
                    in_queue=in_queue,
                    waiters_by_type=self._waiters_by_type)
                self._waiters_by_type[processor_type] = waiter
                self._waiting_threadpool.submit(waiter.run_in_threadpool)
            else:
                self._waiters_by_type[processor_type].add_to_in_queue(
                    (content, signature))
        else:
            connection_id = processor.connection_id
            self._send_and_process_result(content, connection_id, signature)

    def _send_and_process_result(self, content, connection_id, signature):
        fut = self._service.send(
            validator_pb2.Message.TP_PROCESS_REQUEST,
            content,
            connection_id=connection_id,
            callback=self._future_done_callback)
        if connection_id in self._open_futures:
            self._open_futures[connection_id].update(
                {signature: fut})
        else:
            self._open_futures[connection_id] = \
                {signature: fut}

    def remove_broken_connection(self, connection_id):
        if connection_id not in self._open_futures:
            # Connection has already been removed.
            return
        self._processors.remove(connection_id)
        futures_to_set = [
            self._open_futures[connection_id][key]
            for key in self._open_futures[connection_id]
        ]

        response = processor_pb2.TpProcessResponse(
            status=processor_pb2.TpProcessResponse.INTERNAL_ERROR)
        result = FutureResult(
            message_type=validator_pb2.Message.TP_PROCESS_RESPONSE,
            content=response.SerializeToString(),
            connection_id=connection_id)
        for fut in futures_to_set:
            fut.set_result(result)
            self._future_done_callback(fut.request, result)

    def is_done(self):
        return self._done and len(self._waiters_by_type) == 0

    def cancel(self):
        for waiter in self._waiters_by_type.values():
            waiter.cancel()
        self._scheduler.cancel()
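
# A minimal sketch (assumption, not from the source) of an invalid-transaction
# observer compatible with the notify_txn_invalid() call made in
# _future_done_callback() above.
class LoggingInvalidObserver:
    def notify_txn_invalid(self, txn_signature, message, extended_data):
        LOGGER.warning("Transaction %s marked invalid: %s",
                       txn_signature[:8], message)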
Example #22
class BlockValidator(object):
    """
    Responsible for validating a block. Handles both chain extensions and
    forks, determines whether the new block should become the chain head, and
    returns the information necessary to make the switch if necessary.
    """

    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._settings_view_factory = SettingsViewFactory(state_view_factory)

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()

    def stop(self):
        self._thread_pool.shutdown(wait=True)

    def _get_previous_block_state_root(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    @staticmethod
    def _validate_transactions_in_batch(batch, chain_commit_state):
        """Verify that all transactions in this batch are unique and that all
        transaction dependencies in this batch have been satisfied.

        :param batch: the batch to verify
        :param chain_commit_state: the current chain commit state to verify the
            batch against
        :return:
        Boolean: True if all dependencies are present and all transactions
        are unique.
        """
        for txn in batch.transactions:
            txn_hdr = TransactionHeader()
            txn_hdr.ParseFromString(txn.header)
            if chain_commit_state.has_transaction(txn.header_signature):
                LOGGER.debug(
                    "Batch invalid due to duplicate transaction: %s",
                    txn.header_signature[:8])
                return False
            for dep in txn_hdr.dependencies:
                if not chain_commit_state.has_transaction(dep):
                    LOGGER.debug(
                        "Batch invalid due to missing transaction dependency;"
                        " transaction %s depends on %s",
                        txn.header_signature[:8], dep[:8])
                    return False
        return True

    def _validate_batches_in_block(
        self, blkw, prev_state_root, chain_commit_state
    ):
        if blkw.block.batches:
            scheduler = self._transaction_executor.create_scheduler(
                self._squash_handler, prev_state_root)
            self._transaction_executor.execute(scheduler)
            try:
                for batch, has_more in look_ahead(blkw.block.batches):
                    if chain_commit_state.has_batch(
                            batch.header_signature):
                        LOGGER.debug("Block(%s) rejected due to duplicate "
                                     "batch, batch: %s", blkw,
                                     batch.header_signature[:8])
                        raise InvalidBatch()

                    # Verify dependencies and uniqueness
                    if self._validate_transactions_in_batch(
                        batch, chain_commit_state
                    ):
                        # Only add transactions to commit state if all
                        # transactions in the batch are good.
                        chain_commit_state.add_batch(
                            batch, add_transactions=True)
                    else:
                        raise InvalidBatch()

                    if has_more:
                        scheduler.add_batch(batch)
                    else:
                        scheduler.add_batch(batch, blkw.state_root_hash)
            except InvalidBatch:
                LOGGER.debug("Invalid batch %s encountered during "
                             "verification of block %s",
                             batch.header_signature[:8],
                             blkw)
                scheduler.cancel()
                return False
            except Exception:
                scheduler.cancel()
                raise

            scheduler.finalize()
            scheduler.complete(block=True)
            state_hash = None

            for batch in blkw.batches:
                batch_result = scheduler.get_batch_execution_result(
                    batch.header_signature)
                if batch_result is not None and batch_result.is_valid:
                    txn_results = \
                        scheduler.get_transaction_execution_results(
                            batch.header_signature)
                    blkw.execution_results.extend(txn_results)
                    state_hash = batch_result.state_hash
                    blkw.num_transactions += len(batch.transactions)
                else:
                    return False
            if blkw.state_root_hash != state_hash:
                LOGGER.debug("Block(%s) rejected due to state root hash "
                             "mismatch: %s != %s", blkw, blkw.state_root_hash,
                             state_hash)
                return False
        return True

    def _validate_permissions(self, blkw, prev_state_root):
        """
        Validate that all of the batch signers and transaction signer for the
        batches in the block are permitted by the transactor permissioning
        roles stored in state as of the previous block. If a transactor is
        found to not be permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, prev_state_root, from_state=True):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw, prev_state_root):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            return enforce_validation_rules(
                self._settings_view_factory.create_settings_view(
                    prev_state_root),
                blkw.header.signer_public_key,
                blkw.batches)
        return True

    def validate_block(self, blkw, consensus, chain_head=None, chain=None):
        if blkw.status == BlockStatus.Valid:
            return True
        elif blkw.status == BlockStatus.Invalid:
            return False

        # pylint: disable=broad-except
        try:
            if chain_head is None:
                # Try to get the chain head from the block store; note that the
                # block store may also return None for the chain head if a
                # genesis block hasn't been committed yet.
                chain_head = self._block_cache.block_store.chain_head

            if chain is None:
                chain = []
            chain_commit_state = ChainCommitState(
                self._block_cache.block_store, chain)

            try:
                prev_state_root = self._get_previous_block_state_root(blkw)
            except KeyError:
                LOGGER.debug(
                    "Block rejected due to missing predecessor: %s", blkw)
                return False

            if not self._validate_permissions(blkw, prev_state_root):
                blkw.status = BlockStatus.Invalid
                return False

            public_key = \
                self._identity_signer.get_public_key().as_hex()
            consensus_block_verifier = consensus.BlockVerifier(
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                validator_id=public_key)

            if not consensus_block_verifier.verify_block(blkw):
                blkw.status = BlockStatus.Invalid
                return False

            if not self._validate_on_chain_rules(blkw, prev_state_root):
                blkw.status = BlockStatus.Invalid
                return False

            if not self._validate_batches_in_block(
                blkw, prev_state_root, chain_commit_state
            ):
                blkw.status = BlockStatus.Invalid
                return False

            # since changes to the chain-head can change the state of the
            # blocks in BlockStore we have to revalidate this block.
            block_store = self._block_cache.block_store

            # The chain_head is None when this is the genesis block or if the
            # block store has no chain_head.
            if chain_head is not None:
                if chain_head.identifier != block_store.chain_head.identifier:
                    raise ChainHeadUpdated()

            blkw.status = BlockStatus.Valid
            return True

        except ChainHeadUpdated as e:
            raise e

        except Exception:
            LOGGER.exception(
                "Unhandled exception BlockPublisher.validate_block()")
            return False

    @staticmethod
    def _compare_chain_height(head_a, head_b):
        """Returns True if head_a is taller, False if head_b is taller, and
        True if the heights are the same."""
        return head_a.block_num - head_b.block_num >= 0

    def _build_fork_diff_to_common_height(self, head_long, head_short):
        """Returns a list of blocks on the longer chain since the greatest
        common height between the two chains. Note that the chains may not
        have the same block id at the greatest common height.

        Args:
            head_long (BlockWrapper)
            head_short (BlockWrapper)

        Returns:
            (list of BlockWrapper) All blocks in the longer chain since the
            last block in the shorter chain. Ordered newest to oldest.

        Raises:
            BlockValidationAborted
                The block is missing a predecessor. Note that normally this
                shouldn't happen because of the completer."""
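        # Worked example (assumption): if head_long is at height 12 and
        # head_short at height 10, fork_diff ends up holding the height-12
        # and height-11 blocks, and the returned blk is head_long's ancestor
        # at height 10.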
        fork_diff = []

        last = head_short.block_num
        blk = head_long

        while blk.block_num > last:
            if blk.previous_block_id == NULL_BLOCK_IDENTIFIER:
                break

            fork_diff.append(blk)
            try:
                blk = self._block_cache[blk.previous_block_id]
            except KeyError:
                LOGGER.debug(
                    "Failed to build fork diff due to missing predecessor: %s",
                    blk)

                # Mark all blocks in the longer chain since the invalid block
                # as invalid.
                for blk in fork_diff:
                    blk.status = BlockStatus.Invalid
                raise BlockValidationAborted()

        return blk, fork_diff

    def _extend_fork_diff_to_common_ancestor(
        self, new_blkw, cur_blkw, new_chain, cur_chain
    ):
        """ Finds a common ancestor of the two chains. new_blkw and cur_blkw
        must be at the same height, or this will always fail.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                LOGGER.info(
                    "Block rejected due to wrong genesis: %s %s",
                    cur_blkw, new_blkw)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()

            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                LOGGER.info(
                    "Block %s rejected due to missing predecessor %s",
                    new_blkw,
                    new_blkw.previous_block_id)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _compare_forks_consensus(self, consensus, chain_head, new_block):
        """Ask the consensus module which fork to choose.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        fork_resolver = consensus.ForkResolver(
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=public_key)

        return fork_resolver.compare_forks(chain_head, new_block)

    @staticmethod
    def _get_batch_commit_changes(new_chain, cur_chain):
        """
        Get all the batches that should be committed from the new chain and
        all the batches that should be uncommitted from the current chain.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def submit_blocks_for_verification(
        self, blocks, consensus, callback
    ):
        for block in blocks:
            self._thread_pool.submit(
                self.process_block_verification,
                block, consensus, callback)

    def process_block_verification(self, block, consensus, callback):
        """
        Main entry for block validation. Takes a given candidate block,
        decides whether it is valid, and, if so, determines whether it should
        become the new chain head. Returns the results to the ChainController
        so that the changeover can be made if necessary.
        """
        try:
            result = BlockValidationResult(block)
            LOGGER.info("Starting block validation of : %s", block)

            # Get the current chain_head and store it in the result
            chain_head = self._block_cache.block_store.chain_head
            result.chain_head = chain_head

            # Create new local variables for current and new block, since
            # these variables get modified later
            current_block = chain_head
            new_block = block

            # Get all the blocks since the greatest common height from the
            # longer chain.
            if self._compare_chain_height(current_block, new_block):
                current_block, result.current_chain =\
                    self._build_fork_diff_to_common_height(
                        current_block, new_block)
            else:
                new_block, result.new_chain =\
                    self._build_fork_diff_to_common_height(
                        new_block, current_block)

            # Add blocks to the two chains until a common ancestor is found
            # or raise an exception if no common ancestor is found
            self._extend_fork_diff_to_common_ancestor(
                new_block, current_block,
                result.new_chain, result.current_chain)

            valid = True
            for blk in reversed(result.new_chain):
                if valid:
                    if not self.validate_block(
                        blk, consensus, chain_head,
                        result.current_chain
                    ):
                        LOGGER.info("Block validation failed: %s", blk)
                        valid = False
                    result.transaction_count += blk.num_transactions
                else:
                    LOGGER.info(
                        "Block marked invalid(invalid predecessor): %s", blk)
                    blk.status = BlockStatus.Invalid

            if not valid:
                callback(False, result)
                return

            # Ask consensus if the new chain should be committed
            LOGGER.info(
                "Comparing current chain head '%s' against new block '%s'",
                chain_head, new_block)
            for i in range(max(
                len(result.new_chain), len(result.current_chain)
            )):
                cur = new = num = "-"
                if i < len(result.current_chain):
                    cur = result.current_chain[i].header_signature[:8]
                    num = result.current_chain[i].block_num
                if i < len(result.new_chain):
                    new = result.new_chain[i].header_signature[:8]
                    num = result.new_chain[i].block_num
                LOGGER.info(
                    "Fork comparison at height %s is between %s and %s",
                    num, cur, new)

            commit_new_chain = self._compare_forks_consensus(
                consensus, chain_head, block)

            # If committing the new chain, get the list of committed batches
            # from the current chain that need to be uncommitted and the list
            # of uncommitted batches from the new chain that need to be
            # committed.
            if commit_new_chain:
                commit, uncommit =\
                    self._get_batch_commit_changes(
                        result.new_chain, result.current_chain)
                result.committed_batches = commit
                result.uncommitted_batches = uncommit

                if result.new_chain[0].previous_block_id \
                        != chain_head.identifier:
                    self._moved_to_fork_count.inc()

            # Pass the results to the callback function
            callback(commit_new_chain, result)
            LOGGER.info("Finished block validation of: %s", block)

        except BlockValidationAborted:
            callback(False, result)
            return
        except ChainHeadUpdated:
            callback(False, result)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s", block)
            # callback to clean up the block out of the processing list.
            callback(False, result)
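
# --- Hedged sketch, not part of the original source ---
# The callback passed to process_block_verification() receives the commit
# decision and the BlockValidationResult built above. A minimal consumer
# could look like this; log_validation_outcome is hypothetical.
def log_validation_outcome(commit_new_chain, result):
    if commit_new_chain:
        LOGGER.info("Switching chain head to %s", result.block)
    else:
        LOGGER.info("Keeping current chain head %s", result.chain_head)
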
Example #23
    def __init__(self,
                 block_cache,
                 block_validator,
                 state_view_factory,
                 chain_head_lock,
                 on_chain_updated,
                 chain_id_manager,
                 data_dir,
                 config_dir,
                 chain_observers,
                 metrics_registry=None):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_validator: The object to use for submitting block validation
                work.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            chain_head_lock: Lock to hold while the chain head is being
                updated, this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                 system the head block in the chain has been changed.
            chain_id_manager: The ChainIdManager instance.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._notify_on_chain_updated = on_chain_updated
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._blocks_processing = {}  # blocks that are currently being
        # processed.
        self._blocks_pending = {}  # blocks whose predecessor is still being
        # processed; once that completes, the block will be scheduled for
        # validation.
        self._chain_id_manager = chain_id_manager

        self._chain_head = None

        self._chain_observers = chain_observers
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._chain_head_gauge = GaugeWrapper(
                metrics_registry.gauge('chain_head', default='no chain head'))
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
            self._blocks_considered_count = CounterWrapper(
                metrics_registry.counter('blocks_considered_count'))
        else:
            self._chain_head_gauge = GaugeWrapper()
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()
            self._blocks_considered_count = CounterWrapper()

        self._block_queue = queue.Queue()
        self._chain_thread = None

        self._block_validator = block_validator

        # Only run this after all member variables have been bound
        self._set_chain_head_from_block_store()
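
# --- Hedged sketch, not part of the original source ---
# metrics_registry is optional: when it is omitted the constructor above
# falls back to bare CounterWrapper()/GaugeWrapper() instances. When metrics
# are wanted, a pyformance registry (the library named in the docstring) can
# be passed in; the exact import below is an assumption about that library.
from pyformance import MetricsRegistry

registry = MetricsRegistry()
registry.counter('blocks_considered_count').inc()
assert registry.counter('blocks_considered_count').get_count() == 1
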
Example #24
    def __init__(self,
                 consensus_module,
                 block_cache,
                 new_block,
                 state_view_factory,
                 done_cb,
                 executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None):
        """Initialize the BlockValidator
        Args:
             consensus_module: The consensus module that contains the
             implementation of the consensus algorithm to use for block
             validation.
             block_cache: The cache of all recent blocks and the processing
             state associated with them.
             new_block: The block to validate.
             state_view_factory: The factory object used to create read-only
             state views.
             done_cb: The method to call when block validation has completed.
             executor: The thread pool to process block validations.
             squash_handler: A parameter passed when creating transaction
             schedulers.
             identity_signer: A cryptographic signer for signing blocks.
             data_dir: Path to location where persistent data for the
             consensus module can be stored.
             config_dir: Path to location where config data for the
             consensus module can be found.
        Returns:
            None
        """
        self._consensus_module = consensus_module
        self._block_cache = block_cache
        self._chain_commit_state = ChainCommitState(
            self._block_cache.block_store, [])
        self._new_block = new_block

        # Set during execution of the BlockValidation to the current
        # chain_head at that time.
        self._chain_head = None

        self._state_view_factory = state_view_factory
        self._done_cb = done_cb
        self._executor = executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._result = {
            'new_block': new_block,
            'chain_head': None,
            'new_chain': [],
            'cur_chain': [],
            'committed_batches': [],
            'uncommitted_batches': [],
            'num_transactions': 0
        }
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = \
            ValidationRuleEnforcer(SettingsViewFactory(state_view_factory))

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()
Example #25
    def __init__(self,
                 block_cache,
                 block_validator,
                 state_view_factory,
                 chain_head_lock,
                 on_chain_updated,
                 chain_id_manager,
                 data_dir,
                 config_dir,
                 chain_observers,
                 metrics_registry=None):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_validator: The object to use for submitting block validation
                work.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            chain_head_lock: Lock to hold while the chain head is being
                updated, this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                 system the head block in the chain has been changed.
            chain_id_manager: The ChainIdManager instance.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._notify_on_chain_updated = on_chain_updated
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._chain_id_manager = chain_id_manager

        self._chain_head = None

        self._chain_observers = chain_observers
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._chain_head_gauge = GaugeWrapper(
                metrics_registry.gauge('chain_head', default='no chain head'))
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
            self._blocks_considered_count = CounterWrapper(
                metrics_registry.counter('blocks_considered_count'))
        else:
            self._chain_head_gauge = GaugeWrapper()
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()
            self._blocks_considered_count = CounterWrapper()

        self._block_queue = queue.Queue()
        self._chain_thread = None

        self._block_validator = block_validator

        # Only run this after all member variables have been bound
        self._set_chain_head_from_block_store()
Example #26
class BlockPublisher(object):
    """
    Responsible for generating new blocks and publishing them when the
    Consensus deems it appropriate.
    """

    def __init__(self,
                 transaction_executor,
                 block_cache,
                 state_view_factory,
                 settings_cache,
                 block_sender,
                 batch_sender,
                 squash_handler,
                 chain_head,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 check_publish_block_frequency,
                 batch_observers,
                 batch_injector_factory=None,
                 metrics_registry=None):
        """
        Initialize the BlockPublisher object

        Args:
            transaction_executor (:obj:`TransactionExecutor`): A
                TransactionExecutor instance.
            block_cache (:obj:`BlockCache`): A BlockCache instance.
            state_view_factory (:obj:`StateViewFactory`): StateViewFactory for
                read-only state views.
            block_sender (:obj:`BlockSender`): The BlockSender instance.
            batch_sender (:obj:`BatchSender`): The BatchSender instance.
            squash_handler (function): Squash handler function for merging
                contexts.
            chain_head (:obj:`BlockWrapper`): The initial chain head.
            identity_signer (:obj:`Signer`): Cryptographic signer for signing
                blocks
            data_dir (str): path to location where persistent data for the
                consensus module can be stored.
            config_dir (str): path to location where configuration can be
                found.
            batch_injector_factory (:obj:`BatchInjectorFactory`): A factory
                for creating BatchInjectors.
            metrics_registry (MetricsRegistry): Metrics registry used to
                create pending batch gauge
        """
        self._lock = RLock()
        self._candidate_block = None  # _CandidateBlock helper,
        # the next block in potential chain
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._settings_cache = settings_cache
        self._transaction_executor = transaction_executor
        self._block_sender = block_sender
        self._batch_publisher = BatchPublisher(identity_signer, batch_sender)
        self._pending_batches = []  # batches awaiting validation,
        # arranged in the order they were received.
        self._pending_batch_ids = []
        self._publish_count_average = _RollingAverage(
            NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT)

        self._chain_head = chain_head  # block (BlockWrapper)
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier
        self._batch_injector_factory = batch_injector_factory

        # For metric gathering
        if metrics_registry:
            self._pending_batch_gauge = GaugeWrapper(
                metrics_registry.gauge('pending_batch_gauge'))
            self._blocks_published_count = CounterWrapper(
                metrics_registry.counter('blocks_published_count'))
        else:
            self._blocks_published_count = CounterWrapper()
            self._pending_batch_gauge = GaugeWrapper()

        self._batch_queue = queue.Queue()
        self._queued_batch_ids = []
        self._batch_observers = batch_observers
        self._check_publish_block_frequency = check_publish_block_frequency
        self._publisher_thread = None

        # A series of states that allow us to check for condition changes.
        # These can be used to log only at the boundary of condition changes.
        self._logging_states = _PublisherLoggingStates()

    def start(self):
        self._publisher_thread = _PublisherThread(
            block_publisher=self,
            batch_queue=self._batch_queue,
            check_publish_block_frequency=self._check_publish_block_frequency)
        self._publisher_thread.start()

    def stop(self):
        if self._publisher_thread is not None:
            self._publisher_thread.stop()
            self._publisher_thread = None

    def queue_batch(self, batch):
        """
        New batch has been received, queue it with the BlockPublisher for
        inclusion in the next block.
        """
        self._batch_queue.put(batch)
        self._queued_batch_ids.append(batch.header_signature)
        for observer in self._batch_observers:
            observer.notify_batch_pending(batch)

    def can_accept_batch(self):
        return len(self._pending_batches) < self._get_current_queue_limit()

    def _get_current_queue_limit(self):
        # Limit the number of batches to 2 times the publishing average.  This
        # allows the queue to grow geometrically, if the queue is drained.
        return 2 * self._publish_count_average.value

    def get_current_queue_info(self):
        """Returns a tuple of the current size of the pending batch queue
        and the current queue limit.
        """
        return (len(self._pending_batches), self._get_current_queue_limit())
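
# --- Hedged sketch, not part of the original source ---
# Worked example of the back-pressure rule above: with a rolling publish
# average of 30 batches the queue limit is 2 * 30 = 60, so a publisher with
# 45 pending batches still accepts new ones while one with 60 does not.
# (The numbers are illustrative only.)
average = 30
limit = 2 * average            # _get_current_queue_limit()
assert (45 < limit) is True    # can_accept_batch() with 45 pending batches
assert (60 < limit) is False   # the queue is full once it reaches the limit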

    @property
    def chain_head_lock(self):
        return self._lock

    def _build_candidate_block(self, chain_head):
        """ Build a candidate block and construct the consensus object to
        validate it.
        :param chain_head: The block to build on top of.
        :return: (BlockBuilder) - The candidate block in a BlockBuilder
        wrapper.
        """
        state_view = BlockWrapper.state_view_for_block(
            chain_head,
            self._state_view_factory)
        consensus_module = ConsensusFactory.get_configured_consensus_module(
            chain_head.header_signature,
            state_view)

        # using chain_head so we can use the settings cache
        max_batches = int(self._settings_cache.get_setting(
            'sawtooth.publisher.max_batches_per_block',
            chain_head.state_root_hash,
            default_value=0))

        public_key = self._identity_signer.get_public_key().as_hex()
        consensus = consensus_module.\
            BlockPublisher(block_cache=self._block_cache,
                           state_view_factory=self._state_view_factory,
                           batch_publisher=self._batch_publisher,
                           data_dir=self._data_dir,
                           config_dir=self._config_dir,
                           validator_id=public_key)

        batch_injectors = []
        if self._batch_injector_factory is not None:
            batch_injectors = self._batch_injector_factory.create_injectors(
                chain_head.identifier)
            if batch_injectors:
                LOGGER.debug("Loaded batch injectors: %s", batch_injectors)

        block_header = BlockHeader(
            block_num=chain_head.block_num + 1,
            previous_block_id=chain_head.header_signature,
            signer_public_key=public_key)
        block_builder = BlockBuilder(block_header)

        if not consensus.initialize_block(block_builder.block_header):
            if not self._logging_states.consensus_not_ready:
                self._logging_states.consensus_not_ready = True
                LOGGER.debug("Consensus not ready to build candidate block.")
            return None

        if self._logging_states.consensus_not_ready:
            self._logging_states.consensus_not_ready = False
            LOGGER.debug("Consensus is ready to build candidate block.")

        # create a new scheduler
        scheduler = self._transaction_executor.create_scheduler(
            self._squash_handler, chain_head.state_root_hash)

        # build the TransactionCommitCache
        committed_txn_cache = TransactionCommitCache(
            self._block_cache.block_store)

        self._transaction_executor.execute(scheduler)
        self._candidate_block = _CandidateBlock(
            self._block_cache.block_store,
            consensus, scheduler,
            committed_txn_cache,
            block_builder,
            max_batches,
            batch_injectors,
            SettingsView(state_view),
            public_key)

        for batch in self._pending_batches:
            if self._candidate_block.can_add_batch:
                self._candidate_block.add_batch(batch)
            else:
                break

    def on_batch_received(self, batch):
        """
        A new batch is received, send it for validation
        :param batch: the new pending batch
        :return: None
        """
        with self._lock:
            # drop the received batch's id from the queued list
            if batch.header_signature in self._queued_batch_ids:
                self._queued_batch_ids.remove(batch.header_signature)
            if self._permission_verifier.is_batch_signer_authorized(batch):
                self._pending_batches.append(batch)
                self._pending_batch_ids.append(batch.header_signature)
                self._pending_batch_gauge.set_value(len(self._pending_batches))
                # if we are building a block then schedule the batch for
                # execution.
                if self._candidate_block and \
                        self._candidate_block.can_add_batch:
                    self._candidate_block.add_batch(batch)
            else:
                LOGGER.debug("Batch has an unauthorized signer. Batch: %s",
                             batch.header_signature)

    def _rebuild_pending_batches(self, committed_batches, uncommitted_batches):
        """When the chain head is changed. This recomputes the list of pending
        transactions
        :param committed_batches: Batches committed in the current chain
        since the root of the fork switching from.
        :param uncommitted_batches: Batches that were committed in the old
        fork since the common root.
        """
        if committed_batches is None:
            committed_batches = []
        if uncommitted_batches is None:
            uncommitted_batches = []

        committed_set = set([x.header_signature for x in committed_batches])

        pending_batches = self._pending_batches

        self._pending_batches = []
        self._pending_batch_ids = []

        num_committed_batches = len(committed_batches)
        if num_committed_batches > 0:
            # Only update the average if either:
            # a. Not drained below the current average
            # b. Drained the queue, but the queue was not bigger than the
            #    current running average
            remainder = len(pending_batches) - num_committed_batches
            if remainder > self._publish_count_average.value or \
                    num_committed_batches > self._publish_count_average.value:
                self._publish_count_average.update(num_committed_batches)

        # Uncommitted and pending batches are disjoint sets,
        # since a batch can only be committed to a chain once.
        for batch in uncommitted_batches:
            if batch.header_signature not in committed_set:
                self._pending_batches.append(batch)
                self._pending_batch_ids.append(batch.header_signature)

        for batch in pending_batches:
            if batch.header_signature not in committed_set:
                self._pending_batches.append(batch)
                self._pending_batch_ids.append(batch.header_signature)
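
# --- Hedged sketch, not part of the original source ---
# Worked example of the rebuild order above: batches uncommitted by the fork
# switch are re-queued first, followed by the batches that were already
# pending, while anything now committed is dropped from both. FakeBatch is a
# hypothetical stand-in for the Batch protobuf.
from collections import namedtuple

FakeBatch = namedtuple('FakeBatch', ['header_signature'])

committed = [FakeBatch('c1')]
uncommitted = [FakeBatch('u1'), FakeBatch('c1')]          # 'c1' is committed
previously_pending = [FakeBatch('p1'), FakeBatch('c1')]

committed_set = {b.header_signature for b in committed}
rebuilt = [b for b in uncommitted
           if b.header_signature not in committed_set]
rebuilt += [b for b in previously_pending
            if b.header_signature not in committed_set]
assert [b.header_signature for b in rebuilt] == ['u1', 'p1']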

    def on_chain_updated(self, chain_head,
                         committed_batches=None,
                         uncommitted_batches=None):
        """
        The existing chain has been updated, the current head block has
        changed.

        :param chain_head: the new head of block_chain, can be None if
        no block publishing is desired.
        :param committed_batches: the set of batches that were committed
         as part of the new chain.
        :param uncommitted_batches: the list of transactions if any that are
        now de-committed when the new chain was selected.
        :return: None
        """
        try:
            with self._lock:
                if chain_head is not None:
                    LOGGER.info('Now building on top of block: %s', chain_head)
                else:
                    LOGGER.info('Block publishing is suspended until new '
                                'chain head arrives.')

                self._chain_head = chain_head

                if self._candidate_block:
                    self._candidate_block.cancel()

                self._candidate_block = None  # we need to make a new
                # _CandidateBlock (if we can) since the block chain has updated
                # under us.
                if chain_head is not None:
                    self._rebuild_pending_batches(committed_batches,
                                                  uncommitted_batches)
                    self._build_candidate_block(chain_head)

                    self._pending_batch_gauge.set_value(
                        len(self._pending_batches))

        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.critical("on_chain_updated exception.")
            LOGGER.exception(exc)

    def on_check_publish_block(self, force=False):
        """Ask the consensus module if it is time to claim the candidate block
        if it is then, claim it and tell the world about it.
        :return:
            None
        """
        try:
            with self._lock:
                if (self._chain_head is not None
                        and self._candidate_block is None
                        and self._pending_batches):
                    self._build_candidate_block(self._chain_head)

                if self._candidate_block and (
                    force or
                    self._candidate_block.has_pending_batches()) and \
                        self._candidate_block.check_publish_block():

                    pending_batches = []  # will receive the list of batches
                    # that were not added to the block
                    injected_batch_ids = \
                        self._candidate_block.injected_batch_ids
                    last_batch = self._candidate_block.last_batch
                    block = self._candidate_block.finalize_block(
                        self._identity_signer,
                        pending_batches)
                    self._candidate_block = None
                    # Update the _pending_batches to reflect what we learned.

                    last_batch_index = self._pending_batches.index(last_batch)
                    unsent_batches = \
                        self._pending_batches[last_batch_index + 1:]
                    self._pending_batches = pending_batches + unsent_batches

                    self._pending_batch_gauge.set_value(
                        len(self._pending_batches))

                    if block:
                        blkw = BlockWrapper(block)
                        LOGGER.info("Claimed Block: %s", blkw)
                        self._block_sender.send(
                            blkw.block, keep_batches=injected_batch_ids)
                        self._blocks_published_count.inc()

                        # We built our candidate, disable processing until
                        # the chain head is updated. Only set this if
                        # we succeeded. Otherwise try again, this
                        # can happen in cases where txn dependencies
                        # did not validate when building the block.
                        self.on_chain_updated(None)

        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.critical("on_check_publish_block exception.")
            LOGGER.exception(exc)

    def has_batch(self, batch_id):
        with self._lock:
            if batch_id in self._pending_batch_ids:
                return True
            if batch_id in self._queued_batch_ids:
                return True

        return False
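
# --- Hedged sketch, not part of the original source ---
# Typical use of has_batch(): a caller checks for duplicates before handing
# a batch to the publisher. submit_if_new and the publisher instance are
# hypothetical.
def submit_if_new(publisher, batch):
    if publisher.has_batch(batch.header_signature):
        return False  # already queued or pending; drop the duplicate
    publisher.queue_batch(batch)
    return True
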
Example #27
class ChainController(object):
    """
    Evaluates new blocks to determine whether they should extend or replace
    the current chain. If a block is valid, the chain is extended.
    """
    def __init__(self,
                 block_cache,
                 block_validator,
                 state_view_factory,
                 chain_head_lock,
                 on_chain_updated,
                 chain_id_manager,
                 data_dir,
                 config_dir,
                 chain_observers,
                 metrics_registry=None):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_validator: The object to use for submitting block validation
                work.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            chain_head_lock: Lock to hold while the chain head is being
                updated, this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                 system the head block in the chain has been changed.
            chain_id_manager: The ChainIdManager instance.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._notify_on_chain_updated = on_chain_updated
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._chain_id_manager = chain_id_manager

        self._chain_head = None

        self._chain_observers = chain_observers
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._chain_head_gauge = GaugeWrapper(
                metrics_registry.gauge('chain_head', default='no chain head'))
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
            self._blocks_considered_count = CounterWrapper(
                metrics_registry.counter('blocks_considered_count'))
        else:
            self._chain_head_gauge = GaugeWrapper()
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()
            self._blocks_considered_count = CounterWrapper()

        self._block_queue = queue.Queue()
        self._chain_thread = None

        self._block_validator = block_validator

        # Only run this after all member variables have been bound
        self._set_chain_head_from_block_store()

    def _set_chain_head_from_block_store(self):
        try:
            self._chain_head = self._block_store.chain_head
            if self._chain_head is not None:
                LOGGER.info("Chain controller initialized with chain head: %s",
                            self._chain_head)
                self._chain_head_gauge.set_value(
                    self._chain_head.identifier[:8])
        except Exception:
            LOGGER.exception(
                "Invalid block store. Head of the block chain cannot be"
                " determined")
            raise

    def start(self):
        self._set_chain_head_from_block_store()
        self._notify_on_chain_updated(self._chain_head)

        self._chain_thread = _ChainThread(chain_controller=self,
                                          block_queue=self._block_queue,
                                          block_cache=self._block_cache)
        self._chain_thread.start()

    def stop(self):
        if self._chain_thread is not None:
            self._chain_thread.stop()
            self._chain_thread = None

    def queue_block(self, block):
        """
        New block has been received, queue it with the chain controller
        for processing.
        """
        self._block_queue.put(block)

    @property
    def chain_head(self):
        return self._chain_head

    def _submit_blocks_for_verification(self, blocks):
        self._block_validator.submit_blocks_for_verification(
            blocks, self.on_block_validated)

    def on_block_validated(self, commit_new_block, result):
        """Message back from the block validator, that the validation is
        complete
        Args:
        commit_new_block (Boolean): whether the new block should become the
        chain head or not.
        result (Dict): Map of the results of the fork resolution.
        Returns:
            None
        """
        try:
            with self._lock:
                self._blocks_considered_count.inc()
                new_block = result.block

                # if the head has changed, since we started the work.
                if result.chain_head.identifier !=\
                        self._chain_head.identifier:
                    LOGGER.info(
                        'Chain head updated from %s to %s while processing '
                        'block: %s', result.chain_head, self._chain_head,
                        new_block)

                    LOGGER.debug('Verify block again: %s ', new_block)
                    self._submit_blocks_for_verification([new_block])

                # If the head is to be updated to the new block.
                elif commit_new_block:
                    with self._chain_head_lock:
                        self._chain_head = new_block

                        # update the block store to have the new chain
                        self._block_store.update_chain(result.new_chain,
                                                       result.current_chain)

                        LOGGER.info("Chain head updated to: %s",
                                    self._chain_head)

                        self._chain_head_gauge.set_value(
                            self._chain_head.identifier[:8])

                        self._committed_transactions_count.inc(
                            result.transaction_count)

                        self._block_num_gauge.set_value(
                            self._chain_head.block_num)

                        # tell the BlockPublisher that the chain has been
                        # updated
                        self._notify_on_chain_updated(
                            self._chain_head, result.committed_batches,
                            result.uncommitted_batches)

                        for batch in new_block.batches:
                            if batch.trace:
                                LOGGER.debug("TRACE %s: %s",
                                             batch.header_signature,
                                             self.__class__.__name__)

                    for block in reversed(result.new_chain):
                        receipts = self._make_receipts(block.execution_results)
                        # Update all chain observers
                        for observer in self._chain_observers:
                            observer.chain_update(block, receipts)

                # The block is otherwise valid, but we have determined we
                # don't want it as the chain head.
                else:
                    LOGGER.info('Rejected new chain head: %s', new_block)

        # pylint: disable=broad-except
        except Exception:
            LOGGER.exception(
                "Unhandled exception in ChainController.on_block_validated()")

    def on_block_received(self, block):
        try:
            with self._lock:
                if self.has_block(block.header_signature):
                    # do we already have this block
                    return

                if self.chain_head is None:
                    self._set_genesis(block)
                    return

                self._block_cache[block.identifier] = block

                # schedule this block for validation.
                self._submit_blocks_for_verification([block])

        # pylint: disable=broad-except
        except Exception:
            LOGGER.exception(
                "Unhandled exception in ChainController.on_block_received()")

    def has_block(self, block_id):
        with self._lock:
            if block_id in self._block_cache:
                return True

            if self._block_validator.in_process(block_id):
                return True

            if self._block_validator.in_pending(block_id):
                return True

            return False

    def _set_genesis(self, block):
        # This is used by a non-genesis journal when it has received the
        # genesis block from the genesis validator
        if block.previous_block_id == NULL_BLOCK_IDENTIFIER:
            chain_id = self._chain_id_manager.get_block_chain_id()
            if chain_id is not None and chain_id != block.identifier:
                LOGGER.warning(
                    "Block id does not match block chain id %s. "
                    "Cannot set initial chain head.: %s", chain_id[:8],
                    block.identifier[:8])
            else:
                try:
                    self._block_validator.validate_block(block)
                except BlockValidationError as err:
                    LOGGER.warning(
                        'Cannot set chain head; '
                        'genesis block %s is not valid: %s', block, err)
                    return

                if chain_id is None:
                    self._chain_id_manager.save_block_chain_id(
                        block.identifier)
                self._block_store.update_chain([block])
                self._chain_head = block
                self._notify_on_chain_updated(self._chain_head)

        else:
            LOGGER.warning(
                "Cannot set initial chain head, this is not a "
                "genesis block: %s", block)

    def _make_receipts(self, results):
        receipts = []
        for result in results:
            receipt = TransactionReceipt()
            receipt.data.extend(result.data)
            receipt.state_changes.extend(result.state_changes)
            receipt.events.extend(result.events)
            receipt.transaction_id = result.signature
            receipts.append(receipt)
        return receipts
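
# --- Hedged sketch, not part of the original source ---
# Shape of the execution results consumed by _make_receipts() above: each
# result is expected to expose data, state_changes, events and a signature.
# FakeExecutionResult is a hypothetical stand-in for the executor's results.
from collections import namedtuple

FakeExecutionResult = namedtuple(
    'FakeExecutionResult', ['data', 'state_changes', 'events', 'signature'])

result = FakeExecutionResult(
    data=[b'payload'], state_changes=[], events=[], signature='txn-id')
# _make_receipts([result]) would yield one TransactionReceipt whose
# transaction_id is 'txn-id' and whose data contains b'payload'.
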
Example #28
    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300,
                 metrics_registry=None):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING this time should always be less than
            cache_keep_time or the validator can get into a state where it
            fails to make progress because it thinks it has already requested
            something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store,
                                      cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        if metrics_registry:
            # Tracks how many times an unsatisfied dependency is found
            self._unsatisfied_dependency_count = CounterWrapper(
                metrics_registry.counter(
                    'completer.unsatisfied_dependency_count'))
            # Tracks the length of the completer's _seen_txns
            self._seen_txns_length = GaugeWrapper(
                metrics_registry.gauge(
                    'completer.seen_txns_length'))
            # Tracks the length of the completer's _incomplete_blocks
            self._incomplete_blocks_length = GaugeWrapper(
                metrics_registry.gauge(
                    'completer.incomplete_blocks_length'))
            # Tracks the length of the completer's _incomplete_batches
            self._incomplete_batches_length = GaugeWrapper(
                metrics_registry.gauge(
                    'completer.incomplete_batches_length'))
        else:
            self._unsatisfied_dependency_count = CounterWrapper()
            self._seen_txns_length = GaugeWrapper()
            self._incomplete_blocks_length = GaugeWrapper()
            self._incomplete_batches_length = GaugeWrapper()
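
# --- Hedged sketch, not part of the original source ---
# The docstring above warns that requested_keep_time must stay below
# cache_keep_time, otherwise the completer can believe it has already
# requested something it no longer holds. A defensive caller could enforce
# that invariant up front; check_completer_timeouts is hypothetical.
def check_completer_timeouts(cache_keep_time=1200, requested_keep_time=300):
    if requested_keep_time >= cache_keep_time:
        raise ValueError(
            "requested_keep_time (%s) must be less than cache_keep_time (%s)"
            % (requested_keep_time, cache_keep_time))

check_completer_timeouts()  # the constructor defaults above satisfy the rule
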
Example #29
class BlockPublisher(object):
    """
    Responsible for generating new blocks and publishing them when the
    Consensus deems it appropriate.
    """
    def __init__(self,
                 transaction_executor,
                 block_cache,
                 state_view_factory,
                 settings_cache,
                 block_sender,
                 batch_sender,
                 squash_handler,
                 chain_head,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 check_publish_block_frequency,
                 batch_observers,
                 batch_injector_factory=None,
                 metrics_registry=None):
        """
        Initialize the BlockPublisher object

        Args:
            transaction_executor (:obj:`TransactionExecutor`): A
                TransactionExecutor instance.
            block_cache (:obj:`BlockCache`): A BlockCache instance.
            state_view_factory (:obj:`StateViewFactory`): StateViewFactory for
                read-only state views.
            block_sender (:obj:`BlockSender`): The BlockSender instance.
            batch_sender (:obj:`BatchSender`): The BatchSender instance.
            squash_handler (function): Squash handler function for merging
                contexts.
            chain_head (:obj:`BlockWrapper`): The initial chain head.
            identity_signer (:obj:`Signer`): Cryptographic signer for signing
                blocks
            data_dir (str): path to location where persistent data for the
                consensus module can be stored.
            config_dir (str): path to location where configuration can be
                found.
            batch_injector_factory (:obj:`BatchInjectorFactory`): A factory
                for creating BatchInjectors.
            metrics_registry (MetricsRegistry): Metrics registry used to
                create pending batch gauge
        """
        self._lock = RLock()
        self._candidate_block = None  # _CandidateBlock helper,
        # the next block in potential chain
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._settings_cache = settings_cache
        self._transaction_executor = transaction_executor
        self._block_sender = block_sender
        self._batch_publisher = BatchPublisher(identity_signer, batch_sender)
        self._pending_batches = []  # batches awaiting validation,
        # arranged in the order they were received.
        self._pending_batch_ids = []
        self._publish_count_average = _RollingAverage(
            NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT)

        self._chain_head = chain_head  # block (BlockWrapper)
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier
        self._batch_injector_factory = batch_injector_factory

        # For metric gathering
        if metrics_registry:
            self._pending_batch_gauge = GaugeWrapper(
                metrics_registry.gauge('pending_batch_gauge'))
            self._blocks_published_count = CounterWrapper(
                metrics_registry.counter('blocks_published_count'))
        else:
            self._blocks_published_count = CounterWrapper()
            self._pending_batch_gauge = GaugeWrapper()

        self._batch_queue = queue.Queue()
        self._queued_batch_ids = []
        self._batch_observers = batch_observers
        self._check_publish_block_frequency = check_publish_block_frequency
        self._publisher_thread = None

        # A series of states that allow us to check for condition changes.
        # These can be used to log only at the boundary of condition changes.
        self._logging_states = _PublisherLoggingStates()

    def start(self):
        self._publisher_thread = _PublisherThread(
            block_publisher=self,
            batch_queue=self._batch_queue,
            check_publish_block_frequency=self._check_publish_block_frequency)
        self._publisher_thread.start()

    def stop(self):
        if self._publisher_thread is not None:
            self._publisher_thread.stop()
            self._publisher_thread = None

    def queue_batch(self, batch):
        """
        New batch has been received, queue it with the BlockPublisher for
        inclusion in the next block.
        """
        self._batch_queue.put(batch)
        self._queued_batch_ids.append(batch.header_signature)
        for observer in self._batch_observers:
            observer.notify_batch_pending(batch)

    def can_accept_batch(self):
        return len(self._pending_batches) < self._get_current_queue_limit()

    def _get_current_queue_limit(self):
        # Limit the number of batches to 2 times the publishing average.  This
        # allows the queue to grow geometrically, if the queue is drained.
        return 2 * self._publish_count_average.value

    def get_current_queue_info(self):
        """Returns a tuple of the current size of the pending batch queue
        and the current queue limit.
        """
        return (len(self._pending_batches), self._get_current_queue_limit())

    @property
    def chain_head_lock(self):
        return self._lock

    def _build_candidate_block(self, chain_head):
        """ Build a candidate block and construct the consensus object to
        validate it.
        :param chain_head: The block to build on top of.
        :return: (BlockBuilder) - The candidate block in a BlockBuilder
        wrapper.
        """
        state_view = BlockWrapper.state_view_for_block(
            chain_head, self._state_view_factory)
        consensus_module = ConsensusFactory.get_configured_consensus_module(
            chain_head.header_signature, state_view)

        # using chain_head so we can use the settings cache
        max_batches = int(
            self._settings_cache.get_setting(
                'sawtooth.publisher.max_batches_per_block',
                chain_head.state_root_hash,
                default_value=0))

        public_key = self._identity_signer.get_public_key().as_hex()
        consensus = consensus_module.\
            BlockPublisher(block_cache=self._block_cache,
                           state_view_factory=self._state_view_factory,
                           batch_publisher=self._batch_publisher,
                           data_dir=self._data_dir,
                           config_dir=self._config_dir,
                           validator_id=public_key)

        batch_injectors = []
        if self._batch_injector_factory is not None:
            batch_injectors = self._batch_injector_factory.create_injectors(
                chain_head.identifier)
            if batch_injectors:
                LOGGER.debug("Loaded batch injectors: %s", batch_injectors)

        block_header = BlockHeader(
            block_num=chain_head.block_num + 1,
            previous_block_id=chain_head.header_signature,
            signer_public_key=public_key)
        block_builder = BlockBuilder(block_header)

        if not consensus.initialize_block(block_builder.block_header):
            if not self._logging_states.consensus_not_ready:
                self._logging_states.consensus_not_ready = True
                LOGGER.debug("Consensus not ready to build candidate block.")
            return None

        if self._logging_states.consensus_not_ready:
            self._logging_states.consensus_not_ready = False
            LOGGER.debug("Consensus is ready to build candidate block.")

        # create a new scheduler
        scheduler = self._transaction_executor.create_scheduler(
            self._squash_handler, chain_head.state_root_hash)

        # build the TransactionCommitCache
        committed_txn_cache = TransactionCommitCache(
            self._block_cache.block_store)

        self._transaction_executor.execute(scheduler)
        self._candidate_block = _CandidateBlock(
            self._block_cache.block_store, consensus, scheduler,
            committed_txn_cache, block_builder, max_batches, batch_injectors,
            SettingsView(state_view), public_key)

        for batch in self._pending_batches:
            if self._candidate_block.can_add_batch:
                self._candidate_block.add_batch(batch)
            else:
                break

    def on_batch_received(self, batch):
        """
        A new batch is received, send it for validation
        :param batch: the new pending batch
        :return: None
        """
        with self._lock:
            # drop the received batch's id from the queued list
            if batch.header_signature in self._queued_batch_ids:
                self._queued_batch_ids.remove(batch.header_signature)
            if self._permission_verifier.is_batch_signer_authorized(batch):
                self._pending_batches.append(batch)
                self._pending_batch_ids.append(batch.header_signature)
                self._pending_batch_gauge.set_value(len(self._pending_batches))
                # if we are building a block then schedule the batch for
                # execution.
                if self._candidate_block and \
                        self._candidate_block.can_add_batch:
                    self._candidate_block.add_batch(batch)
            else:
                LOGGER.debug("Batch has an unauthorized signer. Batch: %s",
                             batch.header_signature)

    def _rebuild_pending_batches(self, committed_batches, uncommitted_batches):
        """When the chain head is changed. This recomputes the list of pending
        transactions
        :param committed_batches: Batches committed in the current chain
        since the root of the fork switching from.
        :param uncommitted_batches: Batches that were committed in the old
        fork since the common root.
        """
        if committed_batches is None:
            committed_batches = []
        if uncommitted_batches is None:
            uncommitted_batches = []

        committed_set = set([x.header_signature for x in committed_batches])

        pending_batches = self._pending_batches

        self._pending_batches = []
        self._pending_batch_ids = []

        num_committed_batches = len(committed_batches)
        if num_committed_batches > 0:
            # Only update the average if either:
            # a. Not drained below the current average
            # b. Drained the queue, but the queue was not bigger than the
            #    current running average
            remainder = len(pending_batches) - num_committed_batches
            if remainder > self._publish_count_average.value or \
                    num_committed_batches > self._publish_count_average.value:
                self._publish_count_average.update(num_committed_batches)

        # Uncommitted and pending batches are disjoint sets,
        # since a batch can only be committed to a chain once.
        for batch in uncommitted_batches:
            if batch.header_signature not in committed_set:
                self._pending_batches.append(batch)
                self._pending_batch_ids.append(batch.header_signature)

        for batch in pending_batches:
            if batch.header_signature not in committed_set:
                self._pending_batches.append(batch)
                self._pending_batch_ids.append(batch.header_signature)

    def on_chain_updated(self,
                         chain_head,
                         committed_batches=None,
                         uncommitted_batches=None):
        """
        The existing chain has been updated, the current head block has
        changed.

        :param chain_head: the new head of block_chain, can be None if
        no block publishing is desired.
        :param committed_batches: the set of batches that were committed
         as part of the new chain.
        :param uncommitted_batches: the list of transactions if any that are
        now de-committed when the new chain was selected.
        :return: None
        """
        try:
            with self._lock:
                if chain_head is not None:
                    LOGGER.info('Now building on top of block: %s', chain_head)
                else:
                    LOGGER.info('Block publishing is suspended until new '
                                'chain head arrives.')

                self._chain_head = chain_head

                if self._candidate_block:
                    self._candidate_block.cancel()

                self._candidate_block = None  # we need to make a new
                # _CandidateBlock (if we can) since the block chain has updated
                # under us.
                if chain_head is not None:
                    self._rebuild_pending_batches(committed_batches,
                                                  uncommitted_batches)
                    self._build_candidate_block(chain_head)

                    self._pending_batch_gauge.set_value(
                        len(self._pending_batches))

        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.critical("on_chain_updated exception.")
            LOGGER.exception(exc)

    def on_check_publish_block(self, force=False):
        """Ask the consensus module if it is time to claim the candidate block
        if it is then, claim it and tell the world about it.
        :return:
            None
        """
        try:
            with self._lock:
                if (self._chain_head is not None
                        and self._candidate_block is None
                        and self._pending_batches):
                    self._build_candidate_block(self._chain_head)

                if self._candidate_block and (
                    force or
                    self._candidate_block.has_pending_batches()) and \
                        self._candidate_block.check_publish_block():

                    pending_batches = []  # will receive the list of batches
                    # that were not added to the block
                    injected_batch_ids = \
                        self._candidate_block.injected_batch_ids
                    last_batch = self._candidate_block.last_batch
                    block = self._candidate_block.finalize_block(
                        self._identity_signer, pending_batches)
                    self._candidate_block = None
                    # Update the _pending_batches to reflect what we learned.

                    last_batch_index = self._pending_batches.index(last_batch)
                    unsent_batches = \
                        self._pending_batches[last_batch_index + 1:]
                    self._pending_batches = pending_batches + unsent_batches

                    self._pending_batch_gauge.set_value(
                        len(self._pending_batches))

                    if block:
                        blkw = BlockWrapper(block)
                        LOGGER.info("Claimed Block: %s", blkw)
                        self._block_sender.send(
                            blkw.block, keep_batches=injected_batch_ids)
                        self._blocks_published_count.inc()

                        # We built our candidate; disable processing until
                        # the chain head is updated. Only do this if we
                        # succeeded. Otherwise try again; this can happen in
                        # cases where txn dependencies did not validate when
                        # building the block.
                        self.on_chain_updated(None)

        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.critical("on_check_publish_block exception.")
            LOGGER.exception(exc)

    def has_batch(self, batch_id):
        with self._lock:
            if batch_id in self._pending_batch_ids:
                return True
            if batch_id in self._queued_batch_ids:
                return True

        return False
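
A minimal sketch of a driver loop for the publisher above. The thread class, its name, and the polling interval are illustrative assumptions rather than the validator's actual wiring; they only show how on_check_publish_block() is meant to be called periodically.

import time
from threading import Thread


class _IllustrativePublisherLoop(Thread):
    """Hypothetical driver thread: periodically asks the publisher whether
    the candidate block can be claimed and published."""

    def __init__(self, publisher, check_frequency=0.1):
        super().__init__(name='IllustrativePublisherLoop')
        self._publisher = publisher
        self._check_frequency = check_frequency
        self._exit = False

    def run(self):
        while not self._exit:
            self._publisher.on_check_publish_block()
            time.sleep(self._check_frequency)

    def stop(self):
        self._exit = True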
Example #30
    def __init__(self,
                 transaction_executor,
                 block_cache,
                 state_view_factory,
                 settings_cache,
                 block_sender,
                 batch_sender,
                 squash_handler,
                 chain_head,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 check_publish_block_frequency,
                 batch_observers,
                 batch_injector_factory=None,
                 metrics_registry=None):
        """
        Initialize the BlockPublisher object

        Args:
            transaction_executor (:obj:`TransactionExecutor`): A
                TransactionExecutor instance.
            block_cache (:obj:`BlockCache`): A BlockCache instance.
            state_view_factory (:obj:`StateViewFactory`): StateViewFactory for
                read-only state views.
            block_sender (:obj:`BlockSender`): The BlockSender instance.
            batch_sender (:obj:`BatchSender`): The BatchSender instance.
            squash_handler (function): Squash handler function for merging
                contexts.
            chain_head (:obj:`BlockWrapper`): The initial chain head.
            identity_signer (:obj:`Signer`): Cryptographic signer for signing
                blocks
            data_dir (str): path to location where persistent data for the
                consensus module can be stored.
            config_dir (str): path to location where configuration can be
                found.
            batch_injector_factory (:obj:`BatchInjectorFactory`): A factory
                for creating BatchInjectors.
            metrics_registry (MetricsRegistry): Metrics registry used to
                create pending batch gauge
        """
        self._lock = RLock()
        self._candidate_block = None  # _CandidateBlock helper,
        # the next block in potential chain
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._settings_cache = settings_cache
        self._transaction_executor = transaction_executor
        self._block_sender = block_sender
        self._batch_publisher = BatchPublisher(identity_signer, batch_sender)
        self._pending_batches = []  # batches we are waiting for validation,
        # arranged in the order of batches received.
        self._pending_batch_ids = []
        self._publish_count_average = _RollingAverage(
            NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT)

        self._chain_head = chain_head  # block (BlockWrapper)
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier
        self._batch_injector_factory = batch_injector_factory

        # For metric gathering
        if metrics_registry:
            self._pending_batch_gauge = GaugeWrapper(
                metrics_registry.gauge('pending_batch_gauge'))
            self._blocks_published_count = CounterWrapper(
                metrics_registry.counter('blocks_published_count'))
        else:
            self._blocks_published_count = CounterWrapper()
            self._pending_batch_gauge = GaugeWrapper()

        self._batch_queue = queue.Queue()
        self._queued_batch_ids = []
        self._batch_observers = batch_observers
        self._check_publish_block_frequency = check_publish_block_frequency
        self._publisher_thread = None

        # A series of states that allow us to check for condition changes.
        # These can be used to log only at the boundary of condition changes.
        self._logging_states = _PublisherLoggingStates()
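
The metrics fields above fall back to no-op wrappers when no metrics registry is supplied. A minimal sketch of that null-object pattern, assuming only the inc()/set_value() interface used throughout this code; the real wrapper implementations may differ.

class CounterWrapper(object):
    """Sketch: forwards inc() to a pyformance counter if one was supplied,
    otherwise silently does nothing."""

    def __init__(self, counter=None):
        self._counter = counter

    def inc(self, val=1):
        if self._counter is not None:
            self._counter.inc(val)


class GaugeWrapper(object):
    """Sketch: forwards set_value() to a pyformance gauge if one was
    supplied, otherwise silently does nothing."""

    def __init__(self, gauge=None):
        self._gauge = gauge

    def set_value(self, val):
        if self._gauge is not None:
            self._gauge.set_value(val)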
Example #31
class ChainController(object):
    """
    Evaluates new blocks to determine whether they should extend or replace
    the current chain. If a block is valid, the chain is extended.
    """

    def __init__(self,
                 block_cache,
                 block_validator,
                 state_view_factory,
                 chain_head_lock,
                 on_chain_updated,
                 chain_id_manager,
                 data_dir,
                 config_dir,
                 chain_observers,
                 metrics_registry=None):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_validator: The object to use for submitting block validation
                work.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            chain_head_lock: Lock to hold while the chain head is being
                updated, this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                 system the head block in the chain has been changed.
            chain_id_manager: The ChainIdManager instance.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._notify_on_chain_updated = on_chain_updated
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._chain_id_manager = chain_id_manager

        self._chain_head = None

        self._chain_observers = chain_observers
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._chain_head_gauge = GaugeWrapper(
                metrics_registry.gauge('chain_head', default='no chain head'))
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
            self._blocks_considered_count = CounterWrapper(
                metrics_registry.counter('blocks_considered_count'))
        else:
            self._chain_head_gauge = GaugeWrapper()
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()
            self._blocks_considered_count = CounterWrapper()

        self._block_queue = queue.Queue()
        self._chain_thread = None

        self._block_validator = block_validator

        # Only run this after all member variables have been bound
        self._set_chain_head_from_block_store()

    def _set_chain_head_from_block_store(self):
        try:
            self._chain_head = self._block_store.chain_head
            if self._chain_head is not None:
                LOGGER.info("Chain controller initialized with chain head: %s",
                            self._chain_head)
                self._chain_head_gauge.set_value(
                    self._chain_head.identifier[:8])
        except Exception:
            LOGGER.exception(
                "Invalid block store. Head of the block chain cannot be"
                " determined")
            raise

    def start(self):
        self._set_chain_head_from_block_store()
        self._notify_on_chain_updated(self._chain_head)

        self._chain_thread = _ChainThread(
            chain_controller=self,
            block_queue=self._block_queue,
            block_cache=self._block_cache)
        self._chain_thread.start()

    def stop(self):
        if self._chain_thread is not None:
            self._chain_thread.stop()
            self._chain_thread = None

    def queue_block(self, block):
        """
        New block has been received, queue it with the chain controller
        for processing.
        """
        self._block_queue.put(block)

    @property
    def chain_head(self):
        return self._chain_head

    def _submit_blocks_for_verification(self, blocks):
        self._block_validator.submit_blocks_for_verification(
            blocks, self.on_block_validated)

    def on_block_validated(self, commit_new_block, result):
        """Message back from the block validator, that the validation is
        complete
        Args:
        commit_new_block (Boolean): whether the new block should become the
        chain head or not.
        result (Dict): Map of the results of the fork resolution.
        Returns:
            None
        """
        try:
            with self._lock:
                self._blocks_considered_count.inc()
                new_block = result.block

                # if the head has changed since we started the work.
                if result.chain_head.identifier !=\
                        self._chain_head.identifier:
                    LOGGER.info(
                        'Chain head updated from %s to %s while processing '
                        'block: %s',
                        result.chain_head,
                        self._chain_head,
                        new_block)

                    LOGGER.debug('Verify block again: %s ', new_block)
                    self._submit_blocks_for_verification([new_block])

                # If the head is to be updated to the new block.
                elif commit_new_block:
                    with self._chain_head_lock:
                        self._chain_head = new_block

                        # update the block store to have the new chain
                        self._block_store.update_chain(result.new_chain,
                                                       result.current_chain)

                        LOGGER.info(
                            "Chain head updated to: %s",
                            self._chain_head)

                        self._chain_head_gauge.set_value(
                            self._chain_head.identifier[:8])

                        self._committed_transactions_count.inc(
                            result.transaction_count)

                        self._block_num_gauge.set_value(
                            self._chain_head.block_num)

                        # tell the BlockPublisher that the chain is updated
                        self._notify_on_chain_updated(
                            self._chain_head,
                            result.committed_batches,
                            result.uncommitted_batches)

                        for batch in new_block.batches:
                            if batch.trace:
                                LOGGER.debug("TRACE %s: %s",
                                             batch.header_signature,
                                             self.__class__.__name__)

                    for block in reversed(result.new_chain):
                        receipts = self._make_receipts(block.execution_results)
                        # Update all chain observers
                        for observer in self._chain_observers:
                            observer.chain_update(block, receipts)

                # The block is otherwise valid, but we have determined we
                # don't want it as the chain head.
                else:
                    LOGGER.info('Rejected new chain head: %s', new_block)

        # pylint: disable=broad-except
        except Exception:
            LOGGER.exception(
                "Unhandled exception in ChainController.on_block_validated()")

    def on_block_received(self, block):
        try:
            with self._lock:
                if self.has_block(block.header_signature):
                    # do we already have this block
                    return

                if self.chain_head is None:
                    self._set_genesis(block)
                    return

                self._block_cache[block.identifier] = block

                # schedule this block for validation.
                self._submit_blocks_for_verification([block])

        # pylint: disable=broad-except
        except Exception:
            LOGGER.exception(
                "Unhandled exception in ChainController.on_block_received()")

    def has_block(self, block_id):
        with self._lock:
            if block_id in self._block_cache:
                return True

            if self._block_validator.in_process(block_id):
                return True

            if self._block_validator.in_pending(block_id):
                return True

            return False

    def _set_genesis(self, block):
        # This is used by a non-genesis journal when it has received the
        # genesis block from the genesis validator
        if block.previous_block_id == NULL_BLOCK_IDENTIFIER:
            chain_id = self._chain_id_manager.get_block_chain_id()
            if chain_id is not None and chain_id != block.identifier:
                LOGGER.warning("Block id does not match block chain id %s. "
                               "Cannot set initial chain head.: %s",
                               chain_id[:8], block.identifier[:8])
            else:
                try:
                    self._block_validator.validate_block(block)
                except BlockValidationError as err:
                    LOGGER.warning(
                        'Cannot set chain head; '
                        'genesis block %s is not valid: %s',
                        block, err)
                    return

                if chain_id is None:
                    self._chain_id_manager.save_block_chain_id(
                        block.identifier)
                self._block_store.update_chain([block])
                self._chain_head = block
                self._notify_on_chain_updated(self._chain_head)

        else:
            LOGGER.warning("Cannot set initial chain head, this is not a "
                           "genesis block: %s", block)

    def _make_receipts(self, results):
        receipts = []
        for result in results:
            receipt = TransactionReceipt()
            receipt.data.extend(result.data)
            receipt.state_changes.extend(result.state_changes)
            receipt.events.extend(result.events)
            receipt.transaction_id = result.signature
            receipts.append(receipt)
        return receipts
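
Each chain observer registered with the controller receives the newly committed block together with the receipts built by _make_receipts(). A minimal observer sketch, assuming only the chain_update(block, receipts) call shown above; the class name and logging are illustrative.

import logging


class LoggingChainObserver(object):
    """Hypothetical observer: logs each committed block and its receipt
    count."""

    def __init__(self):
        self._logger = logging.getLogger(__name__)

    def chain_update(self, block, receipts):
        self._logger.info('committed block %s with %d receipts',
                          block.identifier[:8], len(receipts))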
Example #32
class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and all
    the batches are present in the batch list and in the order specified by the
    block header. If the predecessor or a batch is missing, a request message
    is sent out over the gossip network. It also checks that all batches
    have their dependencies satisfied; otherwise it requests the batch that
    contains the missing transaction.
    """
    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300,
                 metrics_registry=None):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING this time should always be less than
            cache_keep_time or the validator can get into a state where it
            fails to make progress because it thinks it has already requested
            something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        if metrics_registry:
            # Tracks how many times an unsatisfied dependency is found
            self._unsatisfied_dependency_count = CounterWrapper(
                metrics_registry.counter(
                    'completer.unsatisfied_dependency_count'))
            # Tracks the length of the completer's _seen_txns
            self._seen_txns_length = GaugeWrapper(
                metrics_registry.gauge('completer.seen_txns_length'))
            # Tracks the length of the completer's _incomplete_blocks
            self._incomplete_blocks_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_blocks_length'))
            # Tracks the length of the completer's _incomplete_batches
            self._incomplete_batches_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_batches_length'))
        else:
            self._unsatisfied_dependency_count = CounterWrapper()
            self._seen_txns_length = GaugeWrapper()
            self._incomplete_blocks_length = GaugeWrapper()
            self._incomplete_batches_length = GaugeWrapper()

    def _complete_block(self, block):
        """ Check the block to see if it is complete and if it can be passed to
            the journal. If the block's predecessor is not in the block_cache
            the predecessor is requested and the current block is added to the
            the incomplete_block cache. If the block.batches and
            block.header.batch_ids are not the same length, the batch_id list
            is checked against the batch_cache to see if the batch_list can be
            built. If any batches are missing from the block and we do not have
            the batches in the batch_cache, they are requested. The block is
            then added to the incomplete_block cache. If we can complete the
            block, a new batch list is created in the correct order and added
            to the block. The block is now considered complete and is returned.
            If block.batches and block.header.batch_ids are the same length,
            the block's batch list needs to be in the same order as the
            block.header.batch_ids list. If the block has all of its expected
            batches but are not in the correct order, the batch list is rebuilt
            and added to the block. Once a block has the correct batch list it
            is added to the block_cache and is returned.

        """

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None

        if block.previous_block_id not in self.block_cache:
            if not self._has_block(block.previous_block_id):
                if block.previous_block_id not in self._incomplete_blocks:
                    self._incomplete_blocks[block.previous_block_id] = [block]
                elif block not in \
                        self._incomplete_blocks[block.previous_block_id]:
                    self._incomplete_blocks[block.previous_block_id] += [block]

                # We have already requested the block, do not do so again
                if block.previous_block_id in self._requested:
                    return None

                LOGGER.debug("Request missing predecessor: %s",
                             block.previous_block_id)
                self._requested[block.previous_block_id] = None
                self.gossip.broadcast_block_request(block.previous_block_id)
                return None

        # Check for the same number of batch_ids and batches.
        # If the block has fewer batches than batch_ids, start building the
        # batch list. If it has more, there is a batch that does not belong
        # and the block should be dropped.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # used to supplement batch_cache, contains batches already in block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]

                    # We have already requested the batch, do not do so again
                    if batch_id in self._requested:
                        return None
                    self._requested[batch_id] = None
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            del block.batches[:]
            # reset batches with the full list of batches
            block.batches.extend(batches)
            if block.header_signature in self._requested:
                del self._requested[block.header_signature]
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                # Clear batches from block
                del block.batches[:]
                # reset batches with the full list of batches
                if batches is not None:
                    block.batches.extend(batches)
                else:
                    return None

                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]

                return block
            else:
                LOGGER.debug(
                    "Block.header.batch_ids does not match set of "
                    "batches in block.batches Dropping %s", block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                        dependency):
                    self._unsatisfied_dependency_count.inc()

                    # Check to see if the dependency has already been requested
                    if dependency not in self._requested:
                        dependencies.append(dependency)
                        self._requested[dependency] = None
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(dependencies)

        return valid

    def _add_seen_txns(self, batch):
        for txn in batch.transactions:
            self._seen_txns[txn.header_signature] = batch.header_signature
            self._seen_txns_length.set_value(len(self._seen_txns))

    def _process_incomplete_batches(self, key):
        # Keys are transaction_id
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key):
        # Keys are either a block_id or batch_id
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)

            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[inc_block.header_signature] = \
                                inc_block
                            self._on_block_received(inc_block)
                            to_complete.append(inc_block.header_signature)
                    del self._incomplete_blocks[my_key]

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        self._on_batch_received = on_batch_received_func

    def set_chain_has_block(self, set_chain_has_block):
        self._has_block = set_chain_has_block

    def add_block(self, block):
        with self.lock:
            blkw = BlockWrapper(block)
            block = self._complete_block(blkw)
            if block is not None:
                self.block_cache[block.header_signature] = blkw
                self._on_block_received(blkw)
                self._process_incomplete_blocks(block.header_signature)
            self._incomplete_blocks_length.set_value(
                len(self._incomplete_blocks))

    def add_batch(self, batch):
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(batch)
                self._process_incomplete_blocks(batch.header_signature)
                if batch.header_signature in self._requested:
                    del self._requested[batch.header_signature]
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        if txn.header_signature in self._requested:
                            del self._requested[txn.header_signature]
                        self._process_incomplete_batches(txn.header_signature)
            self._incomplete_batches_length.set_value(
                len(self._incomplete_batches))

    def get_chain_head(self):
        """Returns the block which is the current head of the chain.

        Returns:
            BlockWrapper: The head of the chain.
        """
        with self.lock:
            return self._block_store.chain_head

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(transaction_id)
                except ValueError:
                    return None
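
Before blocks or batches are handed to add_block()/add_batch(), the Completer's callbacks must be wired to the journal. A hedged sketch of that wiring, assuming a completer and a chain_controller constructed as in the examples above; the batch handler is a placeholder lambda standing in for the real BlockPublisher callback.

# Illustrative wiring only; the real validator connects these callbacks to
# the ChainController and BlockPublisher during startup.
completer.set_on_block_received(chain_controller.queue_block)
completer.set_on_batch_received(
    lambda batch: LOGGER.debug("completed batch %s",
                               batch.header_signature[:8]))
completer.set_chain_has_block(chain_controller.has_block)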
Example #33
File: chain.py Project: sambacha/sprawl
    def __init__(self, block_cache, block_sender, state_view_factory, executor,
                 transaction_executor, chain_head_lock, on_chain_updated,
                 squash_handler, chain_id_manager, identity_signing_key,
                 data_dir, config_dir, permission_verifier, chain_observers,
                 metrics_registry):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_sender: an interface object used to send blocks to the
                network.
            state_view_factory: The factory object used to create read-only
                state views.
            executor: The thread pool to process block validations.
            transaction_executor: The TransactionExecutor used to produce
                schedulers for batch validation.
            chain_head_lock: Lock to hold while the chain head is being
                updated, this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                 system the head block in the chain has been changed.
            squash_handler: a parameter passed when creating transaction
                schedulers.
            chain_id_manager: The ChainIdManager instance.
            identity_signing_key: Private key for signing blocks.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._block_sender = block_sender
        self._executor = executor
        self._transaction_executor = transaction_executor
        self._notify_on_chain_updated = on_chain_updated
        self._squash_handler = squash_handler
        self._identity_signing_key = identity_signing_key
        self._identity_public_key = \
            signing.generate_pubkey(self._identity_signing_key)
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._blocks_processing = {}  # a set of blocks that are
        # currently being processed.
        self._blocks_pending = {}  # set of blocks whose previous block is
        # currently being processed. Once that completes, this block will be
        # scheduled for validation.
        self._chain_id_manager = chain_id_manager

        try:
            self._chain_head = self._block_store.chain_head
            if self._chain_head is not None:
                LOGGER.info("Chain controller initialized with chain head: %s",
                            self._chain_head)
        except Exception as exc:
            LOGGER.error("Invalid block store. Head of the block chain cannot "
                         "be determined")
            LOGGER.exception(exc)
            raise

        self._notify_on_chain_updated(self._chain_head)
        self._permission_verifier = permission_verifier
        self._chain_observers = chain_observers
        self._chain_head_gauge = \
            metrics_registry.gauge('chain_head', default='no chain head') \
            if metrics_registry else None

        if metrics_registry:
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
        else:
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()
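
Note that this older variant stores the raw pyformance gauge (or None) in self._chain_head_gauge rather than a GaugeWrapper, so call sites must guard against a missing registry. A small illustrative guard method, assuming the gauge exposes set_value() as it does elsewhere in this code; the helper name is hypothetical.

    def _set_chain_head_gauge(self, value):
        # Hypothetical helper: only touch the gauge when a metrics registry
        # was supplied, since self._chain_head_gauge may be None here.
        if self._chain_head_gauge is not None:
            self._chain_head_gauge.set_value(value)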
Example #34
class BlockValidator(object):
    """
    Responsible for validating a block. It handles both chain extensions and
    forks, determines whether the new block should become the head of the
    chain, and returns the information necessary to make the switch if so.
    """

    def __init__(self,
                 consensus_module,
                 block_cache,
                 new_block,
                 state_view_factory,
                 done_cb,
                 executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None):
        """Initialize the BlockValidator
        Args:
             consensus_module: The consensus module that contains
             implementation of the consensus algorithm to use for block
             validation.
             block_cache: The cache of all recent blocks and the processing
             state associated with them.
             new_block: The block to validate.
             state_view_factory: The factory object used to create read-only
             state views.
             done_cb: The method to call when block validation is complete.
             executor: The thread pool to process block validations.
             squash_handler: A parameter passed when creating transaction
             schedulers.
             identity_signer: A cryptographic signer for signing blocks.
             data_dir: Path to location where persistent data for the
             consensus module can be stored.
             config_dir: Path to location where config data for the
             consensus module can be found.
        Returns:
            None
        """
        self._consensus_module = consensus_module
        self._block_cache = block_cache
        self._chain_commit_state = ChainCommitState(
            self._block_cache.block_store, [])
        self._new_block = new_block

        # Set during execution of the BlockValidation to the current
        # chain_head at that time.
        self._chain_head = None

        self._state_view_factory = state_view_factory
        self._done_cb = done_cb
        self._executor = executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._result = {
            'new_block': new_block,
            'chain_head': None,
            'new_chain': [],
            'cur_chain': [],
            'committed_batches': [],
            'uncommitted_batches': [],
            'num_transactions': 0
        }
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = \
            ValidationRuleEnforcer(SettingsViewFactory(state_view_factory))

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()

    def _get_previous_block_root_state_hash(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    def _txn_header(self, txn):
        txn_hdr = TransactionHeader()
        txn_hdr.ParseFromString(txn.header)
        return txn_hdr

    def _verify_batch_transactions(self, batch):
        """Verify that all transactions in are unique and that all
        transactions dependencies in this batch have been satisfied, ie
        already committed by this block or prior block in the chain.

        :param batch: the batch to verify
        :return:
        Boolean: True if all dependencies are present and all transactions
        are unique.
        """
        for txn in batch.transactions:
            txn_hdr = self._txn_header(txn)
            if self._chain_commit_state.has_transaction(txn.header_signature):
                LOGGER.debug(
                    "Block rejected due to duplicate"
                    " transaction, transaction: %s",
                    txn.header_signature[:8])
                raise InvalidBatch()
            for dep in txn_hdr.dependencies:
                if not self._chain_commit_state.has_transaction(dep):
                    LOGGER.debug(
                        "Block rejected due to missing "
                        "transaction dependency, transaction %s "
                        "depends on %s",
                        txn.header_signature[:8],
                        dep[:8])
                    raise InvalidBatch()
            self._chain_commit_state.add_txn(txn.header_signature)

    def _verify_block_batches(self, blkw):
        if blkw.block.batches:
            prev_state = self._get_previous_block_root_state_hash(blkw)
            scheduler = self._executor.create_scheduler(
                self._squash_handler, prev_state)
            self._executor.execute(scheduler)
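            # Batches are streamed into the scheduler as they are checked;
            # the final batch is added together with the block's expected
            # state root so the scheduler can verify the resulting state hash.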
            try:
                for batch, has_more in look_ahead(blkw.block.batches):
                    if self._chain_commit_state.has_batch(
                            batch.header_signature):
                        LOGGER.debug("Block(%s) rejected due to duplicate "
                                     "batch, batch: %s", blkw,
                                     batch.header_signature[:8])
                        raise InvalidBatch()

                    self._verify_batch_transactions(batch)
                    self._chain_commit_state.add_batch(
                        batch, add_transactions=False)
                    if has_more:
                        scheduler.add_batch(batch)
                    else:
                        scheduler.add_batch(batch, blkw.state_root_hash)
            except InvalidBatch:
                LOGGER.debug("Invalid batch %s encountered during "
                             "verification of block %s",
                             batch.header_signature[:8],
                             blkw)
                scheduler.cancel()
                return False
            except Exception:
                scheduler.cancel()
                raise

            scheduler.finalize()
            scheduler.complete(block=True)
            state_hash = None

            for batch in blkw.batches:
                batch_result = scheduler.get_batch_execution_result(
                    batch.header_signature)
                if batch_result is not None and batch_result.is_valid:
                    txn_results = \
                        scheduler.get_transaction_execution_results(
                            batch.header_signature)
                    blkw.execution_results.extend(txn_results)
                    state_hash = batch_result.state_hash
                    blkw.num_transactions += len(batch.transactions)
                else:
                    return False
            if blkw.state_root_hash != state_hash:
                LOGGER.debug("Block(%s) rejected due to state root hash "
                             "mismatch: %s != %s", blkw, blkw.state_root_hash,
                             state_hash)
                return False
        return True

    def _validate_permissions(self, blkw):
        """
        Validate that all of the batch signers and transaction signers for
        the batches in the block are permitted by the transactor permissioning
        roles stored in state as of the previous block. If a transactor is
        not permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            try:
                state_root = self._get_previous_block_root_state_hash(blkw)
            except KeyError:
                LOGGER.info(
                    "Block rejected due to missing predecessor: %s", blkw)
                return False

            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, state_root):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            try:
                state_root = self._get_previous_block_root_state_hash(blkw)
            except KeyError:
                LOGGER.debug(
                    "Block rejected due to missing" + " predecessor: %s", blkw)
                return False
            return self._validation_rule_enforcer.validate(blkw, state_root)
        return True

    def validate_block(self, blkw):
        # pylint: disable=broad-except
        try:
            if blkw.status == BlockStatus.Valid:
                return True
            elif blkw.status == BlockStatus.Invalid:
                return False
            else:
                valid = True

                valid = self._validate_permissions(blkw)

                if valid:
                    public_key = \
                        self._identity_signer.get_public_key().as_hex()
                    consensus = self._consensus_module.BlockVerifier(
                        block_cache=self._block_cache,
                        state_view_factory=self._state_view_factory,
                        data_dir=self._data_dir,
                        config_dir=self._config_dir,
                        validator_id=public_key)
                    valid = consensus.verify_block(blkw)

                if valid:
                    valid = self._validate_on_chain_rules(blkw)

                if valid:
                    valid = self._verify_block_batches(blkw)

                # since changes to the chain-head can change the state of the
                # blocks in BlockStore we have to revalidate this block.
                block_store = self._block_cache.block_store
                if (self._chain_head is not None
                        and self._chain_head.identifier !=
                        block_store.chain_head.identifier):
                    raise ChainHeadUpdated()

                blkw.status = \
                    BlockStatus.Valid if valid else BlockStatus.Invalid

                return valid
        except ChainHeadUpdated as chu:
            raise chu
        except Exception:
            LOGGER.exception(
                "Unhandled exception BlockPublisher.validate_block()")
            return False

    def _find_common_height(self, new_chain, cur_chain):
        """
        Walk the longer chain back until we reach a predecessor that is at
        the same height as the other chain.
        The traversed blocks are recorded in the corresponding lists and the
        blocks at the same height are returned.
        """
        new_blkw = self._new_block
        cur_blkw = self._chain_head
        # 1) find the common ancestor of this block in the current chain
        # Walk back until we have both chains at the same length

        # Walk back the new chain to find the block that is the
        # same height as the current head.
        if new_blkw.block_num > cur_blkw.block_num:
            # new chain is longer
            # walk the new chain back until we find the block that is the
            # same height as the current chain head.
            while new_blkw.block_num > cur_blkw.block_num and \
                    new_blkw.previous_block_id != NULL_BLOCK_IDENTIFIER:
                new_chain.append(new_blkw)
                try:
                    new_blkw = self._block_cache[new_blkw.previous_block_id]
                except KeyError:
                    LOGGER.info(
                        "Block %s rejected due to missing predecessor %s",
                        new_blkw,
                        new_blkw.previous_block_id)
                    for b in new_chain:
                        b.status = BlockStatus.Invalid
                    raise BlockValidationAborted()
        elif new_blkw.block_num < cur_blkw.block_num:
            # current chain is longer
            # walk the current chain back until we find the block that is the
            # same height as the new chain.
            while (cur_blkw.block_num > new_blkw.block_num
                   and new_blkw.previous_block_id != NULL_BLOCK_IDENTIFIER):
                cur_chain.append(cur_blkw)
                cur_blkw = self._block_cache[cur_blkw.previous_block_id]
        return (new_blkw, cur_blkw)

    def _find_common_ancestor(self, new_blkw, cur_blkw, new_chain, cur_chain):
        """ Finds a common ancestor of the two chains.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                LOGGER.info(
                    "Block rejected due to wrong genesis: %s %s",
                    cur_blkw, new_blkw)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()
            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                LOGGER.info(
                    "Block %s rejected due to missing predecessor %s",
                    new_blkw,
                    new_blkw.previous_block_id)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _test_commit_new_chain(self):
        """ Compare the two chains and determine which should be the head.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        fork_resolver = \
            self._consensus_module.ForkResolver(
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                validator_id=public_key)

        return fork_resolver.compare_forks(self._chain_head, self._new_block)

    def _compute_batch_change(self, new_chain, cur_chain):
        """
        Compute the batch change sets.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def run(self):
        """
        Main entry point for block validation. Take a given candidate block,
        decide whether it is valid, and if it is, determine whether it should
        become the new head block. Returns the results to the ChainController
        so that the changeover can be made if necessary.
        """
        try:
            LOGGER.info("Starting block validation of : %s", self._new_block)
            cur_chain = self._result["cur_chain"]  # ordered list of the
            # current chain blocks
            new_chain = self._result["new_chain"]  # ordered list of the new
            # chain blocks

            # get the current chain_head.
            self._chain_head = self._block_cache.block_store.chain_head
            self._result['chain_head'] = self._chain_head

            # 1) Find the common ancestor block, the root of the fork.
            # walk back till both chains are the same height
            (new_blkw, cur_blkw) = self._find_common_height(new_chain,
                                                            cur_chain)

            # 2) Walk back until we find the common ancestor
            self._find_common_ancestor(new_blkw, cur_blkw,
                                       new_chain, cur_chain)

            # 3) Determine the validity of the new fork
            # build the transaction cache to simulate the state of the
            # chain at the common root.
            self._chain_commit_state = ChainCommitState(
                self._block_cache.block_store, cur_chain)

            valid = True
            for block in reversed(new_chain):
                if valid:
                    if not self.validate_block(block):
                        LOGGER.info("Block validation failed: %s", block)
                        valid = False
                    self._result["num_transactions"] += block.num_transactions
                else:
                    LOGGER.info(
                        "Block marked invalid (invalid predecessor): %s",
                        block)
                    block.status = BlockStatus.Invalid

            if not valid:
                self._done_cb(False, self._result)
                return

            # 4) Evaluate the 2 chains to see if the new chain should be
            # committed
            LOGGER.info(
                "Comparing current chain head '%s' against new block '%s'",
                self._chain_head, self._new_block)
            for i in range(max(len(new_chain), len(cur_chain))):
                cur = new = num = "-"
                if i < len(cur_chain):
                    cur = cur_chain[i].header_signature[:8]
                    num = cur_chain[i].block_num
                if i < len(new_chain):
                    new = new_chain[i].header_signature[:8]
                    num = new_chain[i].block_num
                LOGGER.info(
                    "Fork comparison at height %s is between %s and %s",
                    num, cur, new)

            commit_new_chain = self._test_commit_new_chain()

            # 5) Consensus to compute batch sets (only if we are switching).
            if commit_new_chain:
                (self._result["committed_batches"],
                 self._result["uncommitted_batches"]) = \
                    self._compute_batch_change(new_chain, cur_chain)

                if new_chain[0].previous_block_id != \
                        self._chain_head.identifier:
                    self._moved_to_fork_count.inc()

            # 6) Tell the journal we are done.
            self._done_cb(commit_new_chain, self._result)

            LOGGER.info(
                "Finished block validation of: %s",
                self._new_block)
        except BlockValidationAborted:
            self._done_cb(False, self._result)
            return
        except ChainHeadUpdated:
            self._done_cb(False, self._result)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s",
                self._new_block)
            # callback to clean up the block out of the processing list.
            self._done_cb(False, self._result)
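The fork-resolution flow above first brings the two chains to a common height and then walks them back in lockstep to a shared ancestor (steps 1 and 2). The standalone sketch below illustrates that idea only; it assumes a simple block_store mapping from block id to objects with block_num and previous_block_id attributes, and that the two forks do share an ancestor in the store. It is not the validator's actual _find_common_height / _find_common_ancestor code.

from collections import namedtuple

# Hypothetical minimal block record used only for this illustration.
Block = namedtuple('Block',
                   ['header_signature', 'block_num', 'previous_block_id'])


def find_fork_point(block_store, new_head_id, cur_head_id):
    """Walk two forks back until they meet.

    Returns (ancestor_id, new_fork, cur_fork), where the fork lists hold the
    blocks on each side of the fork, newest first, excluding the ancestor.
    """
    new_blk = block_store[new_head_id]
    cur_blk = block_store[cur_head_id]
    new_fork, cur_fork = [], []

    # 1) Bring both chains down to the same height.
    while new_blk.block_num > cur_blk.block_num:
        new_fork.append(new_blk)
        new_blk = block_store[new_blk.previous_block_id]
    while cur_blk.block_num > new_blk.block_num:
        cur_fork.append(cur_blk)
        cur_blk = block_store[cur_blk.previous_block_id]

    # 2) Step both chains back in lockstep until they reference the same block.
    while new_blk.header_signature != cur_blk.header_signature:
        new_fork.append(new_blk)
        cur_fork.append(cur_blk)
        new_blk = block_store[new_blk.previous_block_id]
        cur_blk = block_store[cur_blk.previous_block_id]

    return new_blk.header_signature, new_fork, cur_fork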
Example #35
    def __init__(self,
                 transaction_executor,
                 block_cache,
                 state_view_factory,
                 settings_cache,
                 block_sender,
                 batch_sender,
                 squash_handler,
                 chain_head,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 check_publish_block_frequency,
                 batch_observers,
                 batch_injector_factory=None,
                 metrics_registry=None):
        """
        Initialize the BlockPublisher object

        Args:
            transaction_executor (:obj:`TransactionExecutor`): A
                TransactionExecutor instance.
            block_cache (:obj:`BlockCache`): A BlockCache instance.
            state_view_factory (:obj:`StateViewFactory`): StateViewFactory for
                read-only state views.
            block_sender (:obj:`BlockSender`): The BlockSender instance.
            batch_sender (:obj:`BatchSender`): The BatchSender instance.
            squash_handler (function): Squash handler function for merging
                contexts.
            chain_head (:obj:`BlockWrapper`): The initial chain head.
            identity_signer (:obj:`Signer`): Cryptographic signer for signing
                blocks
            data_dir (str): path to location where persistent data for the
                consensus module can be stored.
            config_dir (str): path to location where configuration can be
                found.
            batch_injector_factory (:obj:`BatchInjectorFactory`): A factory
                for creating BatchInjectors.
            metrics_registry (MetricsRegistry): Metrics registry used to
                create pending batch gauge
        """
        self._lock = RLock()
        # _CandidateBlock helper for the next block in the potential chain
        self._candidate_block = None
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._settings_cache = settings_cache
        self._transaction_executor = transaction_executor
        self._block_sender = block_sender
        self._batch_publisher = BatchPublisher(identity_signer, batch_sender)
        # batches awaiting validation, in the order they were received
        self._pending_batches = []
        self._pending_batch_ids = []
        self._publish_count_average = _RollingAverage(
            NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT)

        self._chain_head = chain_head  # block (BlockWrapper)
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier
        self._batch_injector_factory = batch_injector_factory

        # For metric gathering
        if metrics_registry:
            self._pending_batch_gauge = GaugeWrapper(
                metrics_registry.gauge('pending_batch_gauge'))
            self._blocks_published_count = CounterWrapper(
                metrics_registry.counter('blocks_published_count'))
        else:
            self._blocks_published_count = CounterWrapper()
            self._pending_batch_gauge = GaugeWrapper()

        self._batch_queue = queue.Queue()
        self._queued_batch_ids = []
        self._batch_observers = batch_observers
        self._check_publish_block_frequency = check_publish_block_frequency
        self._publisher_thread = None

        # A series of states that allow us to check for condition changes.
        # These can be used to log only at the boundary of condition changes.
        self._logging_states = _PublisherLoggingStates()
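The publisher's _RollingAverage(NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT) tracks how many batches recent blocks have carried so block sizing and logging can adapt. That helper is not shown in this listing; the class below is one plausible fixed-window implementation, offered purely as an illustration and not as the project's actual code.

class RollingAverage:
    """Fixed-size rolling average over the most recent samples.

    A hypothetical stand-in for the _RollingAverage helper referenced above;
    the real helper's behavior may differ.
    """

    def __init__(self, sample_size, initial_value):
        self._sample_size = sample_size
        self._samples = [initial_value]

    @property
    def value(self):
        return sum(self._samples) / len(self._samples)

    def update_average(self, new_sample):
        self._samples.append(new_sample)
        # Keep only the most recent sample_size observations.
        if len(self._samples) > self._sample_size:
            self._samples.pop(0)
        return self.value

Averaging over a fixed number of recent samples smooths out bursts, so one unusually large or empty block does not skew the estimate of how many batches the next block is likely to carry.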
Example #36
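The Completer below leans on TimedCache instances for de-duplicating batches, remembering recently seen transactions, and tracking outstanding requests. To make that dependency concrete, here is a rough dict-like sketch with per-entry expiry; the interface (item assignment, membership tests, deletion, length) is an assumption drawn from how the Completer uses it, not the project's actual TimedCache implementation.

import time


class ExpiringCache:
    """Dict-like cache whose entries expire after keep_time seconds.

    A simplified, illustrative stand-in for the TimedCache interface used by
    the Completer: __setitem__, __getitem__, __contains__, __delitem__, and
    __len__.
    """

    def __init__(self, keep_time=30, purge_frequency=30):
        self._keep_time = keep_time
        self._purge_frequency = purge_frequency
        self._next_purge = time.time() + purge_frequency
        self._store = {}  # key -> (value, insertion_time)

    def _purge_expired(self):
        cutoff = time.time() - self._keep_time
        self._store = {
            k: (v, ts) for k, (v, ts) in self._store.items() if ts >= cutoff
        }

    def __setitem__(self, key, value):
        # Purge stale entries occasionally rather than on every access.
        if time.time() > self._next_purge:
            self._purge_expired()
            self._next_purge = time.time() + self._purge_frequency
        self._store[key] = (value, time.time())

    def __getitem__(self, key):
        value, ts = self._store[key]
        if ts < time.time() - self._keep_time:
            del self._store[key]
            raise KeyError(key)
        return value

    def __contains__(self, key):
        try:
            self[key]
            return True
        except KeyError:
            return False

    def __delitem__(self, key):
        del self._store[key]

    def __len__(self):
        return len(self._store)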
class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and
    whose batches are all present, in the order specified by the block
    header. If the predecessor or a batch is missing, a request message is
    sent out over the gossip network. The Completer also checks that all
    batches have their dependencies satisfied; otherwise it requests the
    batch that holds the missing transaction.
    """

    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300,
                 metrics_registry=None):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch requests to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING: this time should always be less
            than cache_keep_time, or the validator can get into a state where
            it fails to make progress because it thinks it has already
            requested something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store,
                                      cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # treat the null block id as present so the genesis block (whose
        # predecessor is the null id) is not considered incomplete
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        if metrics_registry:
            # Tracks how many times an unsatisfied dependency is found
            self._unsatisfied_dependency_count = CounterWrapper(
                metrics_registry.counter(
                    'completer.unsatisfied_dependency_count'))
            # Tracks the length of the completer's _seen_txns
            self._seen_txns_length = GaugeWrapper(
                metrics_registry.gauge(
                    'completer.seen_txns_length'))
            # Tracks the length of the completer's _incomplete_blocks
            self._incomplete_blocks_length = GaugeWrapper(
                metrics_registry.gauge(
                    'completer.incomplete_blocks_length'))
            # Tracks the length of the completer's _incomplete_batches
            self._incomplete_batches_length = GaugeWrapper(
                metrics_registry.gauge(
                    'completer.incomplete_batches_length'))
        else:
            self._unsatisfied_dependency_count = CounterWrapper()
            self._seen_txns_length = GaugeWrapper()
            self._incomplete_blocks_length = GaugeWrapper()
            self._incomplete_batches_length = GaugeWrapper()

    def _complete_block(self, block):
        """ Check the block to see if it is complete and if it can be passed to
            the journal. If the block's predecessor is not in the block_cache
            the predecessor is requested and the current block is added to the
            the incomplete_block cache. If the block.batches and
            block.header.batch_ids are not the same length, the batch_id list
            is checked against the batch_cache to see if the batch_list can be
            built. If any batches are missing from the block and we do not have
            the batches in the batch_cache, they are requested. The block is
            then added to the incomplete_block cache. If we can complete the
            block, a new batch list is created in the correct order and added
            to the block. The block is now considered complete and is returned.
            If block.batches and block.header.batch_ids are the same length,
            the block's batch list needs to be in the same order as the
            block.header.batch_ids list. If the block has all of its expected
            batches but are not in the correct order, the batch list is rebuilt
            and added to the block. Once a block has the correct batch list it
            is added to the block_cache and is returned.

        """

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None

        if block.previous_block_id not in self.block_cache:
            if not self._has_block(block.previous_block_id):
                if block.previous_block_id not in self._incomplete_blocks:
                    self._incomplete_blocks[block.previous_block_id] = [block]
                elif block not in \
                        self._incomplete_blocks[block.previous_block_id]:
                    self._incomplete_blocks[block.previous_block_id] += [block]

                # We have already requested the block, do not do so again
                if block.previous_block_id in self._requested:
                    return None

                LOGGER.debug("Request missing predecessor: %s",
                             block.previous_block_id)
                self._requested[block.previous_block_id] = None
                self.gossip.broadcast_block_request(block.previous_block_id)
                return None

        # Compare the number of batches against the number of batch_ids in
        # the header. If the block carries more batches than the header
        # declares, a batch that does not belong is present and the block is
        # dropped; if it carries fewer, try to build the full batch list.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # used to supplement batch_cache, contains batches already in block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
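            # 'building' stays True only while every batch_id can be found
            # locally; broadcasting a request for a missing batch flips it to
            # False, and completion of this block is deferred.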
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]

                    # We have already requested the batch, do not do so again
                    if batch_id in self._requested:
                        return None
                    self._requested[batch_id] = None
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            del block.batches[:]
            # replace them with the full, ordered batch list
            block.batches.extend(batches)
            if block.header_signature in self._requested:
                del self._requested[block.header_signature]
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if the batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                # Clear batches from block
                del block.batches[:]
                # replace them with the full, ordered batch list
                if batches is not None:
                    block.batches.extend(batches)
                else:
                    return None

                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]

                return block
            else:
                LOGGER.debug("Block.header.batch_ids does not match set of "
                             "batches in block.batches Dropping %s", block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
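        # A batch is complete only when every dependency declared by its
        # transactions has either been seen recently or is already committed
        # in the block store; otherwise the missing batches are requested.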
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                        dependency):
                    self._unsatisfied_dependency_count.inc()

                    # Check to see if the dependency has already been requested
                    if dependency not in self._requested:
                        dependencies.append(dependency)
                        self._requested[dependency] = None
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(
                dependencies)

        return valid

    def _add_seen_txns(self, batch):
        for txn in batch.transactions:
            self._seen_txns[txn.header_signature] = batch.header_signature
            self._seen_txns_length.set_value(
                len(self._seen_txns))

    def _process_incomplete_batches(self, key):
        # Keys are transaction_id
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key):
        # Keys are either a block_id or batch_id
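        # Completing one block can unblock blocks that were waiting on it as
        # their predecessor, so the newly completed ids are walked
        # breadth-first with a deque.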
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)

            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[inc_block.header_signature] = \
                                inc_block
                            self._on_block_received(inc_block)
                            to_complete.append(inc_block.header_signature)
                    del self._incomplete_blocks[my_key]

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        self._on_batch_received = on_batch_received_func

    def set_chain_has_block(self, set_chain_has_block):
        self._has_block = set_chain_has_block

    def add_block(self, block):
        with self.lock:
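            # Wrap the raw protobuf block so the rest of the journal can work
            # with a single block object; only formally complete blocks are
            # cached and handed to the on_block_received callback.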
            blkw = BlockWrapper(block)
            block = self._complete_block(blkw)
            if block is not None:
                self.block_cache[block.header_signature] = blkw
                self._on_block_received(blkw)
                self._process_incomplete_blocks(block.header_signature)
            self._incomplete_blocks_length.set_value(
                len(self._incomplete_blocks))

    def add_batch(self, batch):
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(batch)
                self._process_incomplete_blocks(batch.header_signature)
                if batch.header_signature in self._requested:
                    del self._requested[batch.header_signature]
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        if txn.header_signature in self._requested:
                            del self._requested[txn.header_signature]
                        self._process_incomplete_batches(txn.header_signature)
            self._incomplete_batches_length.set_value(
                len(self._incomplete_batches))

    def get_chain_head(self):
        """Returns the block which is the current head of the chain.

        Returns:
            BlockWrapper: The head of the chain.
        """
        with self.lock:
            return self._block_store.chain_head

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(transaction_id)
                except ValueError:
                    return None
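Taken together, the Completer's public surface is small: attach the callbacks, then feed it gossiped blocks and batches and query it for cached objects. The wiring sketch below is hypothetical; chain_controller and block_publisher are placeholder objects standing in for whichever components consume completed blocks and batches.

def wire_completer(completer, chain_controller, block_publisher):
    """Hypothetical helper showing how a Completer's callbacks are attached.

    chain_controller and block_publisher are illustrative placeholders, not
    names taken from this listing.
    """
    completer.set_on_block_received(chain_controller.queue_block)
    completer.set_on_batch_received(block_publisher.queue_batch)
    completer.set_chain_has_block(chain_controller.has_block)


# Incoming gossip is then routed through the completer, which either delivers
# complete items immediately or requests whatever is missing:
#
#   completer.add_batch(batch)   # delivered via on_batch_received when complete
#   completer.add_block(block)   # delivered via on_block_received when complete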