Example #1
    def __init__(self,
                 completer,
                 cache_keep_time=300,
                 cache_purge_frequency=30):
        self.completer = completer
        self.pending_requests = TimedCache(cache_keep_time,
                                           cache_purge_frequency)
        self._lock = RLock()
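
Every example on this page hands its keep time and purge frequency to TimedCache and then treats the object like a dictionary. The real implementation (sawtooth_validator.journal.timed_cache.TimedCache) is not reproduced here; the sketch below is only an assumption-based stand-in, inferred from how the snippets use it, to make the examples easier to follow.

import time
from threading import RLock


class TimedCache:
    """Illustrative stand-in for the cache the examples assume.

    NOTE: this is a sketch, not the real TimedCache; it only mirrors the
    dict-style interface the snippets rely on (item access, membership
    tests, get() and items()) plus time-based expiry.
    """

    def __init__(self, keep_time=30, purge_frequency=30):
        self._keep_time = keep_time
        self._purge_frequency = purge_frequency
        self._next_purge = time.time() + purge_frequency
        self._cache = {}  # key -> (value, insertion timestamp)
        self._lock = RLock()

    def __setitem__(self, key, value):
        with self._lock:
            self._cache[key] = (value, time.time())
            self._purge_if_due()

    def __getitem__(self, key):
        with self._lock:
            return self._cache[key][0]  # raises KeyError like a dict

    def __delitem__(self, key):
        with self._lock:
            del self._cache[key]

    def __contains__(self, key):
        with self._lock:
            return key in self._cache

    def get(self, key, default=None):
        with self._lock:
            return self._cache[key][0] if key in self._cache else default

    def items(self):
        with self._lock:
            return [(key, value) for key, (value, _) in self._cache.items()]

    def _purge_if_due(self):
        # Drop entries older than keep_time, at most every purge_frequency.
        now = time.time()
        if now < self._next_purge:
            return
        expired = [key for key, (_, added) in self._cache.items()
                   if now - added > self._keep_time]
        for key in expired:
            del self._cache[key]
        self._next_purge = now + self._purge_frequency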
Example #2
    def __init__(self,
                 block_store,
                 cache_keep_time=600,
                 cache_purge_frequency=30):
        self._block_store = block_store
        self._batch_info = TimedCache(cache_keep_time, cache_purge_frequency)
        self._invalid = TimedCache(cache_keep_time, cache_purge_frequency)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}
Example #3
    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch requests to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING this time should always be less than
            cache_keep_time or the validator can get into a state where it
            fails to make progress because it thinks it has already requested
            something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        # Tracks how many times an unsatisfied dependency is found
        self._unsatisfied_dependency_count = COLLECTOR.counter(
            'unsatisfied_dependency_count', instance=self)
        # Tracks the length of the completer's _seen_txns
        self._seen_txns_length = COLLECTOR.gauge('seen_txns_length',
                                                 instance=self)
        # Tracks the length of the completer's _incomplete_blocks
        self._incomplete_blocks_length = COLLECTOR.gauge(
            'incomplete_blocks_length', instance=self)
        # Tracks the length of the completer's _incomplete_batches
        self._incomplete_batches_length = COLLECTOR.gauge(
            'incomplete_batches_length', instance=self)
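
The WARNING in the docstring above describes an easy misconfiguration: if requested ids are kept longer than the caches of what actually arrived, the completer can believe it already asked for a block or batch that has since been purged and stop making progress. A minimal guard sketch; the _check_cache_times helper is hypothetical and not part of the Completer API:

def _check_cache_times(cache_keep_time, requested_keep_time):
    # Hypothetical validation helper: enforce the docstring's constraint
    # that requested ids expire before the caches they are checked against.
    if requested_keep_time >= cache_keep_time:
        raise ValueError(
            "requested_keep_time ({}) must be less than cache_keep_time "
            "({})".format(requested_keep_time, cache_keep_time))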
Example #4
class Responder(object):
    def __init__(self,
                 completer,
                 cache_keep_time=300,
                 cache_purge_frequency=30):
        self.completer = completer
        self.pending_requests = TimedCache(cache_keep_time,
                                           cache_purge_frequency)
        self._lock = RLock()

    def check_for_block(self, block_id):
        # Ask Completer
        if block_id == "HEAD":
            block = self.completer.get_chain_head()
        else:
            block = self.completer.get_block(block_id)
        return block

    def check_for_batch(self, batch_id):
        batch = self.completer.get_batch(batch_id)
        return batch

    def check_for_batch_by_transaction(self, transaction_id):
        batch = self.completer.get_batch_by_transaction(transaction_id)
        return batch

    def already_requested(self, requested_id):
        with self._lock:
            return requested_id in self.pending_requests

    def add_request(self, requested_id, connection_id):
        with self._lock:
            if requested_id in self.pending_requests:
                if connection_id not in self.pending_requests[requested_id]:
                    self.pending_requests[requested_id] += [connection_id]
            else:
                self.pending_requests[requested_id] = [connection_id]

    def get_request(self, requested_id):
        with self._lock:
            return self.pending_requests.get(requested_id)

    def remove_request(self, requested_id):
        with self._lock:
            if requested_id in self.pending_requests:
                del self.pending_requests[requested_id]
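
Taken together, the Responder methods above implement a simple de-duplication of peer requests: remember which connections asked for an id, answer all of them once the object is available, then drop the entry. A rough usage sketch, where completer, block and send_block are placeholders:

responder = Responder(completer)

# First peer asks for a block the validator does not have yet.
if not responder.already_requested("block-abc"):
    responder.add_request("block-abc", "connection-1")

# A second peer asks for the same block before it arrives; only the
# connection id is appended, no duplicate request entry is created.
responder.add_request("block-abc", "connection-2")

# Once the block shows up, answer every waiting connection and clean up.
for connection_id in responder.get_request("block-abc") or []:
    send_block(connection_id, block)
responder.remove_request("block-abc")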
Example #5
    def handle(self, connection_id, message_content):
        block_response_message = GossipBlockResponse()
        block_response_message.ParseFromString(message_content)
        block = Block()
        block.ParseFromString(block_response_message.content)
        if block.header_signature in self._seen_cache:
            self.block_dropped_count.inc()
            return HandlerResult(status=HandlerStatus.DROP)

        if not is_valid_block(block):
            LOGGER.debug("requested block's signature is invalid: %s",
                         block.header_signature)
            return HandlerResult(status=HandlerStatus.DROP)

        # Record the block as seen so later duplicate responses are dropped
        self._seen_cache[block.header_signature] = None
        return HandlerResult(status=HandlerStatus.PASS)
Example #6
    def __init__(self, network):
        self._network = network
        self._challenge_payload_cache = TimedCache(
            keep_time=AUTHORIZATION_CACHE_TIMEOUT)
Example #7
    def __init__(self, network, allowed_frequency=10):
        self._network = network
        self._last_message = TimedCache()
        self._allowed_frequency = allowed_frequency
Example #8
class BatchTracker(ChainObserver, InvalidTransactionObserver,
                   PendingBatchObserver):
    """Tracks batch statuses for this local validator, allowing interested
    components to check where a batch is in the validation process. It should
    only be relied on for batches submitted locally, and is not persisted
    after restart.

    When a batch moves from one component to another, the appropriate notify
    method should be called in the appropriate component, as specified by the
    relevant Observer class, and implemented here.

    Args:
        block_store (BlockStore): For querying if a batch is committed
        cache_keep_time (float): Time in seconds to keep values in TimedCaches
        cache_purge_frequency (float): Time between purging the TimedCaches
    """
    def __init__(self,
                 block_store,
                 cache_keep_time=600,
                 cache_purge_frequency=30):
        self._block_store = block_store
        self._batch_info = TimedCache(cache_keep_time, cache_purge_frequency)
        self._invalid = TimedCache(cache_keep_time, cache_purge_frequency)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}

    def chain_update(self, block, receipts):
        """Removes batches from the pending cache if found in the block store,
        and notifies any observers.
        """
        with self._lock:
            for batch_id in self._pending.copy():
                if self._block_store.has_batch(batch_id):
                    self._pending.remove(batch_id)
                    self._update_observers(batch_id,
                                           ClientBatchStatus.COMMITTED)

    def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
        """Adds a batch id to the invalid cache along with the id of the
        transaction that was rejected and any error message or extended data.
        Removes that batch id from the pending set. The cache is only
        temporary, and the batch info will be purged after the configured
        cache keep time.

        Args:
            txn_id (str): The id of the invalid transaction
            message (str, optional): Message explaining why the transaction
                is invalid
            extended_data (bytes, optional): Additional error data
        """
        invalid_txn_info = {'id': txn_id}
        if message is not None:
            invalid_txn_info['message'] = message
        if extended_data is not None:
            invalid_txn_info['extended_data'] = extended_data

        with self._lock:
            for batch_id, txn_ids in self._batch_info.items():
                if txn_id in txn_ids:
                    if batch_id not in self._invalid:
                        self._invalid[batch_id] = [invalid_txn_info]
                    else:
                        self._invalid[batch_id].append(invalid_txn_info)
                    self._pending.discard(batch_id)
                    self._update_observers(batch_id, ClientBatchStatus.INVALID)
                    return

    def notify_batch_pending(self, batch):
        """Adds a Batch id to the pending cache, with its transaction ids.

        Args:
            batch (Batch): The pending Batch, whose header signature and
                transaction ids will be cached
        """
        txn_ids = {t.header_signature for t in batch.transactions}
        with self._lock:
            self._pending.add(batch.header_signature)
            self._batch_info[batch.header_signature] = txn_ids
            self._update_observers(batch.header_signature,
                                   ClientBatchStatus.PENDING)

    def get_status(self, batch_id):
        """Returns the status enum for a batch.

        Args:
            batch_id (str): The id of the batch to get the status for

        Returns:
            int: The status enum
        """
        with self._lock:
            if self._block_store.has_batch(batch_id):
                return ClientBatchStatus.COMMITTED
            if batch_id in self._invalid:
                return ClientBatchStatus.INVALID
            if batch_id in self._pending:
                return ClientBatchStatus.PENDING
            return ClientBatchStatus.UNKNOWN

    def get_statuses(self, batch_ids):
        """Returns a statuses dict for the requested batches.

        Args:
            batch_ids (list of str): The ids of the batches to get statuses for

        Returns:
            dict: A dict with keys of batch ids, and values of status enums
        """
        with self._lock:
            return {b: self.get_status(b) for b in batch_ids}

    def get_invalid_txn_info(self, batch_id):
        """Fetches the id of the Transaction that failed within a particular
        Batch, as well as any error message or other data about the failure.

        Args:
            batch_id (str): The id of the Batch containing an invalid txn

        Returns:
            list of dict: A list of dicts with three possible keys:
                * 'id' - the header_signature of the invalid Transaction
                * 'message' - the error message sent by the TP
                * 'extended_data' - any additional data sent by the TP
        """
        try:
            return self._invalid[batch_id]
        except KeyError:
            # If batch has been purged from the invalid cache before its txn
            # info is fetched, return an empty array of txn info
            return []

    def watch_statuses(self, observer, batch_ids):
        """Allows a component to register to be notified when a set of
        batches is no longer PENDING. Expects to be able to call the
        "notify_batches_finished" method on the registered component, sending
        the statuses of the batches.

        Args:
            observer (object): Must implement "notify_batches_finished" method
            batch_ids (list of str): The ids of the batches to watch
        """
        with self._lock:
            statuses = self.get_statuses(batch_ids)
            if self._has_no_pendings(statuses):
                observer.notify_batches_finished(statuses)
            else:
                self._observers[observer] = statuses

    def _update_observers(self, batch_id, status):
        """Updates each observer tracking a particular batch with its new
        status. If all statuses are no longer pending, notifies the observer
        and removes it from the list.
        """
        for observer, statuses in self._observers.copy().items():
            if batch_id in statuses:
                statuses[batch_id] = status
                if self._has_no_pendings(statuses):
                    observer.notify_batches_finished(statuses)
                    self._observers.pop(observer)

    def _has_no_pendings(self, statuses):
        """Returns True if a statuses dict has no PENDING statuses.
        """
        return all(s != ClientBatchStatus.PENDING for s in statuses.values())
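
The watch_statuses contract only requires the registered component to expose a notify_batches_finished method, so a small object is enough to observe a batch through to COMMITTED or INVALID. A hedged usage sketch; the observer class, block_store and batch objects below are placeholders:

class StatusPrinter:
    # Illustrative observer: only notify_batches_finished is required.
    def notify_batches_finished(self, statuses):
        for batch_id, status in statuses.items():
            print(batch_id, status)


tracker = BatchTracker(block_store)
tracker.notify_batch_pending(batch)  # batch is assumed to be a Batch proto
tracker.watch_statuses(StatusPrinter(), [batch.header_signature])
# When the batch later commits (chain_update) or a transaction in it is
# rejected (notify_txn_invalid), the tracker calls notify_batches_finished
# with the final statuses and forgets the observer.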
Example #9
    def __init__(self):
        self._seen_cache = TimedCache()
        self._batch_dropped_count = COLLECTOR.counter(
            'already_validated_batch_dropped_count', instance=self)
Example #10
    def __init__(self, responder, gossip):
        self._responder = responder
        self._gossip = gossip
        self._seen_requests = TimedCache(CACHE_KEEP_TIME)