Example #1
 def __init__(self,
              block_store,
              gossip,
              cache_keep_time=300,
              cache_purge_frequency=30,
              requested_keep_time=1200):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     :param cache_keep_time (float) Time in seconds to keep values in
         TimedCaches.
     :param cache_purge_frequency (float) Time between purging the
         TimedCaches.
     :param requested_keep_time (float) Time in seconds to keep the ids
         of requested objects.
     """
     self.gossip = gossip
     self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
     self.block_cache = BlockCache(block_store, cache_keep_time,
                                   cache_purge_frequency)
     self._block_store = block_store
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
     self._incomplete_batches = TimedCache(cache_keep_time,
                                           cache_purge_frequency)
     self._incomplete_blocks = TimedCache(cache_keep_time,
                                          cache_purge_frequency)
     self._requested = TimedCache(requested_keep_time,
                                  cache_purge_frequency)
     self._on_block_received = None
     self._on_batch_received = None
     self._has_block = None
     self.lock = RLock()
Example #2
    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300,
                 metrics_registry=None):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING this time should always be less than
            cache_keep_time or the validator can get into a state where it
            fails to make progress because it thinks it has already requested
            something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        if metrics_registry:
            # Tracks how many times an unsatisfied dependency is found
            self._unsatisfied_dependency_count = CounterWrapper(
                metrics_registry.counter(
                    'completer.unsatisfied_dependency_count'))
            # Tracks the length of the completer's _seen_txns
            self._seen_txns_length = GaugeWrapper(
                metrics_registry.gauge('completer.seen_txns_length'))
            # Tracks the length of the completer's _incomplete_blocks
            self._incomplete_blocks_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_blocks_length'))
            # Tracks the length of the completer's _incomplete_batches
            self._incomplete_batches_length = GaugeWrapper(
                metrics_registry.gauge('completer.incomplete_batches_length'))
        else:
            self._unsatisfied_dependency_count = CounterWrapper()
            self._seen_txns_length = GaugeWrapper()
            self._incomplete_blocks_length = GaugeWrapper()
            self._incomplete_batches_length = GaugeWrapper()
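
The else branch above falls back to argument-less CounterWrapper() and GaugeWrapper(), so metric updates become no-ops when no metrics_registry is supplied. Below is a minimal sketch of wrappers consistent with that usage; it is an assumption for illustration, not the validator's actual metrics module.

class CounterWrapper(object):
    """Sketch: forwards inc() to a wrapped counter when one is supplied,
    otherwise silently drops the update."""

    def __init__(self, counter=None):
        self._counter = counter

    def inc(self, val=1):
        if self._counter:
            self._counter.inc(val)


class GaugeWrapper(object):
    """Sketch: forwards set_value() to a wrapped gauge when one is supplied,
    otherwise silently drops the update."""

    def __init__(self, gauge=None):
        self._gauge = gauge

    def set_value(self, val):
        if self._gauge:
            self._gauge.set_value(val)
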
Example #3
    def __init__(self, block_store):
        self._block_store = block_store
        self._batch_info = TimedCache(keep_time=CACHE_KEEP_TIME)
        self._invalid = TimedCache(keep_time=CACHE_KEEP_TIME)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}
Example #4
 def __init__(self,
              completer,
              cache_keep_time=300,
              cache_purge_frequency=30):
     self.completer = completer
     self.pending_requests = TimedCache(cache_keep_time,
                                        cache_purge_frequency)
     self._lock = RLock()
Example #5
    def __init__(self,
                 block_store,
                 cache_keep_time=600,
                 cache_purge_frequency=30):
        self._block_store = block_store
        self._batch_info = TimedCache(cache_keep_time, cache_purge_frequency)
        self._invalid = TimedCache(cache_keep_time, cache_purge_frequency)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}
Example #6
    def __init__(self,
                 block_store,
                 cache_keep_time=600,
                 cache_purge_frequency=30):
        self._block_store = block_store
        self._batch_info = TimedCache(cache_keep_time, cache_purge_frequency)
        self._invalid = TimedCache(cache_keep_time, cache_purge_frequency)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}
Example #7
    def test_access_update(self):

        bc = TimedCache(keep_time=1)

        bc["test"] = "value"
        bc["test2"] = "value2"
        self.assertEqual(len(bc), 2)

        bc["test"] = "value"
        bc.cache["test"].timestamp = bc.cache["test"].timestamp - 2
        bc["test"]  # access to update timestamp
        bc.purge_expired()
        self.assertEqual(len(bc), 2)
        self.assertTrue("test" in bc)
        self.assertTrue("test2" in bc)
Example #8
    def test_access_update(self):

        bc = TimedCache(keep_time=1)

        bc["test"] = "value"
        bc["test2"] = "value2"
        self.assertEqual(len(bc), 2)

        bc["test"] = "value"
        bc.cache["test"].timestamp = bc.cache["test"].timestamp - 2
        bc["test"]  # access to update timestamp
        bc.purge_expired()
        self.assertEqual(len(bc), 2)
        self.assertTrue("test" in bc)
        self.assertTrue("test2" in bc)
Example #9
class Responder(object):
    def __init__(self, completer, cache_purge_frequency=30):
        self.completer = completer
        self.pending_requests = TimedCache(cache_purge_frequency)
        self._cache_purge_frequency = cache_purge_frequency
        self._purge_time = time.time() + self._cache_purge_frequency
        self._lock = RLock()

    def check_for_block(self, block_id):
        # Ask Completer
        if block_id == "HEAD":
            block = self.completer.get_chain_head()
        else:
            block = self.completer.get_block(block_id)
        return block

    def check_for_batch(self, batch_id):
        batch = self.completer.get_batch(batch_id)
        return batch

    def check_for_batch_by_transaction(self, transaction_id):
        batch = self.completer.get_batch_by_transaction(transaction_id)
        return batch

    def add_request(self, requested_id, connection_id):
        with self._lock:
            if requested_id in self.pending_requests:
                if connection_id not in self.pending_requests[requested_id]:
                    self.pending_requests[requested_id] += [connection_id]

            else:
                self.pending_requests[requested_id] = [connection_id]

    def get_request(self, requested_id):
        with self._lock:
            return self.pending_requests.get(requested_id)

    def remove_request(self, requested_id):
        with self._lock:
            if requested_id in self.pending_requests:
                del self.pending_requests[requested_id]

    def purge_requests(self):
        with self._lock:
            if self._purge_time < time.time():
                LOGGER.debug("Purge pending_requests of expired entries.")
                self.pending_requests.purge_expired()
                self._purge_time = time.time() + self._cache_purge_frequency
Example #10
class Responder(object):
    def __init__(self, completer, cache_purge_frequency=30):
        self.completer = completer
        self.pending_requests = TimedCache(cache_purge_frequency)
        self._cache_purge_frequency = cache_purge_frequency
        self._purge_time = time.time() + self._cache_purge_frequency
        self._lock = RLock()

    def check_for_block(self, block_id):
        # Ask Completer
        if block_id == "HEAD":
            block = self.completer.get_chain_head()
        else:
            block = self.completer.get_block(block_id)
        return block

    def check_for_batch(self, batch_id):
        batch = self.completer.get_batch(batch_id)
        return batch

    def check_for_batch_by_transaction(self, transaction_id):
        batch = self.completer.get_batch_by_transaction(transaction_id)
        return batch

    def add_request(self, requested_id, connection_id):
        with self._lock:
            if requested_id in self.pending_requests:
                if connection_id not in self.pending_requests[requested_id]:
                    self.pending_requests[requested_id] += [connection_id]

            else:
                self.pending_requests[requested_id] = [connection_id]

    def get_request(self, requested_id):
        with self._lock:
            return self.pending_requests.get(requested_id)

    def remove_request(self, requested_id):
        with self._lock:
            if requested_id in self.pending_requests:
                del self.pending_requests[requested_id]

    def purge_requests(self):
        with self._lock:
            if self._purge_time < time.time():
                LOGGER.debug("Purge pending_requests of expired entries.")
                self.pending_requests.purge_expired()
                self._purge_time = time.time() + self._cache_purge_frequency
Example #11
 def __init__(self,
              completer,
              cache_keep_time=300,
              cache_purge_frequency=30):
     self.completer = completer
     self.pending_requests = TimedCache(cache_keep_time,
                                        cache_purge_frequency)
     self._lock = RLock()
Example #12
    def __init__(self, block_store):
        self._block_store = block_store
        self._batch_info = TimedCache(keep_time=CACHE_KEEP_TIME)
        self._invalid = TimedCache(keep_time=CACHE_KEEP_TIME)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}
Example #13
 def __init__(self, metrics_registry=None):
     self._seen_cache = TimedCache()
     if metrics_registry:
         self._batch_dropped_count = CounterWrapper(
             metrics_registry.counter(
                 'already_validated_batch_dropped_count'))
     else:
         self._batch_dropped_count = CounterWrapper()
Example #14
class Responder(object):
    def __init__(self,
                 completer,
                 cache_keep_time=300,
                 cache_purge_frequency=30):
        self.completer = completer
        self.pending_requests = TimedCache(cache_keep_time,
                                           cache_purge_frequency)
        self._lock = RLock()

    def check_for_block(self, block_id):
        # Ask Completer
        if block_id == "HEAD":
            block = self.completer.get_federation_heads()
            #block = self.completer.get_chain_head()
        else:
            block = self.completer.get_block(block_id)
        return block

    def get_block_by_num(self, block_num):
        return self.completer.get_block_by_num(block_num)

    def check_for_batch(self, batch_id):
        batch = self.completer.get_batch(batch_id)
        return batch

    def check_for_batch_by_transaction(self, transaction_id):
        batch = self.completer.get_batch_by_transaction(transaction_id)
        return batch

    def already_requested(self, requested_id):
        with self._lock:
            if requested_id in self.pending_requests:
                return True
            return False

    def add_request(self, requested_id, connection_id):
        LOGGER.debug("add_request [%s]=%s", requested_id[:8],
                     connection_id[:8])
        with self._lock:
            if requested_id in self.pending_requests:
                if connection_id not in self.pending_requests[requested_id]:
                    self.pending_requests[requested_id] += [connection_id]

            else:
                self.pending_requests[requested_id] = [connection_id]

    def get_request(self, requested_id):
        with self._lock:
            return self.pending_requests.get(requested_id)

    def remove_request(self, requested_id):
        with self._lock:
            if requested_id in self.pending_requests:
                del self.pending_requests[requested_id]
Example #15
    def test_evict_expired(self):
        """ Test that values will be evicted from the
        cache as they time out.
        """

        # use an invasive technique so that we don't have to sleep for
        # the item to expire

        bc = TimedCache(keep_time=1)

        bc["test"] = "value"
        bc["test2"] = "value2"
        self.assertEqual(len(bc), 2)

        # test that the expired item is evicted
        bc.cache["test"].timestamp = bc.cache["test"].timestamp - 2
        bc.purge_expired()
        self.assertEqual(len(bc), 1)
        self.assertFalse("test" in bc)
        self.assertTrue("test2" in bc)
Example #16
    def test_evict_expired(self):
        """ Test that values will be evicted from the
        cache as they time out.
        """

        # use an invasive technique so that we don't have to sleep for
        # the item to expire

        bc = TimedCache(keep_time=1)

        bc["test"] = "value"
        bc["test2"] = "value2"
        self.assertEqual(len(bc), 2)

        # test that the expired item is evicted
        bc.cache["test"].timestamp = bc.cache["test"].timestamp - 2
        bc.purge_expired()
        self.assertEqual(len(bc), 1)
        self.assertFalse("test" in bc)
        self.assertTrue("test2" in bc)
Example #17
    def test_cache(self):
        bc = TimedCache(keep_time=1)

        with self.assertRaises(KeyError):
            bc["test"]

        bc["test"] = "value"

        self.assertEqual(len(bc), 1)

        del bc["test"]
        self.assertFalse("test" in bc)
Example #18
 def __init__(self, block_store, gossip):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     """
     self.gossip = gossip
     self.batch_cache = TimedCache()
     self.block_cache = BlockCache(block_store)
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._on_block_received = None
     self._on_batch_received = None
Example #19
 def __init__(self, block_store, gossip, cache_purge_frequency=30):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     :param cache_purge_frequency (int) The time between purging the
             TimedCaches.
     """
     self.gossip = gossip
     self.batch_cache = TimedCache(cache_purge_frequency)
     self.block_cache = BlockCache(block_store, cache_purge_frequency)
     self._block_store = block_store
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._seen_txns = TimedCache(cache_purge_frequency)
     self._incomplete_batches = TimedCache(cache_purge_frequency)
     self._incomplete_blocks = TimedCache(cache_purge_frequency)
     self._on_block_received = None
     self._on_batch_received = None
     self.lock = RLock()
     self._cache_purge_frequency = cache_purge_frequency
     self._purge_time = time.time() + self._cache_purge_frequency
Example #20
    def handle(self, connection_id, message_content):
        block, _ = message_content

        if block.header_signature in self._seen_cache:
            self.block_dropped_count.inc()
            return HandlerResult(status=HandlerStatus.DROP)

        if not is_valid_block(block):
            LOGGER.debug("requested block's signature is invalid: %s",
                         block.header_signature)
            return HandlerResult(status=HandlerStatus.DROP)

        self._seen_cache = TimedCache()
        return HandlerResult(status=HandlerStatus.PASS)
Example #21
class Responder(object):
    def __init__(self,
                 completer,
                 cache_keep_time=300,
                 cache_purge_frequency=30):
        self.completer = completer
        self.pending_requests = TimedCache(cache_keep_time,
                                           cache_purge_frequency)
        self._lock = RLock()

    def check_for_block(self, block_id):
        # Ask Completer
        if block_id == "HEAD":
            block = self.completer.get_chain_head()
        else:
            block = self.completer.get_block(block_id)
        return block

    def check_for_batch(self, batch_id):
        batch = self.completer.get_batch(batch_id)
        return batch

    def check_for_batch_by_transaction(self, transaction_id):
        batch = self.completer.get_batch_by_transaction(transaction_id)
        return batch

    def already_requested(self, requested_id):
        with self._lock:
            if requested_id in self.pending_requests:
                return True
            return False

    def add_request(self, requested_id, connection_id):
        with self._lock:
            if requested_id in self.pending_requests:
                if connection_id not in self.pending_requests[requested_id]:
                    self.pending_requests[requested_id] += [connection_id]

            else:
                self.pending_requests[requested_id] = [connection_id]

    def get_request(self, requested_id):
        with self._lock:
            return self.pending_requests.get(requested_id)

    def remove_request(self, requested_id):
        with self._lock:
            if requested_id in self.pending_requests:
                del self.pending_requests[requested_id]
Example #22
    def test_access_update(self):

        bc = TimedCache(keep_time=1, purge_frequency=0)

        bc["test"] = "value"
        bc["test2"] = "value2"
        self.assertEqual(len(bc), 2)

        bc["test"] = "value"
        bc.cache["test"].timestamp = bc.cache["test"].timestamp - 2
        bc["test"]  # access to update timestamp
        bc["test2"] = "value2"  # set value to activate purge
        self.assertEqual(len(bc), 2)
        self.assertTrue("test" in bc)
        self.assertTrue("test2" in bc)
Example #23
    def handle(self, connection_id, message_content):
        block_response_message = GossipBlockResponse()
        block_response_message.ParseFromString(message_content)
        block = Block()
        block.ParseFromString(block_response_message.content)
        if block.header_signature in self._seen_cache:
            self.block_dropped_count.inc()
            return HandlerResult(status=HandlerStatus.DROP)

        if not is_valid_block(block):
            LOGGER.debug("requested block's signature is invalid: %s",
                         block.header_signature)
            return HandlerResult(status=HandlerStatus.DROP)

        self._seen_cache = TimedCache()
        return HandlerResult(status=HandlerStatus.PASS)
Example #24
 def __init__(self, block_store, gossip, cache_purge_frequency=30):
     """
     :param block_store (dictionary) The block store shared with the journal
     :param gossip (gossip.Gossip) Broadcasts block and batch request to
             peers
     :param cache_purge_frequency (int) The time between purging the
             TimedCaches.
     """
     self.gossip = gossip
     self.batch_cache = TimedCache(cache_purge_frequency)
     self.block_cache = BlockCache(block_store, cache_purge_frequency)
     # avoid throwing away the genesis block
     self.block_cache[NULL_BLOCK_IDENTIFIER] = None
     self._seen_txns = TimedCache(cache_purge_frequency)
     self._incomplete_batches = TimedCache(cache_purge_frequency)
     self._incomplete_blocks = TimedCache(cache_purge_frequency)
     self._on_block_received = None
     self._on_batch_received = None
     self.lock = RLock()
     self._cache_purge_frequency = cache_purge_frequency
     self._purge_time = time.time() + self._cache_purge_frequency
Example #25
    def __init__(self,
                 block_cache,
                 transaction_committed,
                 get_committed_batch_by_id,
                 get_committed_batch_by_txn_id,
                 get_chain_head,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300):
        """
        :param block_cache (dictionary) The block cache to use for getting
            and storing blocks
        :param transaction_committed (fn(transaction_id) -> bool) A function to
            determine if a transaction is committed.
        :param get_committed_batch_by_id (fn(batch_id) -> Batch) A function
            for retrieving a committed batch by its batch id.
        :param get_committed_batch_by_txn_id
            (fn(transaction_id) -> Batch) A function for retrieving a committed
            batch from a committed transaction id.
        :param get_chain_head (fn() -> Block) A function for getting the
            current chain head.
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects. WARNING this time should always be less than
            cache_keep_time or the validator can get into a state where it
            fails to make progress because it thinks it has already requested
            something that it is missing.
        """
        self._gossip = gossip
        self._batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self._block_cache = block_cache

        self._transaction_committed = transaction_committed
        self._get_committed_batch_by_id = get_committed_batch_by_id
        self._get_committed_batch_by_txn_id = get_committed_batch_by_txn_id
        self._get_chain_head = get_chain_head

        # avoid throwing away the genesis block
        self._block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        # Tracks how many times an unsatisfied dependency is found
        self._unsatisfied_dependency_count = COLLECTOR.counter(
            'unsatisfied_dependency_count', instance=self)
        # Tracks the length of the completer's _seen_txns
        self._seen_txns_length = COLLECTOR.gauge(
            'seen_txns_length', instance=self)
        self._seen_txns_length.set_value(0)
        # Tracks the length of the completer's _incomplete_blocks
        self._incomplete_blocks_length = COLLECTOR.gauge(
            'incomplete_blocks_length', instance=self)
        self._incomplete_blocks_length.set_value(0)
        # Tracks the length of the completer's _incomplete_batches
        self._incomplete_batches_length = COLLECTOR.gauge(
            'incomplete_batches_length', instance=self)
        self._incomplete_batches_length.set_value(0)
Example #26
 def __init__(self, block_store):
     self.batch_cache = TimedCache()
     self.block_cache = BlockCache(block_store)
     self._on_block_received = None
     self._on_batch_received = None
Example #27
 def __init__(self):
     self._seen_cache = TimedCache()
Example #28
 def __init__(self, completer, cache_purge_frequency=30):
     self.completer = completer
     self.pending_requests = TimedCache(cache_purge_frequency)
     self._cache_purge_frequency = cache_purge_frequency
     self._purge_time = time.time() + self._cache_purge_frequency
     self._lock = RLock()
Example #29
 def __init__(self, network, allowed_frequency=10):
     self._network = network
     self._last_message = TimedCache()
     self._allowed_frequency = allowed_frequency
Example #30
class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and all
    the batches are present in the batch list and in the order specified by the
    block header. If the predecessor or a batch is missing, a request message
    is sent out over the gossip network. It also checks that all batches
    have their dependencies satisfied; otherwise it requests the batch that
    contains the missing transaction.
    """
    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=300,
                 cache_purge_frequency=30,
                 requested_keep_time=1200):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
            TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
            TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
            of requested objects.
        """
        self.gossip = gossip

        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._pending_heads = {}
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self._has_genesis_federation_block = None
        self._is_nests_ready = None
        self._incomplete_loop = False
        self.lock = RLock()
        self.gossip.set_add_batch(self.add_batch)

    @property
    def incomplete_blocks(self):
        return [
            "{}({})".format(bid[:8], len(blks))
            for bid, blks in self._incomplete_blocks.items()
        ]

    @property
    def requested(self):
        return ["{}".format(bid[:8]) for bid in self._requested.keys()]

    def _complete_block(self, block, nest=False):
        """ Check the block to see if it is complete and if it can be passed to
            the journal. If the block's predecessor is not in the block_cache
            the predecessor is requested and the current block is added to
            the incomplete_block cache. If the block.batches and
            block.header.batch_ids are not the same length, the batch_id list
            is checked against the batch_cache to see if the batch_list can be
            built. If any batches are missing from the block and we do not have
            the batches in the batch_cache, they are requested. The block is
            then added to the incomplete_block cache. If we can complete the
            block, a new batch list is created in the correct order and added
            to the block. The block is now considered complete and is returned.
            If block.batches and block.header.batch_ids are the same length,
            the block's batch list needs to be in the same order as the
            block.header.batch_ids list. If the block has all of its expected
            batches but are not in the correct order, the batch list is rebuilt
            and added to the block. Once a block has the correct batch list it
            is added to the block_cache and is returned.

        """
        def insert_lost_block(block):
            for pid, blocks in self._incomplete_blocks.items():
                LOGGER.debug("CHECK INSER LOST for=%s blocks=%s", pid[:8], [
                    "{}.{}".format(blk.block_num, blk.header_signature[:8])
                    for blk in blocks
                ])
                for i, blk in enumerate(blocks):
                    if blk.block_num == block.block_num + 1:
                        blocks.insert(i, block)
                        LOGGER.debug("INSER LOST for=%s blocks=%s", pid[:8], [
                            "{}.{}".format(blk.block_num,
                                           blk.header_signature[:8])
                            for blk in blocks
                        ])
                        return

        def check_missing_predecessor(previous_block_id, previous_block_num):
            # we should take all chain of predecessors
            if previous_block_id not in self.block_cache:
                if self._has_block(block.previous_block_id):
                    return False
                return True
            else:
                # check num

                blk = self.block_cache[previous_block_id]
                LOGGER.debug("Check in cache block: %s", blk)
                if blk is not None and Federation.is_diff_feder(
                        previous_block_num, blk.block_num):
                    LOGGER.debug("Check in cache num=%s~%s block: %s",
                                 previous_block_num, blk.block_num,
                                 previous_block_id[:8])
                    try:
                        self._block_store.get_block_by_number(
                            int(previous_block_num))
                    except KeyError:
                        return True
                    return False
                return False
                #return blk is not None and blk.block_num != previous_block_num

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None
        """
        Check the block's federation and the corresponding nest;
        in case the nest is absent, keep this block until the nest appears.

        """
        if nest and not self._has_genesis_federation(block.block_num):
            LOGGER.debug("Keep until get federation nest for block: %s", block)
            if block.block_num not in self._pending_heads:
                self._pending_heads[block.block_num] = block
            return None
        #LOGGER.debug("complete block=%s",block)
        previous_block_num = Federation.dec_feder_num(block.block_num)
        if check_missing_predecessor(block.previous_block_id,
                                     previous_block_num):
            if True:  #not self._has_block(block.previous_block_id):
                """
                block incompleted ask missing block
                """
                LOGGER.debug("Incomplete block=%s.%s incompletes=%s",
                             block.block_num, block.header_signature[:8],
                             len(self.incomplete_blocks))
                #previous_block_num = Federation.dec_feder_num(block.block_num)
                if previous_block_num not in self._incomplete_blocks:
                    self._incomplete_blocks[previous_block_num] = [block]
                if block.previous_block_id not in self._incomplete_blocks:
                    self._incomplete_blocks[block.previous_block_id] = [block]
                elif block not in self._incomplete_blocks[
                        block.previous_block_id]:
                    # insert before block with more number
                    self._incomplete_blocks[block.previous_block_id] += [block]
                    """
                    for i, blk in enumerate(self._incomplete_blocks[block.previous_block_id]):
                        if blk.block_num == block.block_num:
                            break
                        if blk.block_num >  block.block_num:
                            # insert before
                            self._incomplete_blocks[block.previous_block_id].insert(i,block)
                            LOGGER.debug("incomplete_blocks key=%s  blocks=%s",block.previous_block_id[:8],[blk.header_signature[:8] for blk in self._incomplete_blocks[block.previous_block_id]])
                            break
                    """
                # We have already requested the block, do not do so again
                if block.previous_block_id in self._requested:
                    LOGGER.debug("Missing predecessor=%s already requested=%s",
                                 block.previous_block_id[:8], self.requested)
                    return None
                #if block.header_signature not in self._incomplete_blocks:
                # it could be block from others branch try to find
                #insert_lost_block(block)

                LOGGER.debug("Request missing predecessor: %s.%s IS=%s num=%s",
                             block.block_num, block.previous_block_id[:8],
                             block.header_signature in self._incomplete_blocks,
                             len(self._incomplete_blocks))
                self._requested[block.previous_block_id] = None
                self.gossip.broadcast_block_request(block.previous_block_id,
                                                    block.block_num)
                return None

        # Check for the same number of batch_ids and batches.
        # If they differ, start building the batch list; otherwise there is
        # a batch that does not belong and the block should be dropped.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # used to supplement batch_cache, contains batches already in block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]

                    # We have already requested the batch, do not do so again
                    if batch_id in self._requested:
                        return None
                    self._requested[batch_id] = None
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            del block.batches[:]
            # reset batches with full list batches
            block.batches.extend(batches)
            if block.header_signature in self._requested:
                del self._requested[block.header_signature]
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]
                LOGGER.debug("Block completed BLOCK=%s", block)
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                # Clear batches from block
                del block.batches[:]
                # reset batches with full list batches
                if batches is not None:
                    block.batches.extend(batches)
                else:
                    return None

                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]

                return block
            else:
                LOGGER.debug(
                    "Block.header.batch_ids does not match set of batches in block.batches Dropping %s",
                    block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                        dependency):
                    LOGGER.debug(
                        "Transaction %s in batch %s has unsatisfied dependency: %s",
                        txn.header_signature, batch.header_signature,
                        dependency)

                    # Check to see if the dependency has already been requested
                    if dependency not in self._requested:
                        dependencies.append(dependency)
                        self._requested[dependency] = None
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(dependencies)

        return valid

    def _add_seen_txns(self, batch):
        for txn in batch.transactions:
            self._seen_txns[txn.header_signature] = batch.header_signature

    def _process_incomplete_batches(self, key):
        # Keys are transaction_id
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key, num_mode=False):
        # Keys are either a block_id or batch_id
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)
            LOGGER.debug("process_incomplete_blocks mode=%s KEY=%s blocks=%s",
                         num_mode, key[:8], self.incomplete_blocks)
            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    LOGGER.debug(
                        "process_incomplete_blocks MY_KEY=%s inc_blocks=%s",
                        my_key[:8], [
                            "{}.{}".format(blk.block_num,
                                           blk.header_signature[:8])
                            for blk in inc_blocks
                        ])
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[
                                inc_block.header_signature] = inc_block
                            LOGGER.debug("ADD BLOCK=%s.%s INCOMP",
                                         inc_block.block_num,
                                         inc_block.header_signature[:8])
                            self._on_block_received(inc_block)
                            to_complete.append(
                                inc_block.header_signature
                                if not num_mode else str(inc_block.block_num))
                    del self._incomplete_blocks[my_key]

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        # this is the function that sends the batch to the publisher queue
        # (block_publisher.queue_batch)
        self._on_batch_received = on_batch_received_func

    def set_chain_has_block(self, set_chain_has_block,
                            set_has_genesis_federation_block,
                            set_is_nests_ready):
        self._has_block = set_chain_has_block
        self._has_genesis_federation_block = set_has_genesis_federation_block
        self._is_nests_ready = set_is_nests_ready

    @property
    def is_pending_head(self):
        mode = (len(self._pending_heads) >
                0) or (not self._is_nests_ready()) or self._incomplete_loop
        LOGGER.debug(
            "IS_PENDING_HEAD nest_ready=%s incomp=%s heads=%s mode=%s\n",
            self._is_nests_ready(), self._incomplete_loop,
            len(self._pending_heads), mode)
        return mode

    def recover_block(self, blkws):
        LOGGER.debug("RECOVER BLOCKS=%s", len(blkws))
        with self.lock:
            for blkw in blkws:
                self._on_block_received(blkw)

    def add_block(self, block, check_pending=False, nest=False):
        with self.lock:
            if check_pending:
                if len(self._pending_heads) == 0:
                    return
                #take from pending queue
                _, blkw = self._pending_heads.popitem()
            else:
                blkw = BlockWrapper(block)
            # new block from net

            while True:
                block = self._complete_block(blkw, nest)

                if block is not None:
                    # completed block - in sync mode genesis block
                    self.block_cache[block.header_signature] = blkw
                    LOGGER.debug("ADD BLOCK=%s.%s PROCESS INCOMPLETED",
                                 block.block_num, block.header_signature[:8])
                    """
                    PUT BLOCK into chain controller queue
                    """
                    self._on_block_received(blkw)
                    # take all rest blocks
                    #self._process_incomplete_blocks(block.header_signature)
                    self._incomplete_loop = True
                    self._process_incomplete_blocks(str(block.block_num), True)
                    self._incomplete_loop = False
                    LOGGER.debug(
                        "ADD INCOMPLETED BLOCKS DONE  nest_ready=%s pending=%s\n",
                        self._is_nests_ready(),
                        [bnum for bnum in self._pending_heads.keys()])
                    if len(self._pending_heads) == 0:
                        break
                    if not self._is_nests_ready():
                        break
                    _, blkw = self._pending_heads.popitem()

                else:
                    break

    def add_batch(self, batch, recomm=None):
        """
        candidate_id - use it for DAG version (id,block_num)
        """
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(
                    batch,
                    recomm)  # send to publisher block_publisher.queue_batch
                self._process_incomplete_blocks(batch.header_signature)
                if batch.header_signature in self._requested:
                    del self._requested[batch.header_signature]
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        if txn.header_signature in self._requested:
                            del self._requested[txn.header_signature]
                        self._process_incomplete_batches(txn.header_signature)

    def get_chain_head(self):
        """Returns the block which is the current head of the chain.

        Returns:
            BlockWrapper: The head of the chain.
        """
        with self.lock:
            #LOGGER.debug("federation_heads=%s",[blk.header_signature[:8] for blk in self._block_store.federation_heads])
            return self._block_store.chain_head

    def _has_genesis_federation(self, block_num):
        """
        For a non-genesis federation, check whether the first block of the
        genesis federation is present in the block store or in the queue.
        """
        fnum, _ = Federation.feder_num_to_num(block_num)
        if fnum < 2:
            return True
        # not genesis - check genesis first block into chain queue or store
        return self._has_genesis_federation_block()

    def _has_federation_nest(self, block_num):
        with self.lock:
            return self._block_store.has_federation(block_num)

    def get_federation_heads(self, feder=None):
        # all heads, or only the given federation
        with self.lock:
            return self._block_store.federation_heads

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_block_by_num(self, block_num):
        with self.lock:
            return self._block_store.get_block_by_number(block_num)

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(transaction_id)
                except ValueError:
                    return None
Example #31
class BatchTracker(StoreUpdateObserver,
                   InvalidTransactionObserver,
                   PendingBatchObserver):
    """Tracks batch statuses for this local validator, allowing interested
    components to check where a batch is in the validation process. It should
    only be relied on for batches submitted locally, and is not persisted
    after restart.

    When a batch moves from one component to another, the appropriate notify
    method should be called in the appropriate component, as specified by the
    relevant Observer class, and implemented here.

    Args:
        block_store (BlockStore): For querying if a batch is committed
    """
    def __init__(self, block_store):
        self._block_store = block_store
        self._batch_info = TimedCache(keep_time=CACHE_KEEP_TIME)
        self._invalid = TimedCache(keep_time=CACHE_KEEP_TIME)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}

    def notify_store_updated(self):
        """Removes batches from the pending cache if found in the block store,
        and notifies any observers.
        """
        with self._lock:
            for batch_id in self._pending.copy():
                if self._block_store.has_batch(batch_id):
                    self._pending.remove(batch_id)
                    self._update_observers(batch_id, BatchStatus.COMMITTED)

    def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
        """Adds a batch id to the invalid cache along with the id of the
        transaction that was rejected and any error message or extended data.
        Removes that batch id from the pending set. The cache is only
        temporary, and the batch info will be purged after one hour.

        Args:
            txn_id (str): The id of the invalid transaction
            message (str, optional): Message explaining why batch is invalid
            extended_data (bytes, optional): Additional error data
        """
        invalid_txn_info = {'id': txn_id}
        if message is not None:
            invalid_txn_info['message'] = message
        if extended_data is not None:
            invalid_txn_info['extended_data'] = extended_data

        with self._lock:
            for batch_id, txn_ids in self._batch_info.items():
                if txn_id in txn_ids:
                    if batch_id not in self._invalid:
                        self._invalid[batch_id] = [invalid_txn_info]
                    else:
                        self._invalid[batch_id].append(invalid_txn_info)
                    self._pending.discard(batch_id)
                    self._update_observers(batch_id, BatchStatus.INVALID)
                    return

    def notify_batch_pending(self, batch):
        """Adds a Batch id to the pending cache, with its transaction ids.

        Args:
            batch (Batch): The pending batch
        """
        txn_ids = {t.header_signature for t in batch.transactions}
        with self._lock:
            self._pending.add(batch.header_signature)
            self._batch_info[batch.header_signature] = txn_ids
            self._update_observers(batch.header_signature, BatchStatus.PENDING)

    def get_status(self, batch_id):
        """Returns the status enum for a batch.

        Args:
            batch_id (str): The id of the batch to get the status for

        Returns:
            int: The status enum
        """
        with self._lock:
            if self._block_store.has_batch(batch_id):
                return BatchStatus.COMMITTED
            if batch_id in self._invalid:
                return BatchStatus.INVALID
            if batch_id in self._pending:
                return BatchStatus.PENDING
            return BatchStatus.UNKNOWN

    def get_statuses(self, batch_ids):
        """Returns a statuses dict for the requested batches.

        Args:
            batch_ids (list of str): The ids of the batches to get statuses for

        Returns:
            dict: A dict with keys of batch ids, and values of status enums
        """
        with self._lock:
            return {b: self.get_status(b) for b in batch_ids}

    def get_invalid_txn_info(self, batch_id):
        """Fetches the id of the Transaction that failed within a particular
        Batch, as well as any error message or other data about the failure.

        Args:
            batch_id (str): The id of the Batch containing an invalid txn

        Returns:
            list of dict: A list of dicts with three possible keys:
                * 'id' - the header_signature of the invalid Transaction
                * 'message' - the error message sent by the TP
                * 'extended_data' - any additional data sent by the TP
        """
        try:
            return self._invalid[batch_id]
        except KeyError:
            # If batch has been purged from the invalid cache before its txn
            # info is fetched, return an empty array of txn info
            return []

    def watch_statuses(self, observer, batch_ids):
        """Allows a component to register to be notified when a set of
        batches is no longer PENDING. Expects to be able to call the
        "notify_batches_finished" method on the registered component, sending
        the statuses of the batches.

        Args:
            observer (object): Must implement "notify_batches_finished" method
            batch_ids (list of str): The ids of the batches to watch
        """
        with self._lock:
            statuses = self.get_statuses(batch_ids)
            if self._has_no_pendings(statuses):
                observer.notify_batches_finished(statuses)
            else:
                self._observers[observer] = statuses

    def _update_observers(self, batch_id, status):
        """Updates each observer tracking a particular batch with its new
        status. If all statuses are no longer pending, notifies the observer
        and removes it from the list.
        """
        for observer, statuses in self._observers.copy().items():
            if batch_id in statuses:
                statuses[batch_id] = status
                if self._has_no_pendings(statuses):
                    observer.notify_batches_finished(statuses)
                    self._observers.pop(observer)

    def _has_no_pendings(self, statuses):
        """Returns True if a statuses dict has no PENDING statuses.
        """
        return all(s != BatchStatus.PENDING for s in statuses.values())
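
watch_statuses only requires the registered observer to expose a notify_batches_finished method. A hypothetical observer that blocks a caller until its watched batches leave the PENDING state might look like the sketch below (the class name, the threading.Event, and the usage lines are illustrative assumptions, not part of the validator).

from threading import Event


class BatchStatusWaiter(object):
    """Hypothetical observer for BatchTracker.watch_statuses."""

    def __init__(self):
        self._done = Event()
        self.statuses = None

    def notify_batches_finished(self, statuses):
        # Called by the tracker once no watched batch is still PENDING
        self.statuses = statuses
        self._done.set()

    def wait(self, timeout=None):
        # Block the caller until final statuses arrive (or timeout)
        self._done.wait(timeout)
        return self.statuses


# Usage sketch, assuming a BatchTracker instance and a submitted batch:
#     waiter = BatchStatusWaiter()
#     batch_tracker.watch_statuses(waiter, [batch.header_signature])
#     statuses = waiter.wait(timeout=30)
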
Example #32
 def __init__(self, completer, cache_purge_frequency=30):
     self.completer = completer
     self.pending_requests = TimedCache(cache_purge_frequency)
     self._cache_purge_frequency = cache_purge_frequency
     self._purge_time = time.time() + self._cache_purge_frequency
     self._lock = RLock()
Example #33
 def __init__(self, responder, gossip):
     self._responder = responder
     self._gossip = gossip
     self._seen_requests = TimedCache(CACHE_KEEP_TIME)
Example #34
class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and all
    the batches are present in the batch list and in the order specified by the
    block header. If the predecessor or a batch is missing, a request message
    is sent out over the gossip network. It also checks that all batches
    have their dependencies satisfied; otherwise it requests the batch that
    contains the missing transaction.
    """
    def __init__(self, block_store, gossip, cache_purge_frequency=30):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_purge_frequency (int) The time between purging the
                TimedCaches.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_purge_frequency)
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self.lock = RLock()
        self._cache_purge_frequency = cache_purge_frequency
        self._purge_time = time.time() + self._cache_purge_frequency

    def _complete_block(self, block):
        """ Check the block to see if it is complete and if it can be passed to
            the journal. If the block's predecessor is not in the block_cache
            the predecessor is requested and the current block is added to
            the incomplete_block cache. If block.batches and
            block.header.batch_ids are not the same length, the batch_id list
            is checked against the batch_cache to see if the batch_list can be
            built. If any batches are missing from the block and we do not have
            the batches in the batch_cache, they are requested. The block is
            then added to the incomplete_block cache. If we can complete the
            block, a new batch list is created in the correct order and added
            to the block. The block is now considered complete and is returned.
            If block.batches and block.header.batch_ids are the same length,
            the block's batch list needs to be in the same order as the
            block.header.batch_ids list. If the block has all of its expected
            batches but they are not in the correct order, the batch list is
            rebuilt and added to the block. Once a block has the correct batch
            list it is added to the block_cache and is returned.

        """

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None

        if block.previous_block_id not in self.block_cache:
            LOGGER.debug("Request missing predecessor: %s",
                         block.previous_block_id)
            if block.previous_block_id not in self._incomplete_blocks:
                self._incomplete_blocks[block.previous_block_id] = [block]
            elif block not in self._incomplete_blocks[block.previous_block_id]:
                self._incomplete_blocks[block.previous_block_id] += [block]

            self.gossip.broadcast_block_request(block.previous_block_id)
            return None

        # Compare the number of batches to the number of batch_ids. If the
        # block has extra batches, one does not belong and the block is
        # dropped. If batches are missing, try to build the batch list below.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # used to supplement batch_cache, contains batches already in block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            del block.batches[:]
            # reset batches with full list batches
            block.batches.extend(batches)
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                # Clear batches from block
                del block.batches[:]
                # reset batches with full list batches
                if batches is not None:
                    block.batches.extend(batches)
                else:
                    return None

                return block
            else:
                LOGGER.debug(
                    "Block.header.batch_ids does not match set of "
                    "batches in block.batches Dropping %s", block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                        dependency):
                    LOGGER.debug(
                        "Transaction %s in batch %s has "
                        "unsatisfied dependency: %s", txn.header_signature,
                        batch.header_signature, dependency)

                    dependencies.append(dependency)
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(dependencies)

        return valid

    def _add_seen_txns(self, batch):
        for txn in batch.transactions:
            if txn.header_signature in self._seen_txns and \
                    self._seen_txns[txn.header_signature] == \
                    batch.header_signature:
                break
            self._seen_txns[txn.header_signature] = batch.header_signature

    def _process_incomplete_batches(self, key):
        # Keys are transaction_id
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key):
        # Keys are either a block_id or batch_id
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)

            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[inc_block.header_signature] = \
                                inc_block
                            self._on_block_received(inc_block)
                            to_complete.append(inc_block.header_signature)
                    del self._incomplete_blocks[my_key]

    def _purge_caches(self):
        if self._purge_time < time.time():
            LOGGER.debug("Purges caches of expired entries.")
            self._seen_txns.purge_expired()
            self._incomplete_batches.purge_expired()
            self._incomplete_blocks.purge_expired()
            self.batch_cache.purge_expired()
            self.block_cache.purge_expired()
            self._purge_time = time.time() + self._cache_purge_frequency

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        self._on_batch_received = on_batch_received_func

    def add_block(self, block):
        with self.lock:
            blkw = BlockWrapper(block)
            block = self._complete_block(blkw)
            if block is not None:
                self.block_cache[block.header_signature] = blkw
                self._on_block_received(blkw)
                self._process_incomplete_blocks(block.header_signature)
                self._purge_caches()

    def add_batch(self, batch):
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(batch)
                self._process_incomplete_blocks(batch.header_signature)
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        self._process_incomplete_batches(txn.header_signature)

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(transaction_id)
                except ValueError:
                    return None
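A minimal sketch of how the Completer above is typically driven: the delivery callbacks are registered first, then blocks and batches are fed in as they arrive from the network. The no-op gossip stub and the callback functions below are assumptions for illustration, not part of the source.

class _NoopGossip:
    """Stand-in for gossip.Gossip; records requests instead of broadcasting."""
    def broadcast_block_request(self, block_id):
        print("would request block", block_id)

    def broadcast_batch_by_batch_id_request(self, batch_id):
        print("would request batch", batch_id)

    def broadcast_batch_by_transaction_id_request(self, txn_ids):
        print("would request batches for txns", txn_ids)

def _on_block_received(block):
    print("complete block:", block.header_signature)

def _on_batch_received(batch):
    print("complete batch:", batch.header_signature)

# completer = Completer(block_store, _NoopGossip())
# completer.set_on_block_received(_on_block_received)
# completer.set_on_batch_received(_on_batch_received)
# completer.add_batch(batch)  # delivered only once every dependency is satisfied
# completer.add_block(block)  # delivered only once predecessor and batches are present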
Example #35
0
 def __init__(self, network):
     self._network = network
     self._challenge_payload_cache = TimedCache(
         keep_time=AUTHORIZATION_CACHE_TIMEOUT)
Example #36
0
class BatchTracker(ChainObserver, InvalidTransactionObserver,
                   PendingBatchObserver):
    """Tracks batch statuses for this local validator, allowing interested
    components to check where a batch is in the validation process. It should
    only be relied on for batches submitted locally, and is not persisted
    after restart.

    When a batch moves from one component to another, that component should
    call the corresponding notify method, as specified by the relevant
    Observer class and implemented here.

    Args:
        block_store (BlockStore): For querying if a batch is committed
        cache_keep_time (float): Time in seconds to keep values in TimedCaches
        cache_purge_frequency (float): Time between purging the TimedCaches
    """
    def __init__(self,
                 block_store,
                 cache_keep_time=600,
                 cache_purge_frequency=30):
        self._block_store = block_store
        self._batch_info = TimedCache(cache_keep_time, cache_purge_frequency)
        self._invalid = TimedCache(cache_keep_time, cache_purge_frequency)
        self._pending = set()

        self._lock = RLock()
        self._observers = {}

    def chain_update(self, block, receipts):
        """Removes batches from the pending cache if found in the block store,
        and notifies any observers.
        """
        with self._lock:
            for batch_id in self._pending.copy():
                if self._block_store.has_batch(batch_id):
                    self._pending.remove(batch_id)
                    self._update_observers(batch_id, BatchStatus.COMMITTED)

    def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
        """Adds a batch id to the invalid cache along with the id of the
        transaction that was rejected and any error message or extended data.
        Removes that batch id from the pending set. The cache is only
        temporary, and the invalid info will be purged once the cache keep
        time has elapsed.

        Args:
            txn_id (str): The id of the invalid transaction
            message (str, optional): Message explaining why batch is invalid
            extended_data (bytes, optional): Additional error data
        """
        invalid_txn_info = {'id': txn_id}
        if message is not None:
            invalid_txn_info['message'] = message
        if extended_data is not None:
            invalid_txn_info['extended_data'] = extended_data

        with self._lock:
            for batch_id, txn_ids in self._batch_info.items():
                if txn_id in txn_ids:
                    if batch_id not in self._invalid:
                        self._invalid[batch_id] = [invalid_txn_info]
                    else:
                        self._invalid[batch_id].append(invalid_txn_info)
                    self._pending.discard(batch_id)
                    self._update_observers(batch_id, BatchStatus.INVALID)
                    return

    def notify_batch_pending(self, batch):
        """Adds a Batch id to the pending cache, with its transaction ids.

        Args:
            batch (Batch): The pending batch
        """
        txn_ids = {t.header_signature for t in batch.transactions}
        with self._lock:
            self._pending.add(batch.header_signature)
            self._batch_info[batch.header_signature] = txn_ids
            self._update_observers(batch.header_signature, BatchStatus.PENDING)

    def get_status(self, batch_id):
        """Returns the status enum for a batch.

        Args:
            batch_id (str): The id of the batch to get the status for

        Returns:
            int: The status enum
        """
        with self._lock:
            if self._block_store.has_batch(batch_id):
                return BatchStatus.COMMITTED
            if batch_id in self._invalid:
                return BatchStatus.INVALID
            if batch_id in self._pending:
                return BatchStatus.PENDING
            return BatchStatus.UNKNOWN

    def get_statuses(self, batch_ids):
        """Returns a statuses dict for the requested batches.

        Args:
            batch_ids (list of str): The ids of the batches to get statuses for

        Returns:
            dict: A dict with keys of batch ids, and values of status enums
        """
        with self._lock:
            return {b: self.get_status(b) for b in batch_ids}

    def get_invalid_txn_info(self, batch_id):
        """Fetches the id of the Transaction that failed within a particular
        Batch, as well as any error message or other data about the failure.

        Args:
            batch_id (str): The id of the Batch containing an invalid txn

        Returns:
            list of dict: A list of dicts with three possible keys:
                * 'id' - the header_signature of the invalid Transaction
                * 'message' - the error message sent by the TP
                * 'extended_data' - any additional data sent by the TP
        """
        try:
            return self._invalid[batch_id]
        except KeyError:
            # If batch has been purged from the invalid cache before its txn
            # info is fetched, return an empty array of txn info
            return []

    def watch_statuses(self, observer, batch_ids):
        """Allows a component to register to be notified when a set of
        batches is no longer PENDING. Expects to be able to call the
        "notify_batches_finished" method on the registered component, sending
        the statuses of the batches.

        Args:
            observer (object): Must implement "notify_batches_finished" method
            batch_ids (list of str): The ids of the batches to watch
        """
        with self._lock:
            statuses = self.get_statuses(batch_ids)
            if self._has_no_pendings(statuses):
                observer.notify_batches_finished(statuses)
            else:
                self._observers[observer] = statuses

    def _update_observers(self, batch_id, status):
        """Updates each observer tracking a particular batch with its new
        status. If all statuses are no longer pending, notifies the observer
        and removes it from the list.
        """
        for observer, statuses in self._observers.copy().items():
            if batch_id in statuses:
                statuses[batch_id] = status
                if self._has_no_pendings(statuses):
                    observer.notify_batches_finished(statuses)
                    self._observers.pop(observer)

    def _has_no_pendings(self, statuses):
        """Returns True if a statuses dict has no PENDING statuses.
        """
        return all(s != BatchStatus.PENDING for s in statuses.values())
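A rough sketch of the status lifecycle the tracker above implements. Only has_batch is needed from the block store here, so a small in-memory stub suffices; the stub and the commented usage are assumptions for illustration, not part of the source.

class _InMemoryBlockStore:
    """Minimal stand-in for BlockStore, tracking committed batch ids."""
    def __init__(self):
        self._batches = set()

    def has_batch(self, batch_id):
        return batch_id in self._batches

    def add_committed_batch(self, batch_id):
        self._batches.add(batch_id)

# block_store = _InMemoryBlockStore()
# tracker = BatchTracker(block_store)
# tracker.notify_batch_pending(batch)                # status becomes PENDING
# tracker.notify_txn_invalid(txn_id, "bad payload")  # the owning batch becomes INVALID
# block_store.add_committed_batch(batch.header_signature)
# tracker.chain_update(block=None, receipts=None)    # committed batches become COMMITTED
# tracker.get_statuses([batch.header_signature])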
Example #37
0
 def __init__(self):
     self._seen_cache = TimedCache()
     self._batch_dropped_count = COLLECTOR.counter(
         'already_validated_batch_dropped_count', instance=self)
Example #38
0
class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and all
    the batches are present in the batch list and in the order specified by the
    block header. If the predecessor or a batch is missing, a request message
    is sent out over the gossip network. It also checks that all batches
    have their dependencies satisfied; otherwise, it requests the batch that
    contains the missing transaction.
    """
    def __init__(self, block_store, gossip, cache_purge_frequency=30):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_purge_frequency (int) The time between purging the
                TimedCaches.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_purge_frequency)
        self.block_cache = BlockCache(block_store, cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        self._seen_txns = TimedCache(cache_purge_frequency)
        self._incomplete_batches = TimedCache(cache_purge_frequency)
        self._incomplete_blocks = TimedCache(cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self.lock = RLock()
        self._cache_purge_frequency = cache_purge_frequency
        self._purge_time = time.time() + self._cache_purge_frequency

    def _complete_block(self, block):
        """ Check the block to see if it is complete and if it can be passed to
            the journal. If the block's predecessor is not in the block_cache
            the predecessor is requested and the current block is added to
            the incomplete_block cache. If block.batches and
            block.header.batch_ids are not the same length, the batch_id list
            is checked against the batch_cache to see if the batch_list can be
            built. If any batches are missing from the block and we do not have
            the batches in the batch_cache, they are requested. The block is
            then added to the incomplete_block cache. If we can complete the
            block, a new batch list is created in the correct order and added
            to the block. The block is now considered complete and is returned.
            If block.batches and block.header.batch_ids are the same length,
            the block's batch list needs to be in the same order as the
            block.header.batch_ids list. If the block has all of its expected
            batches but they are not in the correct order, the batch list is
            rebuilt and added to the block. Once a block has the correct batch
            list it is added to the block_cache and is returned.

        """

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None

        if block.previous_block_id not in self.block_cache:
            LOGGER.debug("Request missing predecessor: %s",
                         block.previous_block_id)
            if block.previous_block_id not in self._incomplete_blocks:
                self._incomplete_blocks[block.previous_block_id] = [block]
            elif block not in self._incomplete_blocks[block.previous_block_id]:
                self._incomplete_blocks[block.previous_block_id] += [block]

            self.gossip.broadcast_block_request(block.previous_block_id)
            return None

        # Compare the number of batches to the number of batch_ids. If the
        # block has extra batches, one does not belong and the block is
        # dropped. If batches are missing, try to build the batch list below.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # used to supplement batch_cache, contains batches already in block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            del block.batches[:]
            # reset batches with full list batches
            block.batches.extend(batches)
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                # Clear batches from block
                del block.batches[:]
                # reset batches with full list batches
                if batches is not None:
                    block.batches.extend(batches)
                else:
                    return None

                return block
            else:
                LOGGER.debug("Block.header.batch_ids does not match set of "
                             "batches in block.batches Dropping %s", block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                        dependency):
                    LOGGER.debug("Transaction %s in batch %s has "
                                 "unsatisfied dependency: %s",
                                 txn.header_signature,
                                 batch.header_signature,
                                 dependency)

                    dependencies.append(dependency)
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(
                dependencies)

        return valid

    def _add_seen_txns(self, batch):
        for txn in batch.transactions:
            if txn.header_signature in self._seen_txns and \
                    self._seen_txns[txn.header_signature] == \
                    batch.header_signature:
                break
            self._seen_txns[txn.header_signature] = batch.header_signature

    def _process_incomplete_batches(self, key):
        # Keys are transaction_id
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key):
        # Keys are either a block_id or batch_id
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)

            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[inc_block.header_signature] = \
                                inc_block
                            self._on_block_received(inc_block)
                            to_complete.append(inc_block.header_signature)
                    del self._incomplete_blocks[my_key]

    def _purge_caches(self):
        if self._purge_time < time.time():
            LOGGER.debug("Purges caches of expired entries.")
            self._seen_txns.purge_expired()
            self._incomplete_batches.purge_expired()
            self._incomplete_blocks.purge_expired()
            self.batch_cache.purge_expired()
            self.block_cache.purge_expired()
            self._purge_time = time.time() + self._cache_purge_frequency

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        self._on_batch_received = on_batch_received_func

    def add_block(self, block):
        with self.lock:
            blkw = BlockWrapper(block)
            block = self._complete_block(blkw)
            if block is not None:
                self.block_cache[block.header_signature] = blkw
                self._on_block_received(blkw)
                self._process_incomplete_blocks(block.header_signature)
                self._purge_caches()

    def add_batch(self, batch):
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(batch)
                self._process_incomplete_blocks(batch.header_signature)
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        self._process_incomplete_batches(txn.header_signature)

    def get_chain_head(self):
        """Returns the block which is the current head of the chain.

        Returns:
            BlockWrapper: The head of the chain.
        """
        with self.lock:
            return self._block_store.chain_head

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(transaction_id)
                except ValueError:
                    return None
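The rule _complete_block enforces can be reduced to a few lines: a block is only complete when every id in header.batch_ids resolves to a known batch, in exactly that order. A standalone sketch of that check follows; rebuild_batches is an illustrative helper, not part of the source.

def rebuild_batches(batch_ids, available):
    """Return batches ordered by batch_ids, or None if any batch is missing.

    batch_ids: iterable of batch id strings from the block header
    available: dict of batch id -> batch (batch cache plus batches in the block)
    """
    ordered = []
    for batch_id in batch_ids:
        if batch_id not in available:
            return None  # incomplete; the missing batch must be requested via gossip
        ordered.append(available[batch_id])
    return ordered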