Example #1
    def get_random_parents(self, rng: Random) -> Tuple[bytes, bytes]:
        """ Get parents from self.parents plus a random choice from self.parents_any to make it 3 in total.

        Using tuple as return type to make it explicit that the length is always 2.
        """
        assert len(self.must_include) <= 1
        fill = rng.ordered_sample(self.can_include, 2 - len(self.must_include))
        p1, p2 = self.must_include[:] + fill
        return p1, p2
Example #2
    def get_random_parents(self, rng: Random) -> Tuple[bytes, bytes, bytes]:
        """ Get parents from self.parents plus a random choice from self.parents_any to make it 3 in total.

        Return type is tuple just to make it clear that the length is always 3.
        """
        assert 1 <= len(self.parents) <= 3
        more_parents = rng.ordered_sample(self.parents_any,
                                          3 - len(self.parents))
        p1, p2, p3 = self.parents[:] + more_parents
        return p1, p2, p3
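
Both variants rely on an ordered_sample helper on the Random object that is not shown in these examples. A minimal sketch of what it presumably does, assuming it draws k distinct elements while preserving the population's original order (the class and method below are inferred, not confirmed by the source):

import random
from typing import List, Sequence, TypeVar

T = TypeVar('T')

class Random(random.Random):
    def ordered_sample(self, population: Sequence[T], k: int) -> List[T]:
        # Hypothetical sketch: draw k distinct indices, then emit the
        # corresponding elements in the order they appear in `population`.
        indices = sorted(self.sample(range(len(population)), k))
        return [population[i] for i in indices]

Under this reading, the fill keeps the relative order of can_include (or parents_any), so the parent tuple is reproducible for a fixed seed.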
Example #3
    def __init__(self,
                 reactor: IReactorCore,
                 peer_id: Optional[PeerId] = None,
                 network: Optional[str] = None,
                 hostname: Optional[str] = None,
                 pubsub: Optional[PubSubManager] = None,
                 wallet: Optional[BaseWallet] = None,
                 tx_storage: Optional[TransactionStorage] = None,
                 peer_storage: Optional[Any] = None,
                 default_port: int = 40403,
                 wallet_index: bool = False,
                 stratum_port: Optional[int] = None,
                 ssl: bool = True,
                 enable_sync_v1: bool = True,
                 enable_sync_v2: bool = False,
                 capabilities: Optional[List[str]] = None,
                 checkpoints: Optional[List[Checkpoint]] = None,
                 rng: Optional[Random] = None,
                 soft_voided_tx_ids: Optional[Set[bytes]] = None) -> None:
        """
        :param reactor: Twisted reactor which handles the mainloop and the events.
        :param peer_id: Id of this node. If not given, a new one is created.
        :param network: Name of the network this node participates in. Usually it is either testnet or mainnet.
        :type network: string

        :param hostname: The hostname of this node. It is used to generate its entrypoints.
        :type hostname: string

        :param pubsub: If not given, a new one is created.
        :type pubsub: :py:class:`hathor.pubsub.PubSubManager`

        :param tx_storage: Required storage backend.
        :type tx_storage: :py:class:`hathor.transaction.storage.transaction_storage.TransactionStorage`

        :param peer_storage: If not given, a new one is created.
        :type peer_storage: :py:class:`hathor.p2p.peer_storage.PeerStorage`

        :param default_port: Network default port. It is used when only ip addresses are discovered.
        :type default_port: int

        :param wallet_index: If should add a wallet index in the storage
        :type wallet_index: bool

        :param stratum_port: Stratum server port. Stratum server will only be created if it is not None.
        :type stratum_port: Optional[int]
        """
        from hathor.metrics import Metrics
        from hathor.p2p.factory import HathorClientFactory, HathorServerFactory
        from hathor.p2p.manager import ConnectionsManager

        if not (enable_sync_v1 or enable_sync_v2):
            raise TypeError(
                f'{type(self).__name__}() requires at least one enabled sync version'
            )

        if tx_storage is None:
            raise TypeError(
                f'{type(self).__name__}() missing 1 required positional argument: \'tx_storage\''
            )

        self.log = logger.new()

        if rng is None:
            rng = Random()
        self.rng = rng

        self.reactor = reactor
        if hasattr(self.reactor, 'addSystemEventTrigger'):
            self.reactor.addSystemEventTrigger('after', 'shutdown', self.stop)

        self.state: Optional[HathorManager.NodeState] = None
        self.profiler: Optional[Any] = None

        # Hostname used by other peers to reach this node.
        self.hostname = hostname

        # Remote address, which can be different from local address.
        self.remote_address = None

        self.my_peer = peer_id or PeerId()
        self.network = network or 'testnet'

        self.is_started: bool = False

        self.cpu = cpu

        # XXX: first checkpoint must be genesis (height=0)
        self.checkpoints: List[Checkpoint] = checkpoints or []
        self.checkpoints_ready: List[bool] = [False] * len(self.checkpoints)
        if not self.checkpoints or self.checkpoints[0].height > 0:
            self.checkpoints.insert(0,
                                    Checkpoint(0, settings.GENESIS_BLOCK_HASH))
            self.checkpoints_ready.insert(0, True)
        else:
            self.checkpoints_ready[0] = True

        # XXX Should we use a singleton or a new PeerStorage? [msbrogli 2018-08-29]
        self.pubsub = pubsub or PubSubManager(self.reactor)
        self.tx_storage = tx_storage
        self.tx_storage.pubsub = self.pubsub
        if wallet_index and self.tx_storage.with_index:
            assert self.tx_storage.indexes is not None
            self.log.debug('enable wallet indexes')
            self.tx_storage.indexes.enable_address_index(self.pubsub)
            self.tx_storage.indexes.enable_tokens_index()

        self.metrics = Metrics(
            pubsub=self.pubsub,
            avg_time_between_blocks=settings.AVG_TIME_BETWEEN_BLOCKS,
            tx_storage=self.tx_storage,
            reactor=self.reactor,
        )

        self.soft_voided_tx_ids = soft_voided_tx_ids or set()
        self.consensus_algorithm = ConsensusAlgorithm(self.soft_voided_tx_ids)

        self.peer_discoveries: List[PeerDiscovery] = []

        self.ssl = ssl
        self.server_factory = HathorServerFactory(self.network,
                                                  self.my_peer,
                                                  node=self,
                                                  use_ssl=ssl)
        self.client_factory = HathorClientFactory(self.network,
                                                  self.my_peer,
                                                  node=self,
                                                  use_ssl=ssl)
        self.connections = ConnectionsManager(self.reactor,
                                              self.my_peer,
                                              self.server_factory,
                                              self.client_factory,
                                              self.pubsub,
                                              self,
                                              ssl,
                                              whitelist_only=False,
                                              rng=self.rng,
                                              enable_sync_v1=enable_sync_v1,
                                              enable_sync_v2=enable_sync_v2)

        self.wallet = wallet
        if self.wallet:
            self.wallet.pubsub = self.pubsub
            self.wallet.reactor = self.reactor

        if stratum_port:
            # XXX: only import if needed
            from hathor.stratum import StratumFactory
            self.stratum_factory: Optional[StratumFactory] = StratumFactory(
                manager=self, port=stratum_port)
        else:
            self.stratum_factory = None
        # Set stratum factory for metrics object
        self.metrics.stratum_factory = self.stratum_factory

        self._allow_mining_without_peers = False

        # Thread pool used to resolve pow when sending tokens
        self.pow_thread_pool = ThreadPool(minthreads=0,
                                          maxthreads=settings.MAX_POW_THREADS,
                                          name='Pow thread pool')

        # List of addresses to listen for new connections (eg: [tcp:8000])
        self.listen_addresses: List[str] = []

        # Full verification executes all validations for transactions and blocks when initializing the node.
        # Can be activated on the command line with --full-verification
        self._full_verification = False

        # List of whitelisted peers
        self.peers_whitelist: List[str] = []

        # List of capabilities of the peer
        if capabilities is not None:
            self.capabilities = capabilities
        else:
            self.capabilities = DEFAULT_CAPABILITIES
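
Taken together, the constructor requires tx_storage and at least one enabled sync version; everything else has a default. A minimal sketch of a valid instantiation under those constraints (the hathor.manager import path is assumed, not confirmed by these examples):

from twisted.internet import reactor

from hathor.manager import HathorManager  # import path assumed
from hathor.transaction.storage.memory_storage import TransactionMemoryStorage

tx_storage = TransactionMemoryStorage()
manager = HathorManager(
    reactor,                # any IReactorCore provider, e.g. a test Clock
    network='testnet',
    tx_storage=tx_storage,  # mandatory: omitting it raises TypeError
    enable_sync_v1=True,    # at least one sync version must be enabled
)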
Example #4
class TestCase(unittest.TestCase):
    _enable_sync_v1: bool
    _enable_sync_v2: bool
    use_memory_storage: bool = USE_MEMORY_STORAGE

    def setUp(self):
        _set_test_mode(TestMode.TEST_ALL_WEIGHT)
        self.tmpdirs = []
        self.clock = Clock()
        self.clock.advance(time.time())
        self.log = logger.new()
        self.reset_peer_id_pool()
        self.rng = Random()

    def tearDown(self):
        self.clean_tmpdirs()

    def reset_peer_id_pool(self) -> None:
        self._free_peer_id_pool = self.new_peer_id_pool()

    def new_peer_id_pool(self) -> List[PeerId]:
        return PEER_ID_POOL.copy()

    def get_random_peer_id_from_pool(self,
                                     pool: Optional[List[PeerId]] = None,
                                     rng: Optional[Random] = None) -> PeerId:
        if pool is None:
            pool = self._free_peer_id_pool
        if not pool:
            raise RuntimeError('no more peer ids on the pool')
        if rng is None:
            rng = self.rng
        peer_id = rng.choice(pool)
        pool.remove(peer_id)
        return peer_id

    def _create_test_wallet(self):
        """ Generate a Wallet with a number of keypairs for testing
            :rtype: Wallet
        """
        tmpdir = tempfile.mkdtemp()
        self.tmpdirs.append(tmpdir)

        wallet = Wallet(directory=tmpdir)
        wallet.unlock(b'MYPASS')
        wallet.generate_keys(count=20)
        wallet.lock()
        return wallet

    def create_peer(self,
                    network,
                    peer_id=None,
                    wallet=None,
                    tx_storage=None,
                    unlock_wallet=True,
                    wallet_index=False,
                    capabilities=None,
                    full_verification=True,
                    enable_sync_v1=None,
                    enable_sync_v2=None,
                    checkpoints=None):
        if enable_sync_v1 is None:
            assert hasattr(self, '_enable_sync_v1'), (
                '`_enable_sync_v1` has no default by design, either set one on '
                'the test class or pass `enable_sync_v1` by argument')
            enable_sync_v1 = self._enable_sync_v1
        if enable_sync_v2 is None:
            assert hasattr(self, '_enable_sync_v2'), (
                '`_enable_sync_v2` has no default by design, either set one on '
                'the test class or pass `enable_sync_v2` by argument')
            enable_sync_v2 = self._enable_sync_v2
        assert enable_sync_v1 or enable_sync_v2, 'enable at least one sync version'

        if peer_id is None:
            peer_id = PeerId()
        if not wallet:
            wallet = self._create_test_wallet()
            if unlock_wallet:
                wallet.unlock(b'MYPASS')
        if tx_storage is None:
            if self.use_memory_storage:
                from hathor.transaction.storage.memory_storage import TransactionMemoryStorage
                tx_storage = TransactionMemoryStorage()
            else:
                from hathor.transaction.storage.rocksdb_storage import TransactionRocksDBStorage
                directory = tempfile.mkdtemp()
                self.tmpdirs.append(directory)
                tx_storage = TransactionRocksDBStorage(directory)
        manager = HathorManager(
            self.clock,
            peer_id=peer_id,
            network=network,
            wallet=wallet,
            tx_storage=tx_storage,
            wallet_index=wallet_index,
            capabilities=capabilities,
            rng=self.rng,
            enable_sync_v1=enable_sync_v1,
            enable_sync_v2=enable_sync_v2,
            checkpoints=checkpoints,
        )

        # XXX: just making sure that tests set this up correctly
        if enable_sync_v2:
            assert SyncVersion.V2 in manager.connections._sync_factories
        else:
            assert SyncVersion.V2 not in manager.connections._sync_factories
        if enable_sync_v1:
            assert SyncVersion.V1 in manager.connections._sync_factories
        else:
            assert SyncVersion.V1 not in manager.connections._sync_factories

        manager.avg_time_between_blocks = 0.0001
        manager._full_verification = full_verification
        manager.start()
        self.run_to_completion()
        return manager

    def run_to_completion(self):
        """ This will advance the test's clock until all calls scheduled are done.
        """
        for call in self.clock.getDelayedCalls():
            amount = max(0, call.getTime() - self.clock.seconds())
            self.clock.advance(amount)

    def assertTipsEqual(self, manager1, manager2):
        s1 = set(manager1.tx_storage.get_all_tips())
        s2 = set(manager2.tx_storage.get_all_tips())
        self.assertEqual(s1, s2)

        s1 = set(manager1.tx_storage.get_tx_tips())
        s2 = set(manager2.tx_storage.get_tx_tips())
        self.assertEqual(s1, s2)

    def assertTipsNotEqual(self, manager1, manager2):
        s1 = set(manager1.tx_storage.get_all_tips())
        s2 = set(manager2.tx_storage.get_all_tips())
        self.assertNotEqual(s1, s2)

    def assertConsensusEqual(self, manager1, manager2):
        self.assertEqual(manager1.tx_storage.get_count_tx_blocks(),
                         manager2.tx_storage.get_count_tx_blocks())
        for tx1 in manager1.tx_storage.get_all_transactions():
            tx2 = manager2.tx_storage.get_transaction(tx1.hash)
            tx1_meta = tx1.get_metadata()
            tx2_meta = tx2.get_metadata()
            # conflict_with's type is Optional[List[bytes]], so we convert to a set because order does not matter.
            self.assertEqual(set(tx1_meta.conflict_with or []),
                             set(tx2_meta.conflict_with or []))
            # Soft verification
            if tx1_meta.voided_by is None:
                # If tx1 is not voided, then tx2 must be not voided.
                self.assertIsNone(tx2_meta.voided_by)
            else:
                # If tx1 is voided, then tx2 must be voided.
                self.assertGreaterEqual(len(tx1_meta.voided_by), 1)
                self.assertGreaterEqual(len(tx2_meta.voided_by), 1)
            # Hard verification
            # self.assertEqual(tx1_meta.voided_by, tx2_meta.voided_by)

    def assertConsensusValid(self, manager):
        for tx in manager.tx_storage.get_all_transactions():
            if tx.is_block:
                self.assertBlockConsensusValid(tx)
            else:
                self.assertTransactionConsensusValid(tx)

    def assertBlockConsensusValid(self, block):
        self.assertTrue(block.is_block)
        if not block.parents:
            # Genesis
            return
        meta = block.get_metadata()
        if meta.voided_by is None:
            parent = block.get_block_parent()
            parent_meta = parent.get_metadata()
            self.assertIsNone(parent_meta.voided_by)

    def assertTransactionConsensusValid(self, tx):
        self.assertFalse(tx.is_block)
        meta = tx.get_metadata()
        if meta.voided_by and tx.hash in meta.voided_by:
            # If a transaction voids itself, then it must have at
            # least one conflict.
            self.assertTrue(meta.conflict_with)

        is_tx_executed = bool(not meta.voided_by)
        for h in meta.conflict_with or []:
            tx2 = tx.storage.get_transaction(h)
            meta2 = tx2.get_metadata()
            is_tx2_executed = bool(not meta2.voided_by)
            self.assertFalse(is_tx_executed and is_tx2_executed)

        for txin in tx.inputs:
            spent_tx = tx.get_spent_tx(txin)
            spent_meta = spent_tx.get_metadata()

            if spent_meta.voided_by is not None:
                self.assertIsNotNone(meta.voided_by)
                self.assertTrue(spent_meta.voided_by)
                self.assertTrue(meta.voided_by)
                self.assertTrue(spent_meta.voided_by.issubset(meta.voided_by))

        for parent in tx.get_parents():
            parent_meta = parent.get_metadata()
            if parent_meta.voided_by is not None:
                self.assertIsNotNone(meta.voided_by)
                self.assertTrue(parent_meta.voided_by)
                self.assertTrue(meta.voided_by)
                self.assertTrue(parent_meta.voided_by.issubset(meta.voided_by))

    def clean_tmpdirs(self):
        for tmpdir in self.tmpdirs:
            shutil.rmtree(tmpdir)

    def clean_pending(self, required_to_quiesce=True):
        """
        This handy method cleans all pending tasks from the reactor.

        When writing a unit test, consider the following question:

            Is the code that you are testing required to release control once it
            has done its job, so that it is impossible for it to later come around
            (with a delayed reactor task) and do anything further?

        If so, then trial will usefully test that for you -- if the code under
        test leaves any pending tasks on the reactor then trial will fail it.

        On the other hand, some code is *not* required to release control -- some
        code is allowed to continuously maintain control by rescheduling reactor
        tasks in order to do ongoing work.  Trial will incorrectly require that
        code to clean up all its tasks from the reactor.

        Most people think that such code should be amended to have an optional
        "shutdown" operation that releases all control, but on the contrary it is
        good design for some code to *not* have a shutdown operation, but instead
        to have a "crash-only" design in which it recovers from crash on startup.

        If the code under test is of the "long-running" kind, which is *not*
        required to shutdown cleanly in order to pass tests, then you can simply
        call testutil.clean_pending() at the end of the unit test, and trial will
        be satisfied.

        Copy from: https://github.com/zooko/pyutil/blob/master/pyutil/testutil.py#L68
        """
        pending = reactor.getDelayedCalls()
        active = bool(pending)
        for p in pending:
            if p.active():
                p.cancel()
            else:
                print('WEIRDNESS! pending timed call not active!')
        if required_to_quiesce and active:
            self.fail(
                'Reactor was still active when it was required to be quiescent.'
            )

    def get_address(self, index: int) -> Optional[str]:
        """ Generate a fixed HD Wallet and return an address
        """
        from hathor.wallet import HDWallet
        words = (
            'bind daring above film health blush during tiny neck slight clown salmon '
            'wine brown good setup later omit jaguar tourist rescue flip pet salute'
        )

        hd = HDWallet(words=words)
        hd._manually_initialize()

        if index >= hd.gap_limit:
            return None

        return list(hd.keys.keys())[index]
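
A concrete test built on this base class would pin the sync flags (which deliberately have no default) and then call create_peer; a minimal sketch, with the test body purely illustrative:

class MyFirstSyncTest(TestCase):
    _enable_sync_v1 = True
    _enable_sync_v2 = False

    def test_peer_starts_with_valid_consensus(self):
        # create_peer builds a wallet and storage, starts the manager
        # and drains the scheduled calls before returning.
        manager = self.create_peer('testnet')
        self.assertConsensusValid(manager)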
Example #5
class Simulator:
    # used to reconcile monkeypatching with multiple instances
    _patches_rc: int = 0

    @classmethod
    def _apply_patches(cls):
        """ Applies global patches on modules that aren't easy/possible to configure otherwise.

        Patches:

        - disable pow verification
        - set DAA test-mode to DISABLED (will actually run the pow function, which won't actually verify the pow)
        - override AVG_TIME_BETWEEN_BLOCKS to 64
        """
        from hathor.transaction import BaseTransaction

        def verify_pow(self: BaseTransaction) -> None:
            assert self.hash is not None

        cls._original_verify_pow = BaseTransaction.verify_pow
        BaseTransaction.verify_pow = verify_pow

        _set_test_mode(TestMode.DISABLED)

        from hathor import daa
        cls._original_avg_time_between_blocks = daa.AVG_TIME_BETWEEN_BLOCKS
        daa.AVG_TIME_BETWEEN_BLOCKS = 64

    @classmethod
    def _remove_patches(cls):
        """ Remove the patches previously applied.
        """
        from hathor.transaction import BaseTransaction
        BaseTransaction.verify_pow = cls._original_verify_pow

        from hathor import daa
        daa.AVG_TIME_BETWEEN_BLOCKS = cls._original_avg_time_between_blocks

    @classmethod
    def _patches_rc_increment(cls):
        """ This is used by when starting instances of Simulator to determine when to run _apply_patches"""
        assert cls._patches_rc >= 0
        cls._patches_rc += 1
        if cls._patches_rc < 2:
            # patches not yet applied
            cls._apply_patches()

    @classmethod
    def _patches_rc_decrement(cls):
        """ This is used by when stopping instances of Simulator to determine when to run _remove_patches"""
        assert cls._patches_rc >= 0
        cls._patches_rc -= 1
        if cls._patches_rc == 0:
            # patches not needed anymore
            cls._remove_patches()

    def __init__(self, seed: Optional[int] = None):
        self.log = logger.new()
        if seed is None:
            seed = secrets.randbits(64)
        self.seed = seed
        self.rng = Random(self.seed)
        self._network = 'testnet'
        self._clock = HeapClock()
        self._peers: OrderedDict[str, HathorManager] = OrderedDict()
        self._connections: List['FakeConnection'] = []
        self._started = False

    def start(self) -> None:
        """Has to be called before any other method can be called."""
        assert not self._started
        self._started = True
        self._patches_rc_increment()
        first_timestamp = min(tx.timestamp for tx in _get_genesis_transactions_unsafe(None))
        dt = self.rng.randint(3600, 120 * 24 * 3600)
        self._clock.advance(first_timestamp + dt)
        self.log.debug('randomized step: clock advance start', dt=dt)

    def stop(self) -> None:
        """Can only stop after calling start, but it doesn't matter if it's paused or not"""
        assert self._started
        self._started = False
        self._patches_rc_decrement()

    def create_peer(self, network: Optional[str] = None, peer_id: Optional[PeerId] = None,
                    enable_sync_v1: bool = True, enable_sync_v2: bool = True,
                    soft_voided_tx_ids: Optional[Set[bytes]] = None) -> HathorManager:
        assert self._started
        if network is None:
            network = self._network

        wallet = HDWallet(gap_limit=2)
        wallet._manually_initialize()

        assert peer_id is not None  # XXX: temporary, for checking that tests are using the peer_id
        if peer_id is None:
            peer_id = PeerId()
        tx_storage = TransactionMemoryStorage()
        manager = HathorManager(
            self._clock,
            peer_id=peer_id,
            network=network,
            wallet=wallet,
            enable_sync_v1=enable_sync_v1,
            enable_sync_v2=enable_sync_v2,
            tx_storage=tx_storage,
            rng=Random(self.rng.getrandbits(64)),
            soft_voided_tx_ids=soft_voided_tx_ids,
        )

        manager.reactor = self._clock
        manager._full_verification = True
        manager.start()
        self.run_to_completion()

        # Don't use it anywhere else. It is unsafe to generate mnemonic words like this.
        # It should be used only for testing purposes.
        m = Mnemonic('english')
        words = m.to_mnemonic(self.rng.randbytes(32))
        self.log.debug('randomized step: generate wallet', words=words)
        wallet.unlock(words=words, tx_storage=manager.tx_storage)
        return manager

    def create_tx_generator(self, peer: HathorManager, *args: Any, **kwargs: Any) -> RandomTransactionGenerator:
        return RandomTransactionGenerator(peer, self.rng, *args, **kwargs)

    def create_miner(self, peer: HathorManager, *args: Any, **kwargs: Any) -> MinerSimulator:
        return MinerSimulator(peer, self.rng, *args, **kwargs)

    def run_to_completion(self):
        """ This will advance the test's clock until all calls scheduled are done.
        """
        assert self._started
        for call in self._clock.getDelayedCalls():
            amount = max(0, call.getTime() - self._clock.seconds())
            self._clock.advance(amount)

    def add_peer(self, name: str, peer: HathorManager) -> None:
        assert self._started
        if name in self._peers:
            raise ValueError('Duplicate peer name')
        self._peers[name] = peer

    def get_peer(self, name: str) -> HathorManager:
        return self._peers[name]

    def add_connection(self, conn: 'FakeConnection') -> None:
        self._connections.append(conn)

    def _run(self, interval: float, step: float, status_interval: float) -> Generator[None, None, None]:
        """ Implementation of run, yields at every step to allow verifications like in run_until_complete
        """
        assert self._started
        initial = self._clock.seconds()
        latest_time = self._clock.seconds()
        t0 = time.time()
        while self._clock.seconds() <= initial + interval:
            for conn in self._connections:
                conn.run_one_step()
            yield
            if self._clock.seconds() - latest_time >= status_interval:
                t1 = time.time()
                # Real elapsed time.
                real_elapsed_time = t1 - t0
                # Rate is the number of simulated seconds per real second.
                # For example, a rate of 60 means that we can simulate 1 minute per second.
                rate = (self._clock.seconds() - initial) / real_elapsed_time
                # Simulation now.
                sim_now = self._clock.seconds()
                # Simulation dt.
                sim_dt = self._clock.seconds() - initial
                # Number of simulated seconds to end this run.
                sim_remaining = interval - self._clock.seconds() + initial
                # Number of call pending to be executed.
                delayed_calls = len(self._clock.getDelayedCalls())
                self.log.info('simulator: time step', real_elapsed_time=real_elapsed_time, rate=rate, sim_now=sim_now,
                              dt_step=sim_dt, dt_remaining=sim_remaining, delayed_calls=delayed_calls)
                latest_time = self._clock.seconds()
            self._clock.advance(step)

    def run_until_complete(self,
                           max_interval: float,
                           step: float = DEFAULT_STEP_INTERVAL,
                           status_interval: float = DEFAULT_STATUS_INTERVAL) -> bool:
        """ Will stop when all peers have synced/errored (-> True), or when max_interval is elapsed (-> False).

        Make sure miners/tx_generators are stopped or this will almost certainly run until max_interval.
        """
        assert self._started
        for _ in self._run(max_interval, step, status_interval):
            if all(not conn.can_step() for conn in self._connections):
                return True
        return False

    def run(self,
            interval: float,
            step: float = DEFAULT_STEP_INTERVAL,
            status_interval: float = DEFAULT_STATUS_INTERVAL) -> None:
        assert self._started
        for _ in self._run(interval, step, status_interval):
            pass
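
Putting the Simulator API together, a typical session starts the simulator, creates peers, wires them with FakeConnection objects and runs the clock. A sketch under the assumption that FakeConnection takes the two managers to connect (its constructor is not shown in these examples):

sim = Simulator(seed=42)
sim.start()
try:
    peer_a = sim.create_peer(peer_id=PeerId())
    peer_b = sim.create_peer(peer_id=PeerId())
    conn = FakeConnection(peer_a, peer_b)  # constructor signature assumed
    sim.add_connection(conn)
    # True if every connection quiesced before max_interval elapsed
    synced = sim.run_until_complete(max_interval=3600)
finally:
    sim.stop()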
Example #6
    def choose_random_template(self, rng: Random) -> BlockTemplate:
        """ Randomly choose and return a template to be used for generating a block; see BlockTemplate."""
        return rng.choice(self)
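
Since rng.choice(self) requires a sequence, this method presumably lives on a list-like collection of BlockTemplate objects; a caller holding such a collection would use it roughly like this (the accessor name is hypothetical):

templates = manager.get_block_templates()  # hypothetical accessor
block_template = templates.choose_random_template(rng)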