def test_create_from_json(self):
    """Round-trip a PeerId through JSON (private key included) and verify equality."""
    original = PeerId()
    exported = original.to_json(include_private_key=True)
    restored = PeerId.create_from_json(exported)
    # The restored peer must serialize back to exactly the same payload.
    self.assertEqual(exported, restored.to_json(include_private_key=True))
    restored.validate()
def test_match_peer_id(self):
    """NetfilterMatchPeerId only matches once the connection reaches READY state."""
    network = 'testnet'
    first_peer = PeerId()
    second_peer = PeerId()
    first_manager = self.create_peer(network, peer_id=first_peer)
    second_manager = self.create_peer(network, peer_id=second_peer)
    conn = FakeConnection(first_manager, second_manager)

    self.assertTrue(conn.proto2.is_state(conn.proto2.PeerState.HELLO))
    matcher = NetfilterMatchPeerId(str(first_peer.id))
    context = NetfilterContext(protocol=conn.proto2)
    # No match while the handshake is still in HELLO.
    self.assertFalse(matcher.match(context))

    conn.run_one_step()
    self.assertTrue(conn.proto2.is_state(conn.proto2.PeerState.PEER_ID))
    # Still no match during PEER_ID negotiation.
    self.assertFalse(matcher.match(context))

    # Success because the connection is ready and proto2 is connected to proto1.
    conn.run_one_step()
    conn.run_one_step()
    self.assertTrue(conn.proto2.is_state(conn.proto2.PeerState.READY))
    self.assertTrue(matcher.match(context))

    # Fail because proto1 is connected to proto2, and the peer id cannot match.
    context = NetfilterContext(protocol=conn.proto1)
    self.assertFalse(matcher.match(context))
def main() -> None:
    """Generate a fresh PeerId and print its JSON (private key included) to stdout."""
    from hathor.p2p.peer_id import PeerId
    new_peer = PeerId()
    as_dict = new_peer.to_json(include_private_key=True)
    print(json.dumps(as_dict, indent=4))
def getPeerCertificate(self):
    """Return an X509 certificate, as Twisted expects from a transport."""
    from OpenSSL import crypto
    # A throwaway peer saves us from hand-crafting an X.509 certificate.
    throwaway = PeerId('testnet')
    cert = throwaway.get_certificate()
    return crypto.X509.from_cryptography(cert)
def setUp(self):
    """Create two testnet peers and wire their managers through a fake connection."""
    super().setUp()
    self.network = 'testnet'
    self.peer_id1 = PeerId()
    self.peer_id2 = PeerId()
    self.manager1 = self.create_peer(self.network, peer_id=self.peer_id1)
    self.manager2 = self.create_peer(self.network, peer_id=self.peer_id2)
    self.conn = FakeConnection(self.manager1, self.manager2)
def test_create_from_json_without_private_key(self):
    """JSON round-trip without the private key; a missing entrypoints field defaults to []."""
    source = PeerId()
    exported = source.to_json()
    # Drop the field on purpose to exercise the defaulting code path.
    del exported['entrypoints']
    restored = PeerId.create_from_json(exported)
    round_tripped = restored.to_json()
    self.assertEqual(round_tripped['entrypoints'], [])
    exported['entrypoints'] = []
    self.assertEqual(exported, round_tripped)
    restored.validate()
def test_connecting_peers(self):
    """POSTing a mix of known and new entrypoints only reports the new ones back."""
    response = yield self.web.post('p2p/peers', ['tcp://localhost:8006'])
    self.assertTrue(response.json_value()['success'])
    # test when we send a peer we're already connected to
    known_peer = PeerId()
    known_peer.entrypoints = ['tcp://localhost:8006']
    self.manager.connections.peer_storage.add(known_peer)
    response = yield self.web.post(
        'p2p/peers', ['tcp://localhost:8006', 'tcp://localhost:8007'])
    body = response.json_value()
    self.assertTrue(body['success'])
    # Only the previously-unknown entrypoint is returned.
    self.assertEqual(body['peers'], ['tcp://localhost:8007'])
def test_save_peer_file(self):
    """save_to_file must write exactly the JSON produced by to_json(include_private_key=True)."""
    p = PeerId()
    # TemporaryDirectory guarantees cleanup even when an assertion fails,
    # unlike the previous mkdtemp() + manual shutil.rmtree() pair, which
    # leaked the directory on any test failure.
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, 'peer.json')
        p.save_to_file(path)
        with open(path, 'r') as f:
            peer_from_file = json.load(f)
        self.assertEqual(p.to_json(include_private_key=True), peer_from_file)
def create_peer(self, network, peer_id=None, wallet=None, tx_storage=None, unlock_wallet=True, wallet_index=False, capabilities=None):
    """Build, start and settle a HathorManager configured for fast tests."""
    if peer_id is None:
        peer_id = PeerId()
    if not wallet:
        wallet = self._create_test_wallet()
        if unlock_wallet:
            wallet.unlock(b'MYPASS')
    manager = HathorManager(
        self.clock,
        peer_id=peer_id,
        network=network,
        wallet=wallet,
        tx_storage=tx_storage,
        wallet_index=wallet_index,
        capabilities=capabilities,
    )
    # Tune the manager so tests run quickly but with full verification enabled.
    manager.avg_time_between_blocks = 0.0001
    manager.test_mode = TestMode.TEST_ALL_WEIGHT
    manager._full_verification = True
    manager.start()
    self.run_to_completion()
    return manager
def _manager_kwargs(self):
    """Assemble the keyword arguments used to construct a HathorManager in tests."""
    peer_id = PeerId()
    network = 'testnet'
    wallet = self._create_test_wallet()
    tx_storage = getattr(self, 'tx_storage', None)
    if tx_storage is None:
        # Pick a storage backend based on the test-class flag.
        if self.use_memory_storage:
            from hathor.transaction.storage.memory_storage import TransactionMemoryStorage
            tx_storage = TransactionMemoryStorage()
        else:
            from hathor.transaction.storage.rocksdb_storage import TransactionRocksDBStorage
            directory = tempfile.mkdtemp()
            self.tmpdirs.append(directory)
            tx_storage = TransactionRocksDBStorage(directory)
    assert (
        hasattr(self, '_enable_sync_v1')
        and hasattr(self, '_enable_sync_v2')
        and (self._enable_sync_v1 or self._enable_sync_v2)
    ), (
        'Please set both `_enable_sync_v1` and `_enable_sync_v2` on the class. '
        'Also they can\'t both be False. '
        'This is by design so we don\'t forget to test for multiple sync versions.'
    )
    return dict(
        peer_id=peer_id,
        network=network,
        wallet=wallet,
        tx_storage=tx_storage,
        wallet_index=True,
        enable_sync_v1=self._enable_sync_v1,
        enable_sync_v2=self._enable_sync_v2,
    )
def test_retry_logic(self):
    """can_retry honors the attempt counter, the RETRIES_EXCEEDED flag and retry_timestamp."""
    peer = PeerId('testnet')
    peer.retry_attempts = settings.MAX_PEER_CONNECTION_ATTEMPS
    self.assertFalse(peer.can_retry(0))
    peer.retry_attempts = 0
    # should still fail as the RETRIES_EXCEEDED flag is already set
    self.assertFalse(peer.can_retry(0))
    # remove flag and try again
    from hathor.p2p.peer_id import PeerFlags
    peer.flags.remove(PeerFlags.RETRIES_EXCEEDED)
    self.assertTrue(peer.can_retry(0))
    # A future retry_timestamp blocks retries again.
    peer.retry_timestamp = 100
    self.assertFalse(peer.can_retry(0))
def test_validate_certificate(self):
    """validate_certificate rejects a certificate whose key belongs to another peer."""
    peer = PeerId('testnet')
    protocol = HathorProtocol('testnet', peer, None, node=None, use_ssl=True)

    class FakeTransport:
        def getPeerCertificate(self):
            from OpenSSL import crypto
            # we use a new peer here just to save the trouble of manually creating a certificate
            random_peer = PeerId('testnet')
            return crypto.X509.from_cryptography(random_peer.get_certificate())

    protocol.transport = FakeTransport()
    # The certificate came from an unrelated peer, so validation must fail.
    self.assertFalse(peer.validate_certificate(protocol))
def _manager_kwargs(self):
    """Return the keyword arguments for building a test HathorManager."""
    return dict(
        peer_id=PeerId(),
        network='testnet',
        wallet=self._create_test_wallet(),
        tx_storage=getattr(self, 'tx_storage', None),
        wallet_index=True,
    )
def handle_peers(self, payload: str) -> None:
    """ Executed when a PEERS command is received. It updates the list
    of known peers (and tries to connect to new ones).
    """
    for peer_data in json.loads(payload):
        new_peer = PeerId.create_from_json(peer_data)
        new_peer.validate()
        # Forward every valid peer to the connections manager, which decides
        # whether to store it and/or dial it.
        if self.protocol.connections:
            self.protocol.connections.on_receive_peer(new_peer, origin=self)
    remote = self.protocol.transport.getPeer()
    self.log.debug('received peers', remote=remote, payload=payload)
def test_retry_connection(self):
    """update_retry_timestamp multiplies the interval, caps it at 180, and reset restores defaults."""
    p = PeerId()
    initial_interval = p.retry_interval
    p.update_retry_timestamp(0)
    self.assertEqual(settings.PEER_CONNECTION_RETRY_INTERVAL_MULTIPLIER * initial_interval,
                     p.retry_interval)
    self.assertEqual(p.retry_interval, p.retry_timestamp)
    # when retry_interval is already 180
    p.retry_interval = 190
    p.update_retry_timestamp(0)
    self.assertEqual(180, p.retry_interval)
    # reset
    p.reset_retry_timestamp()
    self.assertEqual(p.retry_interval, 5)
    self.assertEqual(p.retry_timestamp, 0)
def connect_to_if_not_connected(self, peer: PeerId, now: int) -> None:
    """ Attempts to connect if it is not connected to the peer.

    :param peer: candidate peer; must have a non-None id and at least one entrypoint
    :param now: current timestamp used by the retry back-off check
    """
    import random
    if not peer.entrypoints:
        # Nothing to dial without an entrypoint.
        return
    # Fix: the assert used to come AFTER `peer.id` was first used in the
    # membership test below; validate before the first use instead.
    assert peer.id is not None
    if peer.id in self.connected_peers:
        return
    if peer.can_retry(now):
        # Pick a random entrypoint to spread connection attempts.
        self.connect_to(random.choice(peer.entrypoints), peer)
def handle_peer_id(self, payload: str) -> Generator[Any, Any, None]:
    """ Executed when a PEER-ID is received. It basically checks
    the identity of the peer. Only after this step, the peer connection
    is considered established and ready to communicate.

    Validation order (each failure sends an error and closes the connection):
    expected-id match, whitelist, self-connection, duplicate connection,
    entrypoint check (asynchronous, hence the generator), then TLS certificate.
    """
    protocol = self.protocol
    data = json.loads(payload)
    peer = PeerId.create_from_json(data)
    peer.validate()
    assert peer.id is not None

    # If the connection URL had a peer-id parameter we need to check it's the same
    if protocol.expected_peer_id and peer.id != protocol.expected_peer_id:
        protocol.send_error_and_close_connection('Peer id different from the requested one.')
        return

    # is it on the whitelist?
    if settings.ENABLE_PEER_WHITELIST and peer.id not in protocol.node.peers_whitelist:
        protocol.send_error_and_close_connection('Blocked. Get in touch with Hathor team.')
        return

    # Refuse to talk to ourselves.
    if peer.id == protocol.my_peer.id:
        protocol.send_error_and_close_connection('Are you my clone?!')
        return

    # Refuse a second connection to a peer we already talk to.
    if protocol.connections:
        if protocol.connections.is_peer_connected(peer.id):
            protocol.send_error_and_close_connection('We are already connected.')
            return

    # Asynchronous check: the connection string must appear in the peer's
    # advertised entrypoints.
    entrypoint_valid = yield peer.validate_entrypoint(protocol)
    if not entrypoint_valid:
        protocol.send_error_and_close_connection('Connection string is not in the entrypoints.')
        return

    if protocol.use_ssl:
        # The TLS certificate's public key must match the peer's public key.
        certificate_valid = peer.validate_certificate(protocol)
        if not certificate_valid:
            protocol.send_error_and_close_connection('Public keys from peer and certificate are not the same.')
            return

    # If it gets here, the peer is validated, and we are ready to start communicating.
    protocol.peer = peer
    self.send_ready()
def test_retry_connection(self):
    """increment_retry_attempt multiplies the interval up to the configured cap; reset restores defaults."""
    p = PeerId()
    starting_interval = p.retry_interval
    p.increment_retry_attempt(0)
    self.assertEqual(settings.PEER_CONNECTION_RETRY_INTERVAL_MULTIPLIER * starting_interval,
                     p.retry_interval)
    self.assertEqual(starting_interval, p.retry_timestamp)
    # when retry_interval is already 180
    p.retry_interval = settings.PEER_CONNECTION_RETRY_MAX_RETRY_INTERVAL + 10
    p.increment_retry_attempt(0)
    self.assertEqual(settings.PEER_CONNECTION_RETRY_MAX_RETRY_INTERVAL, p.retry_interval)
    # reset
    p.reset_retry_timestamp()
    self.assertEqual(p.retry_interval, 5)
    self.assertEqual(p.retry_timestamp, 0)
def connect_to_if_not_connected(self, peer: PeerId, now: int) -> None:
    """ Attempts to connect if it is not connected to the peer.

    :param peer: candidate peer; dropped from storage when it has no entrypoints
    :param now: current timestamp used by the retry back-off check
    """
    if not peer.entrypoints:
        # It makes no sense to keep storing peers that have disconnected and have no entrypoints
        # We will never be able to connect to them anymore and they will only keep spending memory
        # and other resources when used in APIs, so we are removing them here
        if peer.id not in self.connected_peers:
            self.peer_storage.remove(peer)
        return

    # Fix: the assert used to come AFTER `peer.id` was first used in the
    # membership test below; validate before the first use instead.
    assert peer.id is not None
    if peer.id in self.connected_peers:
        return

    if peer.can_retry(now):
        # Use the manager's seeded RNG for reproducible entrypoint selection.
        self.connect_to(self.rng.choice(peer.entrypoints), peer)
def test_factory(self):
    """A reject rule on the pre_conn chain blocks matching source IPs at buildProtocol time."""
    pre_conn = get_table('filter').get_chain('pre_conn')
    blocked = NetfilterMatchIPAddress('192.168.0.1/32')
    pre_conn.add_rule(NetfilterRule(blocked, NetfilterReject()))

    wrapped_factory = HathorServerFactory('testnet', PeerId(), node=None, use_ssl=False)
    factory = NetfilterFactory(connections=None, wrappedFactory=wrapped_factory)

    # The rejected address gets no protocol...
    self.assertIsNone(factory.buildProtocol(IPv4Address('TCP', '192.168.0.1', 1234)))
    # ...while any other address connects normally.
    self.assertIsNotNone(factory.buildProtocol(IPv4Address('TCP', '192.168.0.2', 1234)))
def create_peer(self, network: Optional[str] = None, peer_id: Optional[PeerId] = None,
                enable_sync_v1: bool = True, enable_sync_v2: bool = True,
                soft_voided_tx_ids: Optional[Set[bytes]] = None) -> HathorManager:
    """Create, start and settle a HathorManager inside the simulator.

    The manager runs on the simulator clock, uses in-memory storage and gets
    a freshly-unlocked HD wallet seeded from the simulator RNG.
    """
    assert self._started
    if network is None:
        network = self._network
    wallet = HDWallet(gap_limit=2)
    wallet._manually_initialize()
    assert peer_id is not None  # XXX: temporary, for checking that tests are using the peer_id
    # NOTE(review): unreachable while the assert above is active; it only runs
    # under `python -O`, where asserts are stripped. Kept for that reason.
    if peer_id is None:
        peer_id = PeerId()
    tx_storage = TransactionMemoryStorage()
    manager = HathorManager(
        self._clock,
        peer_id=peer_id,
        network=network,
        wallet=wallet,
        enable_sync_v1=enable_sync_v1,
        enable_sync_v2=enable_sync_v2,
        tx_storage=tx_storage,
        # Derive a child RNG so each manager is deterministic but independent.
        rng=Random(self.rng.getrandbits(64)),
        soft_voided_tx_ids=soft_voided_tx_ids,
    )
    manager.reactor = self._clock
    manager._full_verification = True
    manager.start()
    self.run_to_completion()
    # Don't use it anywhere else. It is unsafe to generate mnemonic words like this.
    # It should be used only for testing purposes.
    m = Mnemonic('english')
    words = m.to_mnemonic(self.rng.randbytes(32))
    self.log.debug('randomized step: generate wallet', words=words)
    wallet.unlock(words=words, tx_storage=manager.tx_storage)
    return manager
def test_merge_peer(self):
    """add_or_merge combines fields of peers that share an id; add() rejects duplicates."""
    # Testing peer storage with merge of peers
    peer_storage = PeerStorage()
    p1 = PeerId()
    p2 = PeerId()
    p2.id = p1.id
    p2.public_key = p1.public_key
    # p1 loses its public key AFTER p2 copied it, so the merge below must
    # restore the key from p2.
    p1.public_key = ''
    peer_storage.add_or_merge(p1)
    self.assertEqual(len(peer_storage), 1)
    peer_storage.add_or_merge(p2)
    peer = peer_storage[p1.id]
    self.assertEqual(peer.id, p1.id)
    self.assertEqual(peer.private_key, p1.private_key)
    self.assertEqual(peer.public_key, p1.public_key)
    self.assertEqual(peer.entrypoints, [])

    # Two partial peers sharing an id: merging must union their entrypoints
    # without duplicating the shared '3'.
    p3 = PeerId()
    p3.entrypoints.append('1')
    p3.entrypoints.append('3')
    p3.public_key = ''
    p4 = PeerId()
    p4.public_key = ''
    p4.private_key = ''
    p4.id = p3.id
    p4.entrypoints.append('2')
    p4.entrypoints.append('3')
    peer_storage.add_or_merge(p4)
    self.assertEqual(len(peer_storage), 2)
    peer_storage.add_or_merge(p3)
    self.assertEqual(len(peer_storage), 2)
    peer = peer_storage[p3.id]
    self.assertEqual(peer.id, p3.id)
    self.assertEqual(peer.private_key, p3.private_key)
    self.assertEqual(peer.entrypoints, ['2', '3', '1'])

    # Plain add() must reject a peer whose id already exists in storage.
    with self.assertRaises(ValueError):
        peer_storage.add(p1)
def test_sign_verify_fail(self):
    """A tampered signature must not verify."""
    message = b'abacate'
    peer = PeerId()
    tampered = peer.sign(message)[::-1]  # reverse the bytes to corrupt it
    self.assertFalse(peer.verify_signature(tampered, message))
def test_sign_verify(self):
    """A peer's own signature over a message must verify."""
    message = b'abacate'
    peer = PeerId()
    self.assertTrue(peer.verify_signature(peer.sign(message), message))
def prepare(self, args: Namespace) -> None:
    """Parse CLI args and build the node: peer id, storage, wallet, manager, discovery.

    Exits the process on unusable configuration (low fd limit, conflicting
    hostname flags). Ends by starting the manager and registering resources.
    """
    import hathor
    from hathor.cli.util import check_or_exit
    from hathor.conf import HathorSettings
    from hathor.conf.get_settings import get_settings_module
    from hathor.daa import TestMode, _set_test_mode
    from hathor.manager import HathorManager
    from hathor.p2p.peer_discovery import BootstrapPeerDiscovery, DNSPeerDiscovery
    from hathor.p2p.peer_id import PeerId
    from hathor.p2p.utils import discover_hostname
    from hathor.transaction import genesis
    from hathor.transaction.storage import (
        TransactionCacheStorage,
        TransactionCompactStorage,
        TransactionMemoryStorage,
        TransactionRocksDBStorage,
        TransactionStorage,
    )
    from hathor.wallet import HDWallet, Wallet

    settings = HathorSettings()
    settings_module = get_settings_module()  # only used for logging its location
    self.log = logger.new()

    from setproctitle import setproctitle
    setproctitle('{}hathor-core'.format(args.procname_prefix))

    if args.recursion_limit:
        sys.setrecursionlimit(args.recursion_limit)
    else:
        sys.setrecursionlimit(5000)

    # `resource` is POSIX-only; skip the fd-limit check where unavailable.
    try:
        import resource
    except ModuleNotFoundError:
        pass
    else:
        (nofile_soft, _) = resource.getrlimit(resource.RLIMIT_NOFILE)
        if nofile_soft < 256:
            print('Maximum number of open file descriptors is too low. Minimum required is 256.')
            sys.exit(-2)

    # Peer identity: fresh, or loaded from the JSON file given via --peer.
    if not args.peer:
        peer_id = PeerId()
    else:
        data = json.load(open(args.peer, 'r'))
        peer_id = PeerId.create_from_json(data)

    python = f'{platform.python_version()}-{platform.python_implementation()}'

    self.check_unsafe_arguments(args)

    self.log.info(
        'hathor-core v{hathor}',
        hathor=hathor.__version__,
        pid=os.getpid(),
        genesis=genesis.GENESIS_HASH.hex()[:7],
        my_peer_id=str(peer_id.id),
        python=python,
        platform=platform.platform(),
        settings=settings_module.__file__,
    )

    def create_wallet():
        # Build the wallet selected by --wallet ('hd' or 'keypair').
        if args.wallet == 'hd':
            kwargs = {
                'words': args.words,
            }
            if args.passphrase:
                wallet_passphrase = getpass.getpass(prompt='HD Wallet passphrase:')
                kwargs['passphrase'] = wallet_passphrase.encode()
            if args.data:
                kwargs['directory'] = args.data
            return HDWallet(**kwargs)
        elif args.wallet == 'keypair':
            print('Using KeyPairWallet')
            if args.data:
                wallet = Wallet(directory=args.data)
            else:
                wallet = Wallet()
            wallet.flush_to_disk_interval = 5  # seconds
            if args.unlock_wallet:
                # NOTE(review): the following span appears corrupted/masked in the
                # source (`'Wallet password:'******'Invalid type for wallet'`);
                # preserved verbatim — original likely unlocked the wallet with the
                # password, returned it, and raised ValueError for unknown types.
                # TODO confirm against upstream history.
                wallet_passwd = getpass.getpass(prompt='Wallet password:'******'Invalid type for wallet')

    tx_storage: TransactionStorage
    if args.memory_storage:
        check_or_exit(not args.data, '--data should not be used with --memory-storage')
        # if using MemoryStorage, no need to have cache
        tx_storage = TransactionMemoryStorage()
        assert not args.x_rocksdb_indexes, 'RocksDB indexes require RocksDB data'
        self.log.info('with storage', storage_class=type(tx_storage).__name__)
    elif args.json_storage:
        check_or_exit(args.data, '--data is expected')
        assert not args.x_rocksdb_indexes, 'RocksDB indexes require RocksDB data'
        tx_storage = TransactionCompactStorage(path=args.data, with_index=(not args.cache))
    else:
        check_or_exit(args.data, '--data is expected')
        if args.rocksdb_storage:
            self.log.warn('--rocksdb-storage is now implied, no need to specify it')
        cache_capacity = args.rocksdb_cache
        use_memory_indexes = not args.x_rocksdb_indexes
        tx_storage = TransactionRocksDBStorage(path=args.data, with_index=(not args.cache),
                                               cache_capacity=cache_capacity,
                                               use_memory_indexes=use_memory_indexes)
        self.log.info('with storage', storage_class=type(tx_storage).__name__, path=args.data)
    if args.cache:
        check_or_exit(not args.memory_storage, '--cache should not be used with --memory-storage')
        tx_storage = TransactionCacheStorage(tx_storage, reactor)
        if args.cache_size:
            tx_storage.capacity = args.cache_size
        if args.cache_interval:
            tx_storage.interval = args.cache_interval
        self.log.info('with cache', capacity=tx_storage.capacity, interval=tx_storage.interval)
    self.tx_storage = tx_storage
    self.log.info('with indexes', indexes_class=type(tx_storage.indexes).__name__)

    if args.wallet:
        self.wallet = create_wallet()
        self.log.info('with wallet', wallet=self.wallet, path=args.data)
    else:
        self.wallet = None

    # Hostname: explicit flag, or discovered when --auto-hostname is given.
    if args.hostname and args.auto_hostname:
        print('You cannot use --hostname and --auto-hostname together.')
        sys.exit(-1)
    if not args.auto_hostname:
        hostname = args.hostname
    else:
        print('Trying to discover your hostname...')
        hostname = discover_hostname()
        if not hostname:
            print('Aborting because we could not discover your hostname.')
            print('Try again or run without --auto-hostname.')
            sys.exit(-1)
        print('Hostname discovered and set to {}'.format(hostname))

    network = settings.NETWORK_NAME
    enable_sync_v1 = not args.x_sync_v2_only
    enable_sync_v2 = args.x_sync_v2_only or args.x_sync_bridge

    self.manager = HathorManager(
        reactor,
        peer_id=peer_id,
        network=network,
        hostname=hostname,
        tx_storage=self.tx_storage,
        wallet=self.wallet,
        wallet_index=args.wallet_index,
        stratum_port=args.stratum,
        ssl=True,
        checkpoints=settings.CHECKPOINTS,
        enable_sync_v1=enable_sync_v1,
        enable_sync_v2=enable_sync_v2,
        soft_voided_tx_ids=set(settings.SOFT_VOIDED_TX_IDS),
    )

    if args.allow_mining_without_peers:
        self.manager.allow_mining_without_peers()

    if args.x_localhost_only:
        self.manager.connections.localhost_only = True

    # Peer discovery: DNS seeds (settings + --dns) and explicit --bootstrap list.
    dns_hosts = []
    if settings.BOOTSTRAP_DNS:
        dns_hosts.extend(settings.BOOTSTRAP_DNS)
    if args.dns:
        dns_hosts.extend(args.dns)
    if dns_hosts:
        self.manager.add_peer_discovery(DNSPeerDiscovery(dns_hosts))
    if args.bootstrap:
        self.manager.add_peer_discovery(BootstrapPeerDiscovery(args.bootstrap))

    if args.test_mode_tx_weight:
        _set_test_mode(TestMode.TEST_TX_WEIGHT)
        if self.wallet:
            self.wallet.test_mode = True

    if args.x_full_verification:
        self.manager._full_verification = True
    if args.x_fast_init_beta:
        self.log.warn('--x-fast-init-beta is now the default, no need to specify it')

    for description in args.listen:
        self.manager.add_listen_address(description)

    self.start_manager(args)
    self.register_resources(args)
def test_no_private_key(self):
    """A peer without a private key is still considered valid."""
    peer = PeerId()
    peer.private_key = None
    peer.validate()  # must not raise
def test_invalid_private_key(self):
    """Validation fails when the private key belongs to a different peer id."""
    victim = PeerId()
    other = PeerId()
    victim.private_key = other.private_key
    self.assertRaises(InvalidPeerIdException, victim.validate)
def prepare(self, args: Namespace) -> None:
    """Parse CLI args and build the node (legacy variant): peer id, storage,
    wallet, manager and peer discovery, then start the manager.
    """
    import hathor
    from hathor.conf import HathorSettings
    from hathor.manager import HathorManager, TestMode
    from hathor.p2p.peer_discovery import BootstrapPeerDiscovery, DNSPeerDiscovery
    from hathor.p2p.peer_id import PeerId
    from hathor.p2p.utils import discover_hostname
    from hathor.transaction import genesis
    from hathor.transaction.storage import (
        TransactionStorage,
        TransactionCacheStorage,
        TransactionCompactStorage,
        TransactionMemoryStorage,
    )
    from hathor.wallet import HDWallet, Wallet

    settings = HathorSettings()

    if args.recursion_limit:
        sys.setrecursionlimit(args.recursion_limit)

    # Peer identity: fresh, or loaded from the JSON file given via --peer.
    if not args.peer:
        peer_id = PeerId()
    else:
        data = json.load(open(args.peer, 'r'))
        peer_id = PeerId.create_from_json(data)

    print('Hathor v{} (genesis {})'.format(hathor.__version__, genesis.GENESIS_HASH.hex()[:7]))
    print('My peer id is', peer_id.id)

    def create_wallet():
        # Build the wallet selected by --wallet ('hd' or 'keypair').
        if args.wallet == 'hd':
            print('Using HDWallet')
            kwargs = {
                'words': args.words,
            }
            if args.passphrase:
                wallet_passphrase = getpass.getpass(prompt='HD Wallet passphrase:')
                kwargs['passphrase'] = wallet_passphrase.encode()
            if args.data:
                kwargs['directory'] = args.data
            return HDWallet(**kwargs)
        elif args.wallet == 'keypair':
            print('Using KeyPairWallet')
            if args.data:
                wallet = Wallet(directory=args.data)
            else:
                wallet = Wallet()
            wallet.flush_to_disk_interval = 5  # seconds
            if args.unlock_wallet:
                # NOTE(review): the following span appears corrupted/masked in the
                # source (`'Wallet password:'******'Invalid type for wallet'`);
                # preserved verbatim — original likely unlocked the wallet and
                # raised ValueError for unknown wallet types. TODO confirm upstream.
                wallet_passwd = getpass.getpass(prompt='Wallet password:'******'Invalid type for wallet')

    tx_storage: TransactionStorage
    if args.data:
        wallet_dir = args.data
        print('Using Wallet at {}'.format(wallet_dir))
        if args.rocksdb_storage:
            from hathor.transaction.storage import TransactionRocksDBStorage
            tx_dir = os.path.join(args.data, 'tx.db')
            tx_storage = TransactionRocksDBStorage(path=tx_dir, with_index=(not args.cache))
            print('Using TransactionRocksDBStorage at {}'.format(tx_dir))
        else:
            tx_dir = os.path.join(args.data, 'tx')
            tx_storage = TransactionCompactStorage(path=tx_dir, with_index=(not args.cache))
            print('Using TransactionCompactStorage at {}'.format(tx_dir))
        if args.cache:
            tx_storage = TransactionCacheStorage(tx_storage, reactor)
            if args.cache_size:
                tx_storage.capacity = args.cache_size
            if args.cache_interval:
                tx_storage.interval = args.cache_interval
            print('Using TransactionCacheStorage, capacity {}, interval {}s'.format(
                tx_storage.capacity, tx_storage.interval))
            tx_storage.start()
    else:
        # if using MemoryStorage, no need to have cache
        tx_storage = TransactionMemoryStorage()
        print('Using TransactionMemoryStorage')
    self.tx_storage = tx_storage

    if args.wallet:
        self.wallet = create_wallet()
    else:
        self.wallet = None

    # Hostname: explicit flag, or discovered when --auto-hostname is given.
    if args.hostname and args.auto_hostname:
        print('You cannot use --hostname and --auto-hostname together.')
        sys.exit(-1)
    if not args.auto_hostname:
        hostname = args.hostname
    else:
        print('Trying to discover your hostname...')
        hostname = discover_hostname()
        if not hostname:
            print('Aborting because we could not discover your hostname.')
            print('Try again or run without --auto-hostname.')
            sys.exit(-1)
        print('Hostname discovered and set to {}'.format(hostname))

    network = settings.NETWORK_NAME
    self.manager = HathorManager(reactor, peer_id=peer_id, network=network, hostname=hostname,
                                 tx_storage=self.tx_storage, wallet=self.wallet,
                                 wallet_index=args.wallet_index, stratum_port=args.stratum,
                                 min_block_weight=args.min_block_weight, ssl=True)

    if args.allow_mining_without_peers:
        self.manager.allow_mining_without_peers()

    # Peer discovery: DNS seeds (settings + --dns) and explicit --bootstrap list.
    dns_hosts = []
    if settings.BOOTSTRAP_DNS:
        dns_hosts.extend(settings.BOOTSTRAP_DNS)
    if args.dns:
        dns_hosts.extend(args.dns)
    if dns_hosts:
        self.manager.add_peer_discovery(DNSPeerDiscovery(dns_hosts))
    if args.bootstrap:
        self.manager.add_peer_discovery(BootstrapPeerDiscovery(args.bootstrap))

    if args.test_mode_tx_weight:
        self.manager.test_mode = TestMode.TEST_TX_WEIGHT
        if self.wallet:
            self.wallet.test_mode = True

    for description in args.listen:
        self.manager.add_listen_address(description)

    self.start_manager()
    self.register_resources(args)
def __init__(self, reactor: IReactorCore, peer_id: Optional[PeerId] = None, network: Optional[str] = None,
             hostname: Optional[str] = None, pubsub: Optional[PubSubManager] = None,
             wallet: Optional[BaseWallet] = None, tx_storage: Optional[TransactionStorage] = None,
             peer_storage: Optional[Any] = None, default_port: int = 40403, wallet_index: bool = False,
             stratum_port: Optional[int] = None, min_block_weight: Optional[int] = None,
             ssl: bool = True) -> None:
    """
    :param reactor: Twisted reactor which handles the mainloop and the events.
    :param peer_id: Id of this node. If not given, a new one is created.
    :param network: Name of the network this node participates. Usually it is either testnet or mainnet.
    :type network: string

    :param hostname: The hostname of this node. It is used to generate its entrypoints.
    :type hostname: string

    :param pubsub: If not given, a new one is created.
    :type pubsub: :py:class:`hathor.pubsub.PubSubManager`

    :param tx_storage: If not given, a :py:class:`TransactionMemoryStorage` one is created.
    :type tx_storage: :py:class:`hathor.transaction.storage.transaction_storage.TransactionStorage`

    :param peer_storage: If not given, a new one is created.
    :type peer_storage: :py:class:`hathor.p2p.peer_storage.PeerStorage`

    :param default_port: Network default port. It is used when only ip addresses are discovered.
    :type default_port: int

    :param wallet_index: If should add a wallet index in the storage
    :type wallet_index: bool

    :param stratum_port: Stratum server port. Stratum server will only be created if it is not None.
    :type stratum_port: Optional[int]

    :param min_block_weight: Minimum weight for blocks.
    :type min_block_weight: Optional[int]
    """
    # Local imports to avoid circular dependencies at module load time.
    from hathor.p2p.factory import HathorServerFactory, HathorClientFactory
    from hathor.p2p.manager import ConnectionsManager
    from hathor.transaction.storage.memory_storage import TransactionMemoryStorage
    from hathor.metrics import Metrics

    self.log = logger.new()

    self.reactor = reactor
    if hasattr(self.reactor, 'addSystemEventTrigger'):
        self.reactor.addSystemEventTrigger('after', 'shutdown', self.stop)

    self.state: Optional[HathorManager.NodeState] = None
    self.profiler: Optional[Any] = None

    # Hostname, used to be accessed by other peers.
    self.hostname = hostname

    # Remote address, which can be different from local address.
    self.remote_address = None

    self.my_peer = peer_id or PeerId()
    self.network = network or 'testnet'

    # XXX Should we use a singleton or a new PeerStorage? [msbrogli 2018-08-29]
    self.pubsub = pubsub or PubSubManager(self.reactor)
    self.tx_storage = tx_storage or TransactionMemoryStorage()
    self.tx_storage.pubsub = self.pubsub
    if wallet_index and self.tx_storage.with_index:
        self.tx_storage.wallet_index = WalletIndex(self.pubsub)
        self.tx_storage.tokens_index = TokensIndex()

    self.avg_time_between_blocks = settings.AVG_TIME_BETWEEN_BLOCKS
    self.min_block_weight = min_block_weight or settings.MIN_BLOCK_WEIGHT
    self.min_tx_weight = settings.MIN_TX_WEIGHT

    self.metrics = Metrics(
        pubsub=self.pubsub,
        avg_time_between_blocks=self.avg_time_between_blocks,
        tx_storage=tx_storage,
        reactor=self.reactor,
    )

    self.consensus_algorithm = ConsensusAlgorithm()

    self.peer_discoveries: List[PeerDiscovery] = []

    self.ssl = ssl
    self.server_factory = HathorServerFactory(self.network, self.my_peer, node=self, use_ssl=ssl)
    self.client_factory = HathorClientFactory(self.network, self.my_peer, node=self, use_ssl=ssl)
    self.connections = ConnectionsManager(self.reactor, self.my_peer, self.server_factory,
                                          self.client_factory, self.pubsub, self, ssl)

    self.wallet = wallet
    if self.wallet:
        self.wallet.pubsub = self.pubsub
        self.wallet.reactor = self.reactor

    # When manager is in test mode we reduce the weight of blocks/transactions.
    self.test_mode: int = 0

    # Multiplier coefficient to adjust the minimum weight of a normal tx to 18
    self.min_tx_weight_coefficient = 1.6
    # Amount in which tx min weight reaches the middle point between the minimum and maximum weight.
    self.min_tx_weight_k = 100

    # Stratum mining server is optional; only created when a port is given.
    self.stratum_factory = StratumFactory(manager=self, port=stratum_port) if stratum_port else None
    # Set stratum factory for metrics object
    self.metrics.stratum_factory = self.stratum_factory

    self._allow_mining_without_peers = False

    # Thread pool used to resolve pow when sending tokens
    self.pow_thread_pool = ThreadPool(minthreads=0, maxthreads=settings.MAX_POW_THREADS,
                                      name='Pow thread pool')

    # List of addresses to listen for new connections (eg: [tcp:8000])
    self.listen_addresses: List[str] = []
def test_invalid_id(self):
    """Validation fails when the id does not match the peer's keys."""
    peer = PeerId()
    peer.id = peer.id[::-1]  # corrupt the id by reversing it
    self.assertRaises(InvalidPeerIdException, peer.validate)