def test_block_sync_new_blocks_and_txs(self):
    self._add_new_blocks(25)
    self._add_new_transactions(3)
    self._add_new_blocks(4)
    self._add_new_transactions(5)

    manager2 = self.create_peer(self.network)
    self.assertEqual(manager2.state, manager2.NodeState.READY)

    conn = FakeConnection(self.manager1, manager2)

    for _ in range(1000):
        conn.run_one_step()
        self.clock.advance(0.1)

    # dot1 = self.manager1.tx_storage.graphviz(format='pdf')
    # dot1.render('dot1')
    # dot2 = manager2.tx_storage.graphviz(format='pdf')
    # dot2.render('dot2')

    node_sync = conn.proto1.state.get_sync_plugin()
    self.assertEqual(self.manager1.tx_storage.latest_timestamp, manager2.tx_storage.latest_timestamp)
    self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
    self.assertTipsEqual(self.manager1, manager2)
    self.assertConsensusEqual(self.manager1, manager2)
    self.assertConsensusValid(self.manager1)
    self.assertConsensusValid(manager2)
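# Note on the sync assertions used in these tests: equality between
# node_sync.synced_timestamp and node_sync.peer_timestamp is read as "this peer has
# finished syncing with the other side", and the assertTipsEqual/assertConsensusEqual
# helpers then verify that both storages converged to the same tips and the same
# (valid) consensus.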
def setUp(self):
    super().setUp()

    self.network = 'testnet'

    self.peer_id1 = PeerId()
    self.peer_id2 = PeerId()
    self.manager1 = self.create_peer(self.network, peer_id=self.peer_id1)
    self.manager2 = self.create_peer(self.network, peer_id=self.peer_id2)

    self.conn = FakeConnection(self.manager1, self.manager2)
def test_new_syncing_peer(self):
    nodes = []
    miners = []
    tx_generators = []

    simulator = Simulator(self.clock)

    for hashpower in [10e6, 8e6, 5e6, 5e6, 5e6]:
        manager = self.create_peer(self.network)
        for node in nodes:
            conn = FakeConnection(manager, node, latency=0.085)
            simulator.add_connection(conn)
        nodes.append(manager)

        miner = MinerSimulator(manager, hashpower=hashpower)
        miner.start()
        miners.append(miner)

    for i, rate in enumerate([5, 4, 3, 2, 1]):
        tx_gen = RandomTransactionGenerator(nodes[i], rate=rate * 1 / 60., hashpower=1e6, ignore_no_funds=True)
        tx_gen.start()
        tx_generators.append(tx_gen)

    simulator.run(45 * 60)

    for node in nodes[1:]:
        self.assertTipsEqual(nodes[0], node)

    late_manager = self.create_peer(self.network)
    for node in nodes:
        conn = FakeConnection(late_manager, node, latency=0.300)
        simulator.add_connection(conn)
    nodes.append(late_manager)

    simulator.run(8 * 60)

    for tx_gen in tx_generators:
        tx_gen.stop()
    for miner in miners:
        miner.stop()

    simulator.run(60)

    # dot1 = nodes[0].tx_storage.graphviz(format='pdf')
    # dot1.render('dot1tmp')
    # dot2 = late_manager.tx_storage.graphviz(format='pdf')
    # dot2.render('dot2tmp')

    for node in nodes[1:]:
        self.assertTipsEqual(nodes[0], node)
def test_capabilities(self):
    network = 'testnet'
    manager1 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST])
    manager2 = self.create_peer(network, capabilities=[])

    conn = FakeConnection(manager1, manager2)

    # Run the p2p protocol.
    for _ in range(100):
        conn.run_one_step(debug=True)
        self.clock.advance(0.1)

    # Even without the whitelist capability, the peers must connect because the whitelist URL config is None.
    self.assertEqual(conn._proto1.state.state_name, 'READY')
    self.assertEqual(conn._proto2.state.state_name, 'READY')

    manager3 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST])
    manager4 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST])

    conn2 = FakeConnection(manager3, manager4)

    # Run the p2p protocol.
    for _ in range(100):
        conn2.run_one_step(debug=True)
        self.clock.advance(0.1)

    self.assertEqual(conn2._proto1.state.state_name, 'READY')
    self.assertEqual(conn2._proto2.state.state_name, 'READY')
def test_many_miners_since_beginning(self):
    nodes = []
    miners = []

    simulator = Simulator(self.clock)

    for hashpower in [10e6, 5e6, 1e6, 1e6, 1e6]:
        manager = self.create_peer(self.network)
        for node in nodes:
            conn = FakeConnection(manager, node, latency=0.085)
            simulator.add_connection(conn)
        nodes.append(manager)

        miner = MinerSimulator(manager, hashpower=hashpower)
        miner.start()
        miners.append(miner)

    simulator.run(60 * 60)

    for miner in miners:
        miner.stop()

    simulator.run(15)

    for node in nodes[1:]:
        self.assertTipsEqual(nodes[0], node)
def test_invalid_different_network(self):
    manager3 = self.create_peer(network='mainnet')
    conn = FakeConnection(self.manager1, manager3)
    conn.run_one_step()  # HELLO
    self._check_result_only_cmd(conn.peek_tr1_value(), b'ERROR')
    self.assertTrue(conn.tr1.disconnecting)
    conn.run_one_step()  # ERROR
def test_block_sync_many_new_blocks(self):
    self._add_new_blocks(150)

    manager2 = self.create_peer(self.network)
    self.assertEqual(manager2.state, manager2.NodeState.READY)

    conn = FakeConnection(self.manager1, manager2)

    while not conn.is_empty():
        conn.run_one_step(debug=True)
        self.clock.advance(0.1)

    node_sync = conn.proto1.state.get_sync_plugin()
    self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
    self.assertTipsEqual(self.manager1, manager2)
    self.assertConsensusEqual(self.manager1, manager2)
    self.assertConsensusValid(self.manager1)
    self.assertConsensusValid(manager2)
def test_block_sync_many_new_blocks(self):
    self._add_new_blocks(150)

    manager2 = self.create_peer(self.network)
    self.assertEqual(manager2.state, manager2.NodeState.READY)

    conn = FakeConnection(self.manager1, manager2)

    for _ in range(1000):
        if not conn.tr1.value() and not conn.tr2.value():
            break
        conn.run_one_step()
        self.clock.advance(0.1)

    node_sync = conn.proto1.state.get_sync_plugin()
    self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
    self.assertTipsEqual(self.manager1, manager2)
    self.assertConsensusEqual(self.manager1, manager2)
    self.assertConsensusValid(self.manager1)
    self.assertConsensusValid(manager2)
def test_block_sync_only_genesis(self):
    manager2 = self.create_peer(self.network)
    self.assertEqual(manager2.state, manager2.NodeState.READY)

    conn = FakeConnection(self.manager1, manager2)

    conn.run_one_step()  # HELLO
    conn.run_one_step()  # PEER-ID
    conn.run_one_step()  # READY

    node_sync = conn.proto1.state.get_sync_plugin()
    self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
    self.assertTipsEqual(self.manager1, manager2)
def test_block_sync_only_genesis(self):
    manager2 = self.create_peer(self.network)
    self.assertEqual(manager2.state, manager2.NodeState.READY)

    conn = FakeConnection(self.manager1, manager2)

    conn.run_one_step()  # HELLO
    conn.run_one_step()  # PEER-ID

    for _ in range(100):
        if not conn.tr1.value() and not conn.tr2.value():
            break
        conn.run_one_step()
        self.clock.advance(0.1)

    node_sync = conn.proto1.state.get_sync_plugin()
    self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
    self.assertTipsEqual(self.manager1, manager2)
def test_20_nodes(self):
    nodes = []
    miners = []
    tx_generators = []

    simulator = Simulator(self.clock)

    for _ in range(1):
        hashpower = 1e6 * random.randint(1, 20)
        manager = self.create_peer(self.network)
        for node in nodes:
            conn = FakeConnection(manager, node, latency=0.085)
            simulator.add_connection(conn)
        nodes.append(manager)

        miner = MinerSimulator(manager, hashpower=hashpower)
        miner.start()
        miners.append(miner)

        rate = random.randint(1, 30)
        tx_gen = RandomTransactionGenerator(manager, rate=rate * 1 / 60., hashpower=1e6, ignore_no_funds=True)
        tx_gen.start()
        tx_generators.append(tx_gen)

    simulator.run(5 * 60)

    for _ in range(20):
        hashpower = 1e6 * random.randint(1, 20)
        manager = self.create_peer(self.network)
        for node in nodes:
            conn = FakeConnection(manager, node, latency=0.085)
            simulator.add_connection(conn)
        nodes.append(manager)

    simulator.run(2 * 60)

    for node in nodes[1:]:
        self.assertTipsEqual(nodes[0], node)

    for manager in nodes:
        miner = MinerSimulator(manager, hashpower=hashpower)
        miner.start()
        miners.append(miner)

        rate = random.randint(1, 5)
        tx_gen = RandomTransactionGenerator(manager, rate=rate * 1 / 60., hashpower=1e6, ignore_no_funds=True)
        # They will randomly send tokens between them.
        tx_gen.send_to = [x.wallet.get_unused_address(mark_as_used=True) for x in nodes]
        tx_gen.start()
        tx_generators.append(tx_gen)

    simulator.run(30 * 60)

    print()
    print()
    print('Manager 1: Connection metrics')
    for conn in nodes[0].connections.iter_ready_connections():
        conn.metrics.print_stats()
    print()
    print()

    dot1 = nodes[0].tx_storage.graphviz(format='dot')
    dot1.render('dot1tmp')

    for tx_gen in tx_generators:
        tx_gen.stop()
    for miner in miners:
        miner.stop()

    simulator.run(60)

    for node in nodes[1:]:
        self.assertTipsEqual(nodes[0], node)
def test_split_brain(self):
    debug_pdf = False

    manager1 = self.create_peer(self.network, unlock_wallet=True)
    manager1.avg_time_between_blocks = 3

    manager2 = self.create_peer(self.network, unlock_wallet=True)
    manager2.avg_time_between_blocks = 3

    for _ in range(10):
        add_new_block(manager1, advance_clock=1)
        add_blocks_unlock_reward(manager1)
        add_new_block(manager2, advance_clock=1)
        add_blocks_unlock_reward(manager2)
        self.clock.advance(10)
        for _ in range(random.randint(3, 10)):
            add_new_transactions(manager1, random.randint(2, 4))
            add_new_transactions(manager2, random.randint(3, 7))
            add_new_double_spending(manager1)
            add_new_double_spending(manager2)
            self.clock.advance(10)

    self.clock.advance(20)

    self.assertTipsNotEqual(manager1, manager2)
    self.assertConsensusValid(manager1)
    self.assertConsensusValid(manager2)

    if debug_pdf:
        dot1 = GraphvizVisualizer(manager1.tx_storage, include_verifications=True).dot()
        dot1.render('dot1-pre')

    conn = FakeConnection(manager1, manager2)

    conn.run_one_step()  # HELLO
    conn.run_one_step()  # PEER-ID

    empty_counter = 0
    for _ in range(1000):
        if conn.is_empty():
            empty_counter += 1
            if empty_counter > 10:
                break
        else:
            empty_counter = 0

        conn.run_one_step()
        self.clock.advance(0.2)

    if debug_pdf:
        dot1 = GraphvizVisualizer(manager1.tx_storage, include_verifications=True).dot()
        dot1.render('dot1-post')
        dot2 = GraphvizVisualizer(manager2.tx_storage, include_verifications=True).dot()
        dot2.render('dot2-post')

    node_sync = conn.proto1.state.get_sync_plugin()
    self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
    self.assertTipsEqual(manager1, manager2)
    self.assertConsensusEqual(manager1, manager2)
    # self.assertConsensusValid(manager1)
    self.assertConsensusValid(manager2)
class HathorProtocolTestCase(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.network = 'testnet'

        self.peer_id1 = PeerId()
        self.peer_id2 = PeerId()
        self.manager1 = self.create_peer(self.network, peer_id=self.peer_id1)
        self.manager2 = self.create_peer(self.network, peer_id=self.peer_id2)

        self.conn1 = FakeConnection(self.manager1, self.manager2)

    def assertIsConnected(self, conn=None):
        if conn is None:
            conn = self.conn1
        self.assertFalse(conn.tr1.disconnecting)
        self.assertFalse(conn.tr2.disconnecting)

    def _send_cmd(self, proto, cmd, payload=None):
        if not payload:
            line = '{}\r\n'.format(cmd)
        else:
            line = '{} {}\r\n'.format(cmd, payload)

        if isinstance(line, str):
            line = line.encode('utf-8')

        return proto.dataReceived(line)

    def _check_result_only_cmd(self, result, expected_cmd):
        cmd_list = []
        for line in result.split(b'\r\n'):
            cmd, _, _ = line.partition(b' ')
            cmd_list.append(cmd)
        self.assertIn(expected_cmd, cmd_list)

    def _check_cmd_and_value(self, result, expected):
        result_list = []
        for line in result.split(b'\r\n'):
            cmd, _, data = line.partition(b' ')
            result_list.append((cmd, data))
        self.assertIn(expected, result_list)

    def test_on_connect(self):
        self._check_result_only_cmd(self.conn1.tr1.value(), b'HELLO')

    def test_invalid_command(self):
        self._send_cmd(self.conn1.proto1, 'INVALID-CMD')
        self.conn1.proto1.state.handle_error('')
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_rate_limit(self):
        hits = 1
        window = 60

        self.conn1.proto1.ratelimit.set_limit(HathorProtocol.RateLimitKeys.GLOBAL, hits, window)

        # First will be ignored
        self._send_cmd(self.conn1.proto1, 'HELLO')
        # Second will reach limit
        self._send_cmd(self.conn1.proto1, 'HELLO')

        self._check_cmd_and_value(
            self.conn1.tr1.value(),
            (b'THROTTLE', 'global At most {} hits every {} seconds'.format(hits, window).encode('utf-8')))

        self.conn1.proto1.state.handle_throttle(b'')

        # Test empty disconnect
        self.conn1.proto1.state = None
        self.conn1.proto1.connections = None
        self.conn1.proto1.on_disconnect('')

    def test_invalid_size(self):
        self.conn1.tr1.clear()
        # Create a big payload.
        big_payload = '['
        for x in range(65536):
            big_payload = '{}{}'.format(big_payload, x)
        big_payload = '{}]'.format(big_payload)
        self._send_cmd(self.conn1.proto1, 'HELLO', big_payload)
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_invalid_payload(self):
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        with self.assertRaises(json.decoder.JSONDecodeError):
            self._send_cmd(self.conn1.proto1, 'PEERS', 'abc')

    def test_invalid_hello1(self):
        self.conn1.tr1.clear()
        self._send_cmd(self.conn1.proto1, 'HELLO')
        self._check_result_only_cmd(self.conn1.tr1.value(), b'ERROR')
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_invalid_hello2(self):
        self.conn1.tr1.clear()
        self._send_cmd(self.conn1.proto1, 'HELLO', 'invalid_payload')
        self._check_result_only_cmd(self.conn1.tr1.value(), b'ERROR')
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_invalid_hello3(self):
        self.conn1.tr1.clear()
        self._send_cmd(self.conn1.proto1, 'HELLO', '{}')
        self._check_result_only_cmd(self.conn1.tr1.value(), b'ERROR')
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_invalid_hello4(self):
        self.conn1.tr1.clear()
        self._send_cmd(
            self.conn1.proto1,
            'HELLO',
            '{"app": 0, "remote_address": 1, "network": 2, "genesis_hash": "123", "settings_hash": "456"}'
        )
        self._check_result_only_cmd(self.conn1.tr1.value(), b'ERROR')
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_invalid_hello5(self):
        # HELLO with clocks too far apart.
        self.conn1.tr1.clear()
        data = self.conn1.proto2.state._get_hello_data()
        data['timestamp'] = data['timestamp'] + settings.MAX_FUTURE_TIMESTAMP_ALLOWED / 2 + 1
        self._send_cmd(self.conn1.proto1, 'HELLO', json.dumps(data))
        self._check_result_only_cmd(self.conn1.tr1.value(), b'ERROR')
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_valid_hello(self):
        self.conn1.run_one_step()
        self._check_result_only_cmd(self.conn1.tr1.value(), b'PEER-ID')
        self._check_result_only_cmd(self.conn1.tr2.value(), b'PEER-ID')
        self.assertFalse(self.conn1.tr1.disconnecting)
        self.assertFalse(self.conn1.tr2.disconnecting)

    @inlineCallbacks
    def test_invalid_peer_id(self):
        self.conn1.run_one_step()
        invalid_payload = {'id': '123', 'entrypoints': ['tcp://localhost:1234']}
        yield self._send_cmd(self.conn1.proto1, 'PEER-ID', json.dumps(invalid_payload))
        self._check_result_only_cmd(self.conn1.tr1.value(), b'ERROR')
        self.assertTrue(self.conn1.tr1.disconnecting)

        # When a DNS request is made through the twisted client, it starts a callLater that checks the resolv file
        # every minute:
        # https://github.com/twisted/twisted/blob/59f8266c286e2b073ddb05c70317ac20693f2b0c/src/twisted/names/client.py#L147  # noqa
        # So we need to cancel this call manually, otherwise the reactor would be left unclean with a pending call.
        # TODO We should use a fake DNS resolver for tests, otherwise an internet connection is needed to run them.
        resolver = twisted.names.client.getResolver().resolvers[2]
        resolver._parseCall.cancel()

    def test_invalid_same_peer_id(self):
        manager3 = self.create_peer(self.network, peer_id=self.peer_id1)
        conn = FakeConnection(self.manager1, manager3)
        conn.run_one_step()
        conn.run_one_step()
        self._check_result_only_cmd(conn.tr1.value(), b'ERROR')
        self.assertTrue(conn.tr1.disconnecting)

    def test_invalid_same_peer_id2(self):
        # We connect nodes 1-2 and 1-3. Nodes 2 and 3 have the same peer_id. The connections are
        # established simultaneously, so the peer id duplication is not detected in the PEER_ID
        # state, only in the READY state.
        manager3 = self.create_peer(self.network, peer_id=self.peer_id2)
        conn = FakeConnection(manager3, self.manager1)

        # HELLO
        self.conn1.run_one_step()
        conn.run_one_step()

        # PEER-ID
        self.conn1.run_one_step()
        conn.run_one_step()

        # READY
        self.conn1.run_one_step()
        conn.run_one_step()

        self.run_to_completion()

        # One of the peers will close the connection. We don't know which one, as it depends
        # on the peer ids.
        conn1_value = self.conn1.tr1.value() + self.conn1.tr2.value()
        if b'ERROR' in conn1_value:
            conn_dead = self.conn1
            conn_alive = conn
        else:
            conn_dead = conn
            conn_alive = self.conn1

        self._check_result_only_cmd(conn_dead.tr1.value() + conn_dead.tr2.value(), b'ERROR')

        # At this point, the connection must be closing, as the error was detected in the READY state.
        self.assertIn(True, [conn_dead.tr1.disconnecting, conn_dead.tr2.disconnecting])

        # Check connected_peers.
        connected_peers = list(self.manager1.connections.connected_peers.values())
        self.assertEqual(1, len(connected_peers))
        self.assertIn(connected_peers[0], [conn_alive.proto1, conn_alive.proto2])

        # The other connection is still up.
        self.assertIsConnected(conn_alive)

    def test_invalid_different_network(self):
        manager3 = self.create_peer(network='mainnet')
        conn = FakeConnection(self.manager1, manager3)
        conn.run_one_step()
        self._check_result_only_cmd(conn.tr1.value(), b'ERROR')
        self.assertTrue(conn.tr1.disconnecting)
        conn.run_one_step()

    def test_valid_hello_and_peer_id(self):
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        # Originally, only a GET-PEERS message would be received here, but now two messages arrive in a row.
        # self._check_result_only_cmd(self.tr1.value(), b'GET-PEERS')
        # self._check_result_only_cmd(self.tr2.value(), b'GET-PEERS')
        self.assertIsConnected()
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        self.assertIsConnected()

    def test_send_ping(self):
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        # Originally, only a GET-PEERS message would be received here, but now two messages arrive in a row.
        # self._check_result_only_cmd(self.tr1.value(), b'GET-PEERS')
        # self._check_result_only_cmd(self.tr2.value(), b'GET-PEERS')
        self.assertIsConnected()
        self.clock.advance(5)
        self.assertTrue(b'PING\r\n' in self.conn1.tr1.value())
        self.assertTrue(b'PING\r\n' in self.conn1.tr2.value())
        self.conn1.run_one_step()
        self.assertTrue(b'PONG\r\n' in self.conn1.tr1.value())
        self.assertTrue(b'PONG\r\n' in self.conn1.tr2.value())
        while b'PONG\r\n' in self.conn1.tr1.value():
            self.conn1.run_one_step()
        self.assertEqual(self.clock.seconds(), self.conn1.proto1.last_message)

    def test_send_invalid_unicode(self):
        # \xff is invalid unicode.
        self.conn1.proto1.dataReceived(b'\xff\r\n')
        self.assertTrue(self.conn1.tr1.disconnecting)

    def test_on_disconnect(self):
        self.assertIn(self.conn1.proto1, self.manager1.connections.handshaking_peers)
        self.conn1.disconnect('Testing')
        self.assertNotIn(self.conn1.proto1, self.manager1.connections.handshaking_peers)

    def test_on_disconnect_after_hello(self):
        self.conn1.run_one_step()
        self.assertIn(self.conn1.proto1, self.manager1.connections.handshaking_peers)
        self.conn1.disconnect('Testing')
        self.assertNotIn(self.conn1.proto1, self.manager1.connections.handshaking_peers)

    def test_on_disconnect_after_peer_id(self):
        self.conn1.run_one_step()
        self.assertIn(self.conn1.proto1, self.manager1.connections.handshaking_peers)
        # Reaching READY now depends on a message exchange from both peers, so we need one more step.
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        self.assertIn(self.conn1.proto1, self.manager1.connections.connected_peers.values())
        self.assertNotIn(self.conn1.proto1, self.manager1.connections.handshaking_peers)
        self.conn1.disconnect('Testing')
        self.assertNotIn(self.conn1.proto1, self.manager1.connections.connected_peers.values())

    def test_two_connections(self):
        self.conn1.run_one_step()  # HELLO
        self.conn1.run_one_step()  # PEER-ID
        self.conn1.run_one_step()  # GET-PEERS
        self.conn1.run_one_step()  # GET-TIPS

        manager3 = self.create_peer(self.network)
        conn = FakeConnection(self.manager1, manager3)
        conn.run_one_step()  # HELLO
        conn.run_one_step()  # PEER-ID

        self._check_result_only_cmd(self.conn1.tr1.value(), b'PEERS')
        self.conn1.run_one_step()

    @inlineCallbacks
    def test_get_data(self):
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        self.conn1.run_one_step()
        self.assertIsConnected()
        missing_tx = '00000000228dfcd5dec1c9c6263f6430a5b4316bb9e3decb9441a6414bfd8697'
        yield self._send_cmd(self.conn1.proto1, 'GET-DATA', missing_tx)
        self._check_result_only_cmd(self.conn1.tr1.value(), b'NOT-FOUND')
        self.conn1.run_one_step()
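# Many of these tests drive the handshake by hand, one run_one_step() call per
# protocol message (HELLO, PEER-ID, READY, as the inline comments indicate). A small
# helper could factor out that repetition; this is only a sketch, and run_handshake
# is a hypothetical name that does not exist in the original suite:
def run_handshake(conn, steps=3):
    """Advance a FakeConnection through the first `steps` protocol messages (sketch)."""
    for _ in range(steps):
        conn.run_one_step()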
def test_two_nodes(self):
    # import sys
    # from twisted.python import log
    # log.startLogging(sys.stdout)

    manager1 = self.create_peer(self.network)
    manager2 = self.create_peer(self.network)

    # manager1.start_log_animation('_debug1')
    # manager2.start_log_animation('_debug2')

    simulator = Simulator(self.clock)

    miner1 = MinerSimulator(manager1, hashpower=10e6)
    miner1.start()
    simulator.run(10)

    gen_tx1 = RandomTransactionGenerator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
    gen_tx1.start()
    simulator.run(60)

    conn12 = FakeConnection(manager1, manager2, latency=0.150)
    simulator.add_connection(conn12)
    simulator.run(60)

    miner2 = MinerSimulator(manager2, hashpower=100e6)
    miner2.start()
    simulator.run(120)

    gen_tx2 = RandomTransactionGenerator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True)
    gen_tx2.start()
    simulator.run(10 * 60)

    miner1.stop()
    miner2.stop()
    gen_tx1.stop()
    gen_tx2.stop()

    simulator.run(5 * 60)

    print()
    print()
    print('Manager 1: Connection metrics')
    for conn in manager1.connections.get_ready_connections():
        conn.metrics.print_stats()
    print()
    print()

    # dot1 = manager1.tx_storage.graphviz(format='pdf')
    # dot1.render('test_sync1_v')
    # dot1f = manager1.tx_storage.graphviz_funds(format='pdf')
    # dot1f.render('test_sync1_f')

    self.assertTrue(conn12.is_connected)
    self.assertTipsEqual(manager1, manager2)
def test_many_nodes(self):
    manager1 = self.create_peer(self.network)
    manager2 = self.create_peer(self.network)

    nodes = [manager1, manager2]
    miners = []

    simulator = Simulator(self.clock)

    miner1 = MinerSimulator(manager1, hashpower=10e6)
    miner1.start()
    miners.append(miner1)
    simulator.run(10)

    gen_tx1 = RandomTransactionGenerator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
    gen_tx1.start()
    simulator.run(60)

    conn12 = FakeConnection(manager1, manager2, latency=0.150)
    simulator.add_connection(conn12)
    simulator.run(60)

    miner2 = MinerSimulator(manager2, hashpower=100e6)
    miner2.start()
    miners.append(miner2)
    simulator.run(120)

    gen_tx2 = RandomTransactionGenerator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True)
    gen_tx2.start()
    simulator.run(10 * 60)

    print()
    print()
    print('Manager 1: Two nodes, timestamp = {}'.format(self.clock.seconds()))
    for conn in manager1.connections.get_ready_connections():
        conn.metrics.print_stats()
    print()
    print()

    for _ in range(4):
        tmp_manager = self.create_peer(self.network)
        for m in nodes:
            latency = random.random()
            conn = FakeConnection(tmp_manager, m, latency=latency)
            simulator.add_connection(conn)
        nodes.append(tmp_manager)

    simulator.run(10 * 60)

    print()
    print()
    print('Manager 1: {} nodes, timestamp = {}'.format(len(nodes), self.clock.seconds()))
    for conn in manager1.connections.get_ready_connections():
        conn.metrics.print_stats()
    print()
    print()

    for miner in miners:
        miner.stop()
    gen_tx1.stop()
    gen_tx2.stop()

    simulator.run(5 * 60)

    print()
    print()
    print('Manager 1: Final connection metrics')
    for conn in manager1.connections.get_ready_connections():
        conn.metrics.print_stats()
    print()
    print()

    for node in nodes[1:]:
        self.assertTipsEqual(manager1, node)
def test_downloader(self):
    blocks = self._add_new_blocks(3)

    manager2 = self.create_peer(self.network)
    self.assertEqual(manager2.state, manager2.NodeState.READY)

    conn = FakeConnection(self.manager1, manager2)

    # Get to PEER-ID state only, because when it gets to READY it will automatically sync.
    conn.run_one_step()

    self.assertTrue(isinstance(conn.proto1.state, PeerIdState))
    self.assertTrue(isinstance(conn.proto2.state, PeerIdState))

    node_sync1 = NodeSyncTimestamp(conn.proto1, reactor=conn.proto1.node.reactor)
    node_sync1.start()
    node_sync2 = NodeSyncTimestamp(conn.proto2, reactor=conn.proto2.node.reactor)
    node_sync2.start()

    self.assertTrue(isinstance(conn.proto1.state, PeerIdState))
    self.assertTrue(isinstance(conn.proto2.state, PeerIdState))

    downloader = conn.proto2.connections.downloader

    deferred1 = downloader.get_tx(blocks[0].hash, node_sync1)
    deferred1.addCallback(node_sync1.on_tx_success)

    self.assertEqual(len(downloader.pending_transactions), 1)

    details = downloader.pending_transactions[blocks[0].hash]
    self.assertEqual(len(details.connections), 1)
    self.assertEqual(len(downloader.downloading_deque), 1)

    deferred2 = downloader.get_tx(blocks[0].hash, node_sync2)
    deferred2.addCallback(node_sync2.on_tx_success)

    self.assertEqual(len(downloader.pending_transactions), 1)
    self.assertEqual(len(downloader.pending_transactions[blocks[0].hash].connections), 2)
    self.assertEqual(len(downloader.downloading_deque), 1)
    self.assertEqual(deferred1, deferred2)

    details.downloading_deferred.callback(blocks[0])

    self.assertEqual(len(downloader.downloading_deque), 0)
    self.assertEqual(len(downloader.pending_transactions), 0)

    # Getting a tx that was already downloaded.
    downloader.get_tx(blocks[0].hash, node_sync1)
    self.assertEqual(len(downloader.downloading_deque), 0)

    # Adding a fake tx_id to the downloading deque.
    downloader.downloading_deque.append('1')

    # Getting a new tx.
    downloader.get_tx(blocks[1].hash, node_sync1)
    self.assertEqual(len(downloader.pending_transactions), 1)

    details = downloader.pending_transactions[blocks[1].hash]
    self.assertEqual(len(details.connections), 1)
    self.assertEqual(len(downloader.downloading_deque), 2)

    details.downloading_deferred.callback(blocks[1])

    # Still 2 elements because the first one is not downloaded yet.
    self.assertEqual(len(downloader.downloading_deque), 2)

    # Remove it.
    downloader.downloading_deque.popleft()

    # And try again.
    downloader.check_downloading_queue()
    self.assertEqual(len(downloader.downloading_deque), 0)
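# What the assertions above demonstrate: concurrent get_tx() calls for the same hash
# share a single pending_transactions entry and return the same deferred
# (deferred1 == deferred2), so each transaction is requested only once even when
# several connections want it, and check_downloading_queue() re-drives the deque
# once stale entries have been removed.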
def test_tx_propagation_nat_peers(self):
    """ manager1 <- manager2 <- manager3 """
    self._add_new_blocks(25)

    manager2 = self.create_peer(self.network)
    conn1 = FakeConnection(self.manager1, manager2)

    for _ in range(1000):
        if conn1.is_empty():
            break
        conn1.run_one_step()
        self.clock.advance(0.1)
    self.assertTipsEqual(self.manager1, manager2)

    self._add_new_blocks(1)

    for _ in range(1000):
        if conn1.is_empty():
            break
        conn1.run_one_step()
        self.clock.advance(0.1)
    self.assertTipsEqual(self.manager1, manager2)

    manager3 = self.create_peer(self.network)
    conn2 = FakeConnection(manager2, manager3)

    for _ in range(1000):
        if conn1.is_empty() and conn2.is_empty():
            break
        conn1.run_one_step()
        conn2.run_one_step()
        self.clock.advance(0.1)
    self.assertTipsEqual(self.manager1, manager2)
    self.assertTipsEqual(self.manager1, manager3)

    self._add_new_transactions(1)

    for _ in range(1000):
        if conn1.is_empty() and conn2.is_empty():
            break
        conn1.run_one_step()
        conn2.run_one_step()
        self.clock.advance(0.1)

    self.assertTipsEqual(self.manager1, manager2)
    self.assertTipsEqual(self.manager1, manager3)
    self.assertConsensusEqual(self.manager1, manager2)
    self.assertConsensusEqual(self.manager1, manager3)
    self.assertConsensusValid(self.manager1)
    self.assertConsensusValid(manager2)
    self.assertConsensusValid(manager3)
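# The "step until idle" loop above is repeated in several of these sync tests. If one
# wanted to factor it out, a helper roughly like the following would do; this is a
# sketch, and run_until_empty is a hypothetical name, not an existing helper in the
# suite:
def run_until_empty(conn, clock, max_steps=1000, step_interval=0.1):
    """Run a FakeConnection until both sides have nothing left to send (sketch)."""
    for _ in range(max_steps):
        if conn.is_empty():
            break
        conn.run_one_step()
        clock.advance(step_interval)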
class StatusTest(_BaseResourceTest._ResourceTest):
    def setUp(self):
        super().setUp()
        self.web = StubSite(StatusResource(self.manager))

        self.manager2 = self.create_peer('testnet')
        self.conn1 = FakeConnection(self.manager, self.manager2)

    @inlineCallbacks
    def test_get(self):
        response = yield self.web.get("status")
        data = response.json_value()
        server_data = data.get('server')
        self.assertEqual(server_data['app_version'], 'Hathor v{}'.format(hathor.__version__))
        self.assertEqual(server_data['network'], 'testnet')
        self.assertGreater(server_data['uptime'], 0)

    @inlineCallbacks
    def test_handshaking(self):
        response = yield self.web.get("status")
        data = response.json_value()
        server_data = data.get('server')
        known_peers = data.get('known_peers')
        connections = data.get('connections')
        self.assertEqual(server_data['app_version'], 'Hathor v{}'.format(hathor.__version__))
        self.assertEqual(server_data['network'], 'testnet')
        self.assertGreater(server_data['uptime'], 0)

        handshake_peer = self.conn1.proto1.transport.getPeer()
        handshake_address = '{}:{}'.format(handshake_peer.host, handshake_peer.port)

        self.assertEqual(len(known_peers), 0)
        self.assertEqual(len(connections['connected_peers']), 0)
        self.assertEqual(len(connections['handshaking_peers']), 1)
        self.assertEqual(connections['handshaking_peers'][0]['address'], handshake_address)

    @inlineCallbacks
    def test_get_with_one_peer(self):
        self.conn1.run_one_step()  # HELLO
        self.conn1.run_one_step()  # PEER-ID
        self.conn1.run_one_step()  # READY
        self.conn1.run_one_step()  # BOTH PEERS ARE READY NOW

        response = yield self.web.get("status")
        data = response.json_value()
        server_data = data.get('server')
        known_peers = data.get('known_peers')
        connections = data.get('connections')
        self.assertEqual(server_data['app_version'], 'Hathor v{}'.format(hathor.__version__))
        self.assertEqual(server_data['network'], 'testnet')
        self.assertGreater(server_data['uptime'], 0)

        self.assertEqual(len(known_peers), 1)
        self.assertEqual(known_peers[0]['id'], self.manager2.my_peer.id)

        self.assertEqual(len(connections['connected_peers']), 1)
        self.assertEqual(connections['connected_peers'][0]['id'], self.manager2.my_peer.id)

    @inlineCallbacks
    def test_connecting_peers(self):
        address = '192.168.1.1:54321'
        endpoint = endpoints.clientFromString(self.manager.reactor, 'tcp:{}'.format(address))
        deferred = endpoint.connect
        self.manager.connections.connecting_peers[endpoint] = deferred

        response = yield self.web.get("status")
        data = response.json_value()
        connecting = data['connections']['connecting_peers']
        self.assertEqual(len(connecting), 1)
        self.assertEqual(connecting[0]['address'], address)
        self.assertIsNotNone(connecting[0]['deferred'])
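# For reference, the /status payload exercised above has at least the following shape;
# this is inferred from the assertions in this class, and the values shown are
# illustrative only:
#
# {
#     "server": {"app_version": "Hathor v...", "network": "testnet", "uptime": 123.4},
#     "known_peers": [{"id": "..."}, ...],
#     "connections": {
#         "connected_peers": [{"id": "..."}, ...],
#         "handshaking_peers": [{"address": "host:port"}, ...],
#         "connecting_peers": [{"address": "host:port", "deferred": "..."}, ...],
#     },
# }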