Example #1: KademliaProtocol.__init__
 def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
              udp_port: int, peer_port: int, rpc_timeout: float = constants.rpc_timeout,
              split_buckets_under_index: int = constants.split_buckets_under_index):
     self.peer_manager = peer_manager
     self.loop = loop
     self.node_id = node_id
     self.external_ip = external_ip
     self.udp_port = udp_port
     self.peer_port = peer_port
     self.is_seed_node = False
     self.partial_messages: typing.Dict[bytes, typing.Dict[bytes, bytes]] = {}
     self.sent_messages: typing.Dict[bytes, typing.Tuple['KademliaPeer', asyncio.Future, RequestDatagram]] = {}
     self.protocol_version = constants.protocol_version
     self.started_listening_time = 0
     self.transport: DatagramTransport = None
     self.old_token_secret = constants.generate_id()
     self.token_secret = constants.generate_id()
     self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
     self.data_store = DictDataStore(self.loop, self.peer_manager)
     self.ping_queue = PingQueue(self.loop, self)
     self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
     self.rpc_timeout = rpc_timeout
     self._split_lock = asyncio.Lock(loop=self.loop)
     self._to_remove: typing.Set['KademliaPeer'] = set()
     self._to_add: typing.Set['KademliaPeer'] = set()
     self._wakeup_routing_task = asyncio.Event(loop=self.loop)
     self.maintaing_routing_task: typing.Optional[asyncio.Task] = None
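A minimal usage sketch, assuming the same module context as the constructor above: build the protocol and let asyncio attach the UDP transport, as the test examples below do. The address and ports are illustrative.

async def start_protocol(loop: asyncio.AbstractEventLoop) -> KademliaProtocol:
    # illustrative address/ports; self.transport (None above) is expected to be
    # set once the datagram endpoint is created
    protocol = KademliaProtocol(loop, PeerManager(loop), constants.generate_id(),
                                '1.2.3.4', 4444, 3333)
    await loop.create_datagram_endpoint(lambda: protocol, ('1.2.3.4', 4444))
    return protocol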
Example #2: test_update_token
    async def test_update_token(self):
        loop = asyncio.get_event_loop()
        with dht_mocks.mock_network_loop(loop):
            node_id1 = constants.generate_id()
            peer1 = KademliaProtocol(loop, PeerManager(loop), node_id1,
                                     '1.2.3.4', 4444, 3333)
            peer2 = KademliaProtocol(loop, PeerManager(loop),
                                     constants.generate_id(), '1.2.3.5', 4444,
                                     3333)
            await loop.create_datagram_endpoint(lambda: peer1,
                                                ('1.2.3.4', 4444))
            await loop.create_datagram_endpoint(lambda: peer2,
                                                ('1.2.3.5', 4444))

            peer = peer2.peer_manager.get_kademlia_peer(node_id1,
                                                        '1.2.3.4',
                                                        udp_port=4444)
            self.assertEqual(None,
                             peer2.peer_manager.get_node_token(peer.node_id))
            await peer2.get_rpc_peer(peer).find_value(b'1' * 48)
            self.assertNotEqual(
                None, peer2.peer_manager.get_node_token(peer.node_id))
            peer1.stop()
            peer2.stop()
            peer1.disconnect()
            peer2.disconnect()
Example #3: test_add_peer_after_handle_request
    async def test_add_peer_after_handle_request(self):
        with dht_mocks.mock_network_loop(self.loop):
            node_id1 = constants.generate_id()
            node_id2 = constants.generate_id()
            node_id3 = constants.generate_id()
            node_id4 = constants.generate_id()

            peer1 = KademliaProtocol(
                self.loop, PeerManager(self.loop), node_id1, '1.2.3.4', 4444, 3333
            )
            await self.loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
            peer1.start()

            peer2, peer_2_from_peer_1 = await self._make_protocol(peer1, node_id2, '1.2.3.5', 4444, 3333)
            peer3, peer_3_from_peer_1 = await self._make_protocol(peer1, node_id3, '1.2.3.6', 4444, 3333)
            peer4, peer_4_from_peer_1 = await self._make_protocol(peer1, node_id4, '1.2.3.7', 4444, 3333)

            # peers who reply should be added
            await peer1.get_rpc_peer(peer_2_from_peer_1).ping()
            await asyncio.sleep(0.5)
            self.assertListEqual([peer_2_from_peer_1], peer1.routing_table.get_peers())
            peer1.routing_table.remove_peer(peer_2_from_peer_1)

            # peers not known to be good/bad should be enqueued to maybe-ping
            peer1_from_peer3 = peer3.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
            self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
            pong = await peer1_from_peer3.ping()
            self.assertEqual(b'pong', pong)
            self.assertEqual(1, len(peer1.ping_queue._pending_contacts))
            peer1.ping_queue._pending_contacts.clear()

            # peers who are already good should be added
            peer1_from_peer4 = peer4.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
            peer1.peer_manager.update_contact_triple(node_id4, '1.2.3.7', 4444)
            peer1.peer_manager.report_last_replied('1.2.3.7', 4444)
            self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
            pong = await peer1_from_peer4.ping()
            self.assertEqual(b'pong', pong)
            await asyncio.sleep(0.5)
            self.assertEqual(1, len(peer1.routing_table.get_peers()))
            self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
            peer1.routing_table.buckets[0].peers.clear()

            # peers who are known to be bad recently should not be added or maybe-pinged
            peer1_from_peer4 = peer4.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
            peer1.peer_manager.update_contact_triple(node_id4, '1.2.3.7', 4444)
            peer1.peer_manager.report_failure('1.2.3.7', 4444)
            peer1.peer_manager.report_failure('1.2.3.7', 4444)
            self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
            pong = await peer1_from_peer4.ping()
            self.assertEqual(b'pong', pong)
            self.assertEqual(0, len(peer1.routing_table.get_peers()))
            self.assertEqual(0, len(peer1.ping_queue._pending_contacts))

            for p in [peer1, peer2, peer3, peer4]:
                p.stop()
                p.disconnect()
Example #4: test_store_to_peer
    async def test_store_to_peer(self):
        loop = asyncio.get_event_loop()
        with dht_mocks.mock_network_loop(loop):
            node_id1 = constants.generate_id()
            peer1 = KademliaProtocol(loop, PeerManager(loop), node_id1,
                                     '1.2.3.4', 4444, 3333)
            peer2 = KademliaProtocol(loop, PeerManager(loop),
                                     constants.generate_id(), '1.2.3.5', 4444,
                                     3333)
            await loop.create_datagram_endpoint(lambda: peer1,
                                                ('1.2.3.4', 4444))
            await loop.create_datagram_endpoint(lambda: peer2,
                                                ('1.2.3.5', 4444))

            peer = make_kademlia_peer(node_id1, '1.2.3.4', udp_port=4444)
            peer2_from_peer1 = make_kademlia_peer(peer2.node_id,
                                                  peer2.external_ip,
                                                  udp_port=peer2.udp_port)
            peer2_from_peer1.update_tcp_port(3333)
            peer3 = make_kademlia_peer(constants.generate_id(),
                                       '1.2.3.6',
                                       udp_port=4444)
            store_result = await peer2.store_to_peer(b'2' * 48, peer)
            self.assertEqual(store_result[0], peer.node_id)
            self.assertEqual(True, store_result[1])
            self.assertEqual(True,
                             peer1.data_store.has_peers_for_blob(b'2' * 48))
            self.assertEqual(False,
                             peer1.data_store.has_peers_for_blob(b'3' * 48))
            self.assertListEqual([peer2_from_peer1],
                                 peer1.data_store.get_storing_contacts())
            peer1.data_store.completed_blobs.add(
                binascii.hexlify(b'2' * 48).decode())
            find_value_response = peer1.node_rpc.find_value(peer3, b'2' * 48)
            self.assertEqual(len(find_value_response[b'contacts']), 0)
            self.assertSetEqual(
                {b'2' * 48, b'token', b'protocolVersion', b'contacts', b'p'},
                set(find_value_response.keys()))
            self.assertEqual(2, len(find_value_response[b'2' * 48]))
            self.assertEqual(find_value_response[b'2' * 48][0],
                             peer2_from_peer1.compact_address_tcp())
            self.assertDictEqual(bdecode(bencode(find_value_response)),
                                 find_value_response)

            find_value_page_above_pages_response = peer1.node_rpc.find_value(
                peer3, b'2' * 48, page=10)
            self.assertNotIn(b'2' * 48, find_value_page_above_pages_response)

            peer1.stop()
            peer2.stop()
            peer1.disconnect()
            peer2.disconnect()
Example #5: _test_network_context
 async def _test_network_context(self, peer_count=200):
     self.peer_addresses = [
         (constants.generate_id(i), socket.inet_ntoa(int(i + 0x01000001).to_bytes(length=4, byteorder='big')))
         for i in range(1, peer_count + 1)
     ]
     try:
         with dht_mocks.mock_network_loop(self.loop):
             await self.setup_node(self.peer_addresses, '1.2.3.1', constants.generate_id(1000))
             yield
     finally:
         self.blob_announcer.stop()
         self.node.stop()
         for n in self.nodes.values():
             n.stop()
Example #6: test_add_peer
    def test_add_peer(self):
        peer = make_kademlia_peer(constants.generate_id(2),
                                  "1.2.3.4",
                                  udp_port=4444)
        peer_update2 = make_kademlia_peer(constants.generate_id(2),
                                          "1.2.3.4",
                                          udp_port=4445)

        self.assertListEqual([], self.kbucket.peers)

        # add the peer
        self.kbucket.add_peer(peer)
        self.assertListEqual([peer], self.kbucket.peers)

        # re-add it
        self.kbucket.add_peer(peer)
        self.assertListEqual([peer], self.kbucket.peers)
        self.assertEqual(self.kbucket.peers[0].udp_port, 4444)

        # add a new peer object with the same id and address but a different port
        self.kbucket.add_peer(peer_update2)
        self.assertListEqual([peer_update2], self.kbucket.peers)
        self.assertEqual(self.kbucket.peers[0].udp_port, 4445)

        # modify the peer object to have a different port
        peer_update2.udp_port = 4444
        self.kbucket.add_peer(peer_update2)
        self.assertListEqual([peer_update2], self.kbucket.peers)
        self.assertEqual(self.kbucket.peers[0].udp_port, 4444)

        self.kbucket.peers.clear()

        # Test if contacts can be added to empty list
        # Add k contacts to bucket
        for i in range(constants.k):
            peer = make_kademlia_peer(generate_id(),
                                      next(self.address_generator), 4444)
            self.assertTrue(self.kbucket.add_peer(peer))
            self.assertEqual(peer, self.kbucket.peers[i])

        # Test if contact is not added to full list
        peer = make_kademlia_peer(generate_id(), next(self.address_generator),
                                  4444)
        self.assertFalse(self.kbucket.add_peer(peer))

        # Test if an existing contact is updated correctly if added again
        existing_peer = self.kbucket.peers[0]
        self.assertTrue(self.kbucket.add_peer(existing_peer))
        self.assertEqual(existing_peer, self.kbucket.peers[-1])
Example #7: main
async def main(host: str, port: int, db_file_path: str,
               bootstrap_node: Optional[str], prometheus_port: int):
    loop = asyncio.get_event_loop()
    conf = Config()
    storage = SQLiteStorage(conf, db_file_path, loop, loop.time)
    if bootstrap_node:
        nodes = bootstrap_node.split(':')
        nodes = [(nodes[0], int(nodes[1]))]
    else:
        nodes = conf.known_dht_nodes
    await storage.open()
    node = Node(loop,
                PeerManager(loop),
                generate_id(),
                port,
                port,
                3333,
                None,
                storage=storage)
    if prometheus_port > 0:
        metrics = SimpleMetrics(prometheus_port, node)
        await metrics.start()
    node.start(host, nodes)
    while True:
        await asyncio.sleep(10)
        PEERS.labels('main').set(len(node.protocol.routing_table.get_peers()))
        BLOBS_STORED.labels('main').set(
            len(node.protocol.data_store.get_storing_contacts()))
        log.info(
            "Known peers: %d. Storing contact information for %d blobs from %d peers.",
            len(node.protocol.routing_table.get_peers()),
            len(node.protocol.data_store),
            len(node.protocol.data_store.get_storing_contacts()))
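A hedged launcher sketch for the main() coroutine above, assuming the same module context; the bind address, port, in-memory database path, missing bootstrap node, and disabled metrics are illustrative choices, not values from the source.

if __name__ == '__main__':
    # main() loops forever, so this runs the seed node until interrupted
    asyncio.get_event_loop().run_until_complete(
        main('0.0.0.0', 4444, ':memory:', None, 0))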
Example #8: test_popular_blob
    async def test_popular_blob(self):
        peer_count = 150
        addresses = [
            (constants.generate_id(i + 1),
             socket.inet_ntoa(int(i + 1).to_bytes(length=4, byteorder='big')))
            for i in range(peer_count)
        ]
        blob_hash = b'1' * 48

        async with self._test_network_context(peer_addresses=addresses):
            total_seen = set()
            announced_to = self.nodes[0]
            for i in range(1, peer_count):
                node = self.nodes[i]
                kad_peer = announced_to.protocol.peer_manager.get_kademlia_peer(
                    node.protocol.node_id, node.protocol.external_ip,
                    node.protocol.udp_port)
                await announced_to.protocol._add_peer(kad_peer)
                peer = node.protocol.get_rpc_peer(
                    node.protocol.peer_manager.get_kademlia_peer(
                        announced_to.protocol.node_id,
                        announced_to.protocol.external_ip,
                        announced_to.protocol.udp_port))
                response = await peer.store(blob_hash)
                self.assertEqual(response, b'OK')
                peers_for_blob = await peer.find_value(blob_hash, 0)
                if i == 1:
                    self.assertTrue(blob_hash not in peers_for_blob)
                    self.assertEqual(peers_for_blob[b'p'], 0)
                else:
                    self.assertEqual(len(peers_for_blob[blob_hash]),
                                     min(i - 1, constants.k))
                    self.assertEqual(
                        len(
                            announced_to.protocol.data_store.
                            get_peers_for_blob(blob_hash)), i)
                if i - 1 > constants.k:
                    self.assertEqual(len(peers_for_blob[b'contacts']),
                                     constants.k)
                    self.assertEqual(peers_for_blob[b'p'],
                                     ((i - 1) // (constants.k + 1)) + 1)
                    seen = set(peers_for_blob[blob_hash])
                    self.assertEqual(len(seen), constants.k)
                    self.assertEqual(len(peers_for_blob[blob_hash]), len(seen))

                    for pg in range(1, peers_for_blob[b'p']):
                        page_x = await peer.find_value(blob_hash, pg)
                        self.assertNotIn(b'contacts', page_x)
                        page_x_set = set(page_x[blob_hash])
                        self.assertEqual(len(page_x[blob_hash]),
                                         len(page_x_set))
                        self.assertTrue(len(page_x_set) > 0)
                        self.assertSetEqual(seen.intersection(page_x_set),
                                            set())
                        seen.intersection_update(page_x_set)
                        total_seen.update(page_x_set)
                else:
                    self.assertEqual(len(peers_for_blob[b'contacts']), i - 1)
            self.assertEqual(len(total_seen), peer_count - 2)
Example #9: test_announce_blobs
    async def test_announce_blobs(self):
        blob1 = binascii.hexlify(b'1' * 48).decode()
        blob2 = binascii.hexlify(b'2' * 48).decode()

        async with self._test_network_context():
            await self.storage.add_blobs((blob1, 1024), (blob2, 1024),
                                         finished=True)
            await self.storage.db.execute(
                "update blob set next_announce_time=0, should_announce=1 where blob_hash in (?, ?)",
                (blob1, blob2))
            to_announce = await self.storage.get_blobs_to_announce()
            self.assertEqual(2, len(to_announce))
            self.blob_announcer.start(
                batch_size=1)  # so it covers batching logic
            # takes 60 seconds to start, but we advance 120 to ensure it processed all batches
            await self.advance(60.0 * 2)
            to_announce = await self.storage.get_blobs_to_announce()
            self.assertEqual(0, len(to_announce))
            self.blob_announcer.stop()

            # test that we can route from a poorly connected peer all the way to the announced blob

            await self.chain_peer(constants.generate_id(10), '1.2.3.10')
            await self.chain_peer(constants.generate_id(11), '1.2.3.11')
            await self.chain_peer(constants.generate_id(12), '1.2.3.12')
            await self.chain_peer(constants.generate_id(13), '1.2.3.13')
            await self.chain_peer(constants.generate_id(14), '1.2.3.14')
            await self.advance(61.0)

            last = self.nodes[len(self.nodes) - 1]
            search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(
                loop=self.loop)
            search_q.put_nowait(blob1)

            _, task = last.accumulate_peers(search_q, peer_q)
            found_peers = await peer_q.get()
            task.cancel()

            self.assertEqual(1, len(found_peers))
            self.assertEqual(self.node.protocol.node_id,
                             found_peers[0].node_id)
            self.assertEqual(self.node.protocol.external_ip,
                             found_peers[0].address)
            self.assertEqual(self.node.protocol.peer_port,
                             found_peers[0].tcp_port)
Example #10: RequestDatagram.make_find_node
 def make_find_node(
         cls,
         from_node_id: bytes,
         key: bytes,
         rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
     rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
     if len(key) != constants.HASH_BITS // 8:
         raise ValueError(f"invalid key length: {len(key)}")
     return cls(REQUEST_TYPE, rpc_id, from_node_id, b'findNode', [key])
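A short usage sketch, assuming the same module context, that this classmethod lives on RequestDatagram (as the return annotation suggests), and that constants.generate_id() returns HASH_BITS // 8 bytes, as the other examples imply.

from_node_id = constants.generate_id()
target_key = constants.generate_id()     # must be HASH_BITS // 8 bytes long
request = RequestDatagram.make_find_node(from_node_id, target_key)
# rpc_id was omitted, so a fresh constants.RPC_ID_LENGTH-byte id is generated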
Example #11: KademliaRPC.__init__
 def __init__(self,
              protocol: 'KademliaProtocol',
              loop: asyncio.AbstractEventLoop,
              peer_port: int = 3333):
     self.protocol = protocol
     self.loop = loop
     self.peer_port = peer_port
     self.old_token_secret: bytes = None
     self.token_secret = constants.generate_id()
Example #12: test_split_buckets
 async def test_split_buckets(self):
     loop = asyncio.get_event_loop()
     peer_addresses = [
         (constants.generate_id(1), '1.2.3.1'),
     ]
     for i in range(2, 200):
         peer_addresses.append((constants.generate_id(i), f'1.2.3.{i}'))
     with dht_mocks.mock_network_loop(loop):
         nodes = {
             i: Node(loop, PeerManager(loop), node_id, 4444, 4444, 3333,
                     address)
             for i, (node_id, address) in enumerate(peer_addresses)
         }
         node_1 = nodes[0]
         for i in range(1, len(peer_addresses)):
             node = nodes[i]
             peer = node_1.protocol.peer_manager.get_kademlia_peer(
                 node.protocol.node_id,
                 node.protocol.external_ip,
                 udp_port=node.protocol.udp_port)
             # set all of the peers to good (so as not to attempt pinging stale ones during split)
             node_1.protocol.peer_manager.report_last_replied(
                 peer.address, peer.udp_port)
             node_1.protocol.peer_manager.report_last_replied(
                 peer.address, peer.udp_port)
             await node_1.protocol._add_peer(peer)
             # check that bucket 0 is always the one covering the local node id
             self.assertEqual(
                 True,
                 node_1.protocol.routing_table.buckets[0].key_in_range(
                     node_1.protocol.node_id))
         self.assertEqual(40,
                          len(node_1.protocol.routing_table.get_peers()))
         self.assertEqual(len(expected_ranges),
                          len(node_1.protocol.routing_table.buckets))
         covered = 0
         for (expected_min, expected_max), bucket in zip(
                 expected_ranges, node_1.protocol.routing_table.buckets):
             self.assertEqual(expected_min, bucket.range_min)
             self.assertEqual(expected_max, bucket.range_max)
             covered += bucket.range_max - bucket.range_min
         self.assertEqual(2**384, covered)
         for node in nodes.values():
             node.stop()
Example #13: test_cant_add_peer_without_a_node_id_gracefully
 async def test_cant_add_peer_without_a_node_id_gracefully(self):
     loop = asyncio.get_event_loop()
     node = Node(loop, PeerManager(loop), constants.generate_id(), 4444,
                 4444, 3333, '1.2.3.4')
     bad_peer = make_kademlia_peer(None, '1.2.3.4', 5555)
     with self.assertLogs(level='WARNING') as logged:
         self.assertFalse(await node.protocol._add_peer(bad_peer))
         self.assertEqual(1, len(logged.output))
         self.assertTrue(logged.output[0].endswith(
             'Tried adding a peer with no node id!'))
Example #14: test_ping
    async def test_ping(self):
        loop = asyncio.get_event_loop()
        with dht_mocks.mock_network_loop(loop):
            node_id1 = constants.generate_id()
            peer1 = KademliaProtocol(
                loop, PeerManager(loop), node_id1, '1.2.3.4', 4444, 3333
            )
            peer2 = KademliaProtocol(
                loop, PeerManager(loop), constants.generate_id(), '1.2.3.5', 4444, 3333
            )
            await loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
            await loop.create_datagram_endpoint(lambda: peer2, ('1.2.3.5', 4444))

            peer = make_kademlia_peer(node_id1, '1.2.3.4', udp_port=4444)
            result = await peer2.get_rpc_peer(peer).ping()
            self.assertEqual(result, b'pong')
            peer1.stop()
            peer2.stop()
            peer1.disconnect()
            peer2.disconnect()
Example #15: test_get_token_on_announce
 async def test_get_token_on_announce(self):
     await self.setup_network(2, seed_nodes=2)
     node1, node2 = self.nodes
     node1.protocol.peer_manager.clear_token(node2.protocol.node_id)
     blob_hash = hexlify(constants.generate_id(1337)).decode()
     node_ids = await node1.announce_blob(blob_hash)
     self.assertIn(node2.protocol.node_id, node_ids)
     node2.protocol.node_rpc.refresh_token()
     node_ids = await node1.announce_blob(blob_hash)
     self.assertIn(node2.protocol.node_id, node_ids)
     node2.protocol.node_rpc.refresh_token()
     node_ids = await node1.announce_blob(blob_hash)
     self.assertIn(node2.protocol.node_id, node_ids)
Example #16: RequestDatagram.make_find_value
 def make_find_value(cls,
                     from_node_id: bytes,
                     key: bytes,
                     rpc_id: typing.Optional[bytes] = None,
                     page: int = 0) -> 'RequestDatagram':
     rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
     if len(key) != constants.HASH_BITS // 8:
         raise ValueError(f"invalid key length: {len(key)}")
     if page < 0:
         raise ValueError(f"cannot request a negative page ({page})")
     return cls(REQUEST_TYPE, rpc_id, from_node_id, b'findValue',
                [key, {
                    PAGE_KEY: page
                }])
Example #17: setup_network
    async def setup_network(self,
                            size: int,
                            start_port=40000,
                            seed_nodes=1,
                            external_ip='127.0.0.1'):
        for i in range(size):
            node_port = start_port + i
            node_id = constants.generate_id(i)
            node = await self.create_node(node_id, node_port)
            self.nodes.append(node)
            self.known_node_addresses.append((external_ip, node_port))

        for node in self.nodes:
            node.start(external_ip, self.known_node_addresses[:seed_nodes])
Example #18: _test_network_context
 async def _test_network_context(self, peer_addresses=None):
     self.peer_addresses = peer_addresses or [
         (constants.generate_id(2), '1.2.3.2'),
         (constants.generate_id(3), '1.2.3.3'),
         (constants.generate_id(4), '1.2.3.4'),
         (constants.generate_id(5), '1.2.3.5'),
         (constants.generate_id(6), '1.2.3.6'),
         (constants.generate_id(7), '1.2.3.7'),
         (constants.generate_id(8), '1.2.3.8'),
         (constants.generate_id(9), '1.2.3.9'),
     ]
     try:
         with dht_mocks.mock_network_loop(self.loop):
             await self.setup_node(self.peer_addresses, '1.2.3.1', constants.generate_id(1))
             yield
     finally:
         self.blob_announcer.stop()
         self.node.stop()
         for n in self.nodes.values():
             n.stop()
Example #19: RequestDatagram.make_store
 def make_store(cls,
                from_node_id: bytes,
                blob_hash: bytes,
                token: bytes,
                port: int,
                rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
     rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
     if len(blob_hash) != constants.HASH_BITS // 8:
         raise ValueError(f"invalid blob hash length: {len(blob_hash)}")
     if not 0 < port < 65536:
         raise ValueError(f"invalid port: {port}")
     if len(token) != constants.HASH_BITS // 8:
         raise ValueError(f"invalid token length: {len(token)}")
     store_args = [blob_hash, token, port, from_node_id, 0]
     return cls(REQUEST_TYPE, rpc_id, from_node_id, b'store', store_args)
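A sketch of the validation above, assuming the same module context and the RequestDatagram class; the values are illustrative, and the error text is the ValueError message raised by the method.

node_id = constants.generate_id()
blob_hash = b'2' * 48                    # HASH_BITS // 8 bytes, as in the tests above
token = constants.generate_id()
request = RequestDatagram.make_store(node_id, blob_hash, token, 3333)
try:
    RequestDatagram.make_store(node_id, blob_hash, token, 0)  # port outside 1-65535
except ValueError as err:
    print(err)                           # "invalid port: 0"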
Example #20: setup_network
 async def setup_network(self, size: int, start_port=40000, seed_nodes=1):
     for i in range(size):
         node_port = start_port + i
         node = Node(self.loop, PeerManager(self.loop), node_id=constants.generate_id(i),
                                udp_port=node_port, internal_udp_port=node_port,
                                peer_port=3333, external_ip='127.0.0.1')
         self.nodes.append(node)
         self.known_node_addresses.append(('127.0.0.1', node_port))
         await node.start_listening('127.0.0.1')
         self.addCleanup(node.stop)
     for node in self.nodes:
         node.protocol.rpc_timeout = .5
         node.protocol.ping_queue._default_delay = .5
         node.start('127.0.0.1', self.known_node_addresses[:seed_nodes])
     await asyncio.gather(*[node.joined.wait() for node in self.nodes])
Example #21: test_announce_blobs
    async def test_announce_blobs(self):
        blob1 = binascii.hexlify(b'1' * 48).decode()
        blob2 = binascii.hexlify(b'2' * 48).decode()

        async with self._test_network_context(peer_count=100):
            await self.storage.add_blobs((blob1, 1024, 0, True), (blob2, 1024, 0, True), finished=True)
            await self.storage.add_blobs(
                *((constants.generate_id(value).hex(), 1024, 0, True) for value in range(1000, 1090)),
                finished=True)
            await self.storage.db.execute("update blob set next_announce_time=0, should_announce=1")
            to_announce = await self.storage.get_blobs_to_announce()
            self.assertEqual(92, len(to_announce))
            self.blob_announcer.start(batch_size=10)  # so it covers batching logic
            # takes 60 seconds to start, but we advance 120 to ensure it processed all batches
            ongoing_announcements = asyncio.ensure_future(self.blob_announcer.wait())
            await self.instant_advance(60.0)
            await ongoing_announcements
            to_announce = await self.storage.get_blobs_to_announce()
            self.assertEqual(0, len(to_announce))
            self.blob_announcer.stop()

            # as routing table pollution will cause some peers to be hard to reach, we add a tolerance for CI
            tolerance = 0.8  # at least 80% of the announcements are within the top K
            for blob in await self.storage.get_all_blob_hashes():
                distance = Distance(bytes.fromhex(blob))
                candidates = list(self.nodes.values())
                candidates.sort(key=lambda sorting_node: distance(sorting_node.protocol.node_id))
                has_it = 0
                for index, node in enumerate(candidates[:constants.K], start=1):
                    if node.protocol.data_store.get_peers_for_blob(bytes.fromhex(blob)):
                        has_it += 1
                    else:
                        logging.warning("blob %s wasnt found between the best K (%s)", blob[:8], node.protocol.node_id.hex()[:8])
                self.assertGreaterEqual(has_it, int(tolerance * constants.K))


            # test that we can route from a poorly connected peer all the way to the announced blob

            current = len(self.nodes)
            await self.chain_peer(constants.generate_id(current + 1), '1.2.3.10')
            await self.chain_peer(constants.generate_id(current + 2), '1.2.3.11')
            await self.chain_peer(constants.generate_id(current + 3), '1.2.3.12')
            await self.chain_peer(constants.generate_id(current + 4), '1.2.3.13')
            last = await self.chain_peer(constants.generate_id(current + 5), '1.2.3.14')

            search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(loop=self.loop)
            search_q.put_nowait(blob1)

            _, task = last.accumulate_peers(search_q, peer_q)
            found_peers = await asyncio.wait_for(peer_q.get(), 1.0)
            task.cancel()

            self.assertEqual(1, len(found_peers))
            self.assertEqual(self.node.protocol.node_id, found_peers[0].node_id)
            self.assertEqual(self.node.protocol.external_ip, found_peers[0].address)
            self.assertEqual(self.node.protocol.peer_port, found_peers[0].tcp_port)
Example #22: test_fill_one_bucket
    async def test_fill_one_bucket(self):
        loop = asyncio.get_event_loop()
        peer_addresses = [
            (constants.generate_id(1), '1.2.3.1'),
            (constants.generate_id(2), '1.2.3.2'),
            (constants.generate_id(3), '1.2.3.3'),
            (constants.generate_id(4), '1.2.3.4'),
            (constants.generate_id(5), '1.2.3.5'),
            (constants.generate_id(6), '1.2.3.6'),
            (constants.generate_id(7), '1.2.3.7'),
            (constants.generate_id(8), '1.2.3.8'),
            (constants.generate_id(9), '1.2.3.9'),
        ]
        with dht_mocks.mock_network_loop(loop):
            nodes = {
                i: Node(loop, PeerManager(loop), node_id, 4444, 4444, 3333,
                        address)
                for i, (node_id, address) in enumerate(peer_addresses)
            }
            node_1 = nodes[0]
            contact_cnt = 0
            for i in range(1, len(peer_addresses)):
                self.assertEqual(
                    len(node_1.protocol.routing_table.get_peers()),
                    contact_cnt)
                node = nodes[i]
                peer = node_1.protocol.peer_manager.get_kademlia_peer(
                    node.protocol.node_id,
                    node.protocol.external_ip,
                    udp_port=node.protocol.udp_port)
                added = await node_1.protocol._add_peer(peer)
                self.assertEqual(True, added)
                contact_cnt += 1

            self.assertEqual(len(node_1.protocol.routing_table.get_peers()), 8)
            self.assertEqual(
                node_1.protocol.routing_table.buckets_with_contacts(), 1)
            for node in nodes.values():
                node.protocol.stop()
Example #23: test_popular_blob
    async def test_popular_blob(self):
        peer_count = 150
        blob_hash = constants.generate_id(99999)

        async with self._test_network_context(peer_count=peer_count):
            total_seen = set()
            announced_to = self.nodes.pop(0)
            for i, node in enumerate(self.nodes.values()):
                self.add_peer_to_routing_table(announced_to, node)
                peer = node.protocol.get_rpc_peer(
                    make_kademlia_peer(
                        announced_to.protocol.node_id,
                        announced_to.protocol.external_ip,
                        announced_to.protocol.udp_port
                    )
                )
                response = await peer.store(blob_hash)
                self.assertEqual(response, b'OK')
                peers_for_blob = await peer.find_value(blob_hash, 0)
                if i == 0:
                    self.assertNotIn(blob_hash, peers_for_blob)
                    self.assertEqual(peers_for_blob[b'p'], 0)
                else:
                    self.assertEqual(len(peers_for_blob[blob_hash]), min(i, constants.K))
                    self.assertEqual(len(announced_to.protocol.data_store.get_peers_for_blob(blob_hash)), i + 1)
                if i - 1 > constants.K:
                    self.assertEqual(len(peers_for_blob[b'contacts']), constants.K)
                    self.assertEqual(peers_for_blob[b'p'], (i // (constants.K + 1)) + 1)
                    seen = set(peers_for_blob[blob_hash])
                    self.assertEqual(len(seen), constants.K)
                    self.assertEqual(len(peers_for_blob[blob_hash]), len(seen))

                    for pg in range(1, peers_for_blob[b'p']):
                        page_x = await peer.find_value(blob_hash, pg)
                        self.assertNotIn(b'contacts', page_x)
                        page_x_set = set(page_x[blob_hash])
                        self.assertEqual(len(page_x[blob_hash]), len(page_x_set))
                        self.assertGreater(len(page_x_set), 0)
                        self.assertSetEqual(seen.intersection(page_x_set), set())
                        seen.intersection_update(page_x_set)
                        total_seen.update(page_x_set)
                else:
                    self.assertEqual(len(peers_for_blob[b'contacts']), 8)  # we always add 8 on first page
            self.assertEqual(len(total_seen), peer_count - 2)
Example #24: test_peer_persistance
    async def test_peer_persistance(self):
        num_nodes = 6
        start_port = 40000
        num_seeds = 2
        external_ip = '127.0.0.1'

        # Start a node
        await self.setup_network(num_nodes,
                                 start_port=start_port,
                                 seed_nodes=num_seeds)
        await asyncio.gather(*[node.joined.wait() for node in self.nodes])

        node1 = self.nodes[-1]
        peer_args = [(n.protocol.node_id, n.protocol.external_ip,
                      n.protocol.udp_port, n.protocol.peer_port)
                     for n in self.nodes[:num_seeds]]
        peers = [make_kademlia_peer(*args) for args in peer_args]

        # node1 is bootstrapped from the fixed seeds
        self.assertCountEqual(peers, node1.protocol.routing_table.get_peers())

        # Refresh and assert that the peers were persisted
        await node1.refresh_node(True)
        self.assertEqual(
            len(peer_args),
            len(await node1._storage.get_persisted_kademlia_peers()))
        node1.stop()

        # Start a fresh node with the same node_id and storage, but no known peers
        node2 = await self.create_node(constants.generate_id(num_nodes - 1),
                                       start_port + num_nodes - 1)
        node2._storage = node1._storage
        node2.start(external_ip, [])
        await node2.joined.wait()

        # The peers are restored
        self.assertEqual(num_seeds,
                         len(node2.protocol.routing_table.get_peers()))
        for bucket1, bucket2 in zip(node1.protocol.routing_table.buckets,
                                    node2.protocol.routing_table.buckets):
            self.assertEqual((bucket1.range_min, bucket1.range_max),
                             (bucket2.range_min, bucket2.range_max))
Example #25: main
async def main(host: str, port: int, db_file_path: str,
               bootstrap_node: Optional[str], prometheus_port: int,
               export: bool):
    loop = asyncio.get_event_loop()
    conf = Config()
    if not db_file_path.startswith(':memory:'):
        node_id_file_path = db_file_path + 'node_id'
        if os.path.exists(node_id_file_path):
            with open(node_id_file_path, 'rb') as node_id_file:
                node_id = node_id_file.read()
        else:
            with open(node_id_file_path, 'wb') as node_id_file:
                node_id = generate_id()
                node_id_file.write(node_id)

    storage = SQLiteStorage(conf, db_file_path, loop, loop.time)
    if bootstrap_node:
        nodes = bootstrap_node.split(':')
        nodes = [(nodes[0], int(nodes[1]))]
    else:
        nodes = conf.known_dht_nodes
    await storage.open()
    node = Node(loop,
                PeerManager(loop),
                node_id,
                port,
                port,
                3333,
                None,
                storage=storage)
    if prometheus_port > 0:
        metrics = SimpleMetrics(prometheus_port, node if export else None)
        await metrics.start()
    node.start(host, nodes)
    log.info("Peer with id %s started", node_id.hex())
    while True:
        await asyncio.sleep(10)
        log.info(
            "Known peers: %d. Storing contact information for %d blobs from %d peers.",
            len(node.protocol.routing_table.get_peers()),
            len(node.protocol.data_store),
            len(node.protocol.data_store.get_storing_contacts()))
Example #26: test_ping_queue_discover
    async def test_ping_queue_discover(self):
        loop = asyncio.get_event_loop()
        loop.set_debug(False)

        peer_addresses = [
            (constants.generate_id(1), '1.2.3.1'),
            (constants.generate_id(2), '1.2.3.2'),
            (constants.generate_id(3), '1.2.3.3'),
            (constants.generate_id(4), '1.2.3.4'),
            (constants.generate_id(5), '1.2.3.5'),
            (constants.generate_id(6), '1.2.3.6'),
            (constants.generate_id(7), '1.2.3.7'),
            (constants.generate_id(8), '1.2.3.8'),
            (constants.generate_id(9), '1.2.3.9'),
        ]
        with dht_mocks.mock_network_loop(loop):
            advance = dht_mocks.get_time_accelerator(loop)
            # start the nodes
            nodes: typing.Dict[int, Node] = {
                i: Node(loop, PeerManager(loop), node_id, 4444, 4444, 3333,
                        address)
                for i, (node_id, address) in enumerate(peer_addresses)
            }
            for i, n in nodes.items():
                n.start(peer_addresses[i][1], [])

            await advance(1)

            node_1 = nodes[0]

            # ping 8 nodes from node_1, this will result in a delayed return ping
            futs = []
            for i in range(1, len(peer_addresses)):
                node = nodes[i]
                assert node.protocol.node_id != node_1.protocol.node_id
                peer = make_kademlia_peer(node.protocol.node_id,
                                          node.protocol.external_ip,
                                          udp_port=node.protocol.udp_port)
                futs.append(node_1.protocol.get_rpc_peer(peer).ping())
            await advance(3)
            replies = await asyncio.gather(*tuple(futs))
            self.assertTrue(all(map(lambda reply: reply == b"pong", replies)))

            # run for long enough for the delayed pings to have been sent by node 1
            await advance(1000)

            # verify all of the previously pinged peers have node_1 in their routing tables
            for n in nodes.values():
                peers = n.protocol.routing_table.get_peers()
                if n is node_1:
                    self.assertEqual(8, len(peers))
                # TODO: figure out why this breaks
                # else:
                #     self.assertEqual(1, len(peers))
                #     self.assertEqual((peers[0].node_id, peers[0].address, peers[0].udp_port),
                #                      (node_1.protocol.node_id, node_1.protocol.external_ip, node_1.protocol.udp_port))

            # run long enough for the refresh loop to run
            await advance(3600)

            # verify all the nodes know about each other
            for n in nodes.values():
                if n is node_1:
                    continue
                peers = n.protocol.routing_table.get_peers()
                self.assertEqual(8, len(peers))
                self.assertSetEqual(
                    {
                        n_id[0]
                        for n_id in peer_addresses
                        if n_id[0] != n.protocol.node_id
                    }, {c.node_id
                        for c in peers})
                self.assertSetEqual(
                    {
                        n_addr[1]
                        for n_addr in peer_addresses
                        if n_addr[1] != n.protocol.external_ip
                    }, {c.address
                        for c in peers})

            # teardown
            for n in nodes.values():
                n.stop()
Example #27: test_losing_connection
    async def test_losing_connection(self):
        async def wait_for(check_ok, insist, timeout=20):
            start = time.time()
            while time.time() - start < timeout:
                if check_ok():
                    break
                await asyncio.sleep(0)
            else:
                insist()

        loop = self.loop
        loop.set_debug(False)

        peer_addresses = [('1.2.3.4', 40000 + i) for i in range(10)]
        node_ids = [constants.generate_id(i) for i in range(10)]

        nodes = [
            Node(loop,
                 PeerManager(loop),
                 node_id,
                 udp_port,
                 udp_port,
                 3333,
                 address,
                 storage=SQLiteStorage(Config(), ":memory:", self.loop,
                                       self.loop.time))
            for node_id, (address, udp_port) in zip(node_ids, peer_addresses)
        ]
        dht_network = {
            peer_addresses[i]: node.protocol
            for i, node in enumerate(nodes)
        }
        num_seeds = 3

        with dht_mocks.mock_network_loop(loop, dht_network):
            for i, n in enumerate(nodes):
                await n._storage.open()
                self.addCleanup(n.stop)
                n.start(peer_addresses[i][0], peer_addresses[:num_seeds])
            await asyncio.gather(*[n.joined.wait() for n in nodes])

            node = nodes[-1]
            advance = dht_mocks.get_time_accelerator(loop)
            await advance(500)

            # Join the network, assert that at least the known peers are in RT
            self.assertTrue(node.joined.is_set())
            self.assertTrue(
                len(node.protocol.routing_table.get_peers()) >= num_seeds)

            # Refresh, so that the peers are persisted
            self.assertFalse(
                len(await node._storage.get_persisted_kademlia_peers()) >
                num_seeds)
            await advance(4000)
            self.assertTrue(
                len(await node._storage.get_persisted_kademlia_peers()) >
                num_seeds)

            # We lost internet connection - all the peers stop responding
            dht_network.pop(
                (node.protocol.external_ip, node.protocol.udp_port))

            # The peers are cleared on refresh from RT and storage
            await advance(4000)
            self.assertListEqual([], await
                                 node._storage.get_persisted_kademlia_peers())
            await wait_for(
                lambda: len(node.protocol.routing_table.get_peers()) == 0,
                lambda: self.assertListEqual(
                    node.protocol.routing_table.get_peers(), []))

            # Reconnect
            dht_network[(node.protocol.external_ip,
                         node.protocol.udp_port)] = node.protocol

            # Check that node reconnects at least to them
            await advance(1000)
            await wait_for(
                lambda: len(node.protocol.routing_table.get_peers()) >=
                num_seeds, lambda: self.assertGreaterEqual(
                    len(node.protocol.routing_table.get_peers()), num_seeds))
Example #28: test_announce_no_peers
 async def test_announce_no_peers(self):
     await self.setup_network(1)
     node = self.nodes[0]
     blob_hash = hexlify(constants.generate_id(1337)).decode()
     peers = await node.announce_blob(blob_hash)
     self.assertEqual(len(peers), 0)
Example #29: change_token
 def change_token(self):
     self.old_token_secret = self.token_secret
     self.token_secret = constants.generate_id()
Example #30: refresh_token
 def refresh_token(self):  # TODO: this needs to be called periodically
     self.old_token_secret = self.token_secret
     self.token_secret = constants.generate_id()
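A minimal sketch of one way to act on the TODO above and rotate the secret periodically, assuming the same module context; the helper name and the 300-second interval are assumptions, not part of the source.

async def refresh_token_periodically(rpc: 'KademliaRPC', interval: float = 300.0):
    # keeping the previous secret in old_token_secret presumably lets tokens issued
    # just before a rotation remain verifiable for one more interval
    while True:
        await asyncio.sleep(interval)
        rpc.refresh_token()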