class DHTComponent(Component):
    """Daemon component that runs the asyncio DHT node.

    Depends on the UPnP component for the external IP address and the
    TCP/UDP port redirects used when joining the network.
    """

    component_name = DHT_COMPONENT
    depends_on = [UPNP_COMPONENT]

    def __init__(self, component_manager):
        super().__init__(component_manager)
        # Fixed annotation: the node is None until start() completes.
        self.dht_node: typing.Optional[Node] = None
        self.upnp_component = None
        self.external_udp_port = None
        self.external_peer_port = None

    @property
    def component(self) -> typing.Optional[Node]:
        return self.dht_node

    async def get_status(self):
        """Report the hex node id and routing table size (None/0 before start)."""
        return {
            'node_id': None if not self.dht_node else binascii.hexlify(self.dht_node.protocol.node_id),
            'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.protocol.routing_table.get_peers())
        }

    def get_node_id(self):
        """Load the persistent node id from <data_dir>/node_id.

        Generates, saves (base58-encoded), and returns a fresh id on first run.
        """
        node_id_filename = os.path.join(self.conf.data_dir, "node_id")
        if os.path.isfile(node_id_filename):
            with open(node_id_filename, "r") as node_id_file:
                return base58.b58decode(str(node_id_file.read()).strip())
        node_id = utils.generate_id()
        with open(node_id_filename, "w") as node_id_file:
            node_id_file.write(base58.b58encode(node_id).decode())
        return node_id

    async def start(self):
        """Resolve external ip/ports via UPnP, then create and start the DHT node."""
        log.info("start the dht")
        self.upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
        # Fall back to the locally configured ports when no UPnP redirect exists.
        self.external_peer_port = self.upnp_component.upnp_redirects.get("TCP", self.conf.tcp_port)
        self.external_udp_port = self.upnp_component.upnp_redirects.get("UDP", self.conf.udp_port)
        external_ip = self.upnp_component.external_ip
        if not external_ip:
            log.warning("UPnP component failed to get external ip")
            external_ip = await get_external_ip()
            if not external_ip:
                log.warning("failed to get external ip")
        self.dht_node = Node(
            asyncio.get_event_loop(),
            self.component_manager.peer_manager,
            node_id=self.get_node_id(),
            internal_udp_port=self.conf.udp_port,
            udp_port=self.external_udp_port,
            external_ip=external_ip,
            peer_port=self.external_peer_port,
            rpc_timeout=self.conf.node_rpc_timeout
        )
        self.dht_node.start(
            interface=self.conf.network_interface, known_node_urls=self.conf.known_dht_nodes
        )
        log.info("Started the dht")

    async def stop(self):
        # Bug fix: stop() could be called before start() finished (or after it
        # failed), in which case dht_node is still None and .stop() raised
        # AttributeError. Guard so shutdown is always safe.
        if self.dht_node is not None:
            self.dht_node.stop()
class DHTComponent(Component):
    """Twisted-era DHT component: joins the DHT with UPnP-discovered ports."""

    component_name = DHT_COMPONENT
    depends_on = [UPNP_COMPONENT]

    def __init__(self, component_manager):
        super().__init__(component_manager)
        self.dht_node = None
        self.upnp_component = None
        self.external_udp_port = None
        self.external_peer_port = None

    @property
    def component(self):
        # The node itself is the public face of this component.
        return self.dht_node

    def get_status(self):
        """Report the configured node id and current routing table size."""
        contact_count = len(self.dht_node.contacts) if self.dht_node else 0
        return {
            'node_id': binascii.hexlify(conf.settings.get_node_id()),
            'peers_in_routing_table': contact_count,
        }

    @defer.inlineCallbacks
    def start(self):
        """Resolve external ip/ports, build the Node, and join without blocking."""
        upnp = self.component_manager.get_component(UPNP_COMPONENT)
        self.upnp_component = upnp
        redirects = upnp.upnp_redirects
        # Use the UPnP redirect when one exists, else the configured port.
        self.external_peer_port = redirects.get("TCP", conf.settings["peer_port"])
        self.external_udp_port = redirects.get("UDP", conf.settings["dht_node_port"])
        dht_id = conf.settings.get_node_id()
        if dht_id is None:
            dht_id = generate_id()
        address = upnp.external_ip
        if not address:
            log.warning("UPnP component failed to get external ip")
            address = yield get_external_ip()
            if not address:
                log.warning("failed to get external ip")
        self.dht_node = Node(
            node_id=dht_id,
            udpPort=conf.settings['dht_node_port'],
            externalUDPPort=self.external_udp_port,
            externalIP=address,
            peerPort=self.external_peer_port,
        )
        yield self.dht_node.start(conf.settings['known_dht_nodes'], block_on_join=False)
        log.info("Started the dht")

    @defer.inlineCallbacks
    def stop(self):
        yield self.dht_node.stop()
class TestBlobAnnouncer(AsyncioTestCase):
    """Integration test: announce blobs over a mocked in-memory DHT network."""

    async def setup_node(self, peer_addresses, address, node_id):
        # Build the announcing node plus one helper node per (id, address) pair.
        self.nodes: typing.Dict[int, Node] = {}
        # Accelerated clock so the announcer's timed loop runs instantly in tests.
        self.advance = dht_mocks.get_time_accelerator(self.loop, self.loop.time())
        self.conf = Config()
        self.storage = SQLiteStorage(self.conf, ":memory:", self.loop, self.loop.time)
        await self.storage.open()
        self.peer_manager = PeerManager(self.loop)
        self.node = Node(self.loop, self.peer_manager, node_id, 4444, 4444, 3333, address)
        await self.node.start_listening(address)
        self.blob_announcer = BlobAnnouncer(self.loop, self.node, self.storage)
        for node_id, address in peer_addresses:
            await self.add_peer(node_id, address)
        # Mark the node as joined so the announcer does not wait for a real join.
        self.node.joined.set()

    async def add_peer(self, node_id, address, add_to_routing_table=True):
        """Start a helper node at `address`; optionally link it into self.node's table."""
        n = Node(self.loop, PeerManager(self.loop), node_id, 4444, 4444, 3333, address)
        await n.start_listening(address)
        self.nodes.update({len(self.nodes): n})
        if add_to_routing_table:
            await self.node.protocol.add_peer(
                self.peer_manager.get_kademlia_peer(
                    n.protocol.node_id, n.protocol.external_ip, n.protocol.udp_port
                )
            )

    @contextlib.asynccontextmanager
    async def _test_network_context(self, peer_addresses=None):
        """Run the body inside a mocked UDP network; always tear nodes down."""
        self.peer_addresses = peer_addresses or [
            (constants.generate_id(2), '1.2.3.2'),
            (constants.generate_id(3), '1.2.3.3'),
            (constants.generate_id(4), '1.2.3.4'),
            (constants.generate_id(5), '1.2.3.5'),
            (constants.generate_id(6), '1.2.3.6'),
            (constants.generate_id(7), '1.2.3.7'),
            (constants.generate_id(8), '1.2.3.8'),
            (constants.generate_id(9), '1.2.3.9'),
        ]
        try:
            with dht_mocks.mock_network_loop(self.loop):
                await self.setup_node(self.peer_addresses, '1.2.3.1', constants.generate_id(1))
                yield
        finally:
            self.blob_announcer.stop()
            self.node.stop()
            for n in self.nodes.values():
                n.stop()

    async def chain_peer(self, node_id, address):
        """Add a node that only knows the previously added node (a chain link)."""
        previous_last_node = self.nodes[len(self.nodes) - 1]
        await self.add_peer(node_id, address, False)
        last_node = self.nodes[len(self.nodes) - 1]
        peer = last_node.protocol.get_rpc_peer(
            last_node.protocol.peer_manager.get_kademlia_peer(
                previous_last_node.protocol.node_id, previous_last_node.protocol.external_ip,
                previous_last_node.protocol.udp_port
            )
        )
        # Ping so the two nodes learn about each other.
        await peer.ping()
        return peer

    async def test_announce_blobs(self):
        blob1 = binascii.hexlify(b'1' * 48).decode()
        blob2 = binascii.hexlify(b'2' * 48).decode()
        async with self._test_network_context():
            await self.storage.add_completed_blob(blob1, 1024)
            await self.storage.add_completed_blob(blob2, 1024)
            # Force both blobs to be due for announcement right away.
            await self.storage.db.execute(
                "update blob set next_announce_time=0, should_announce=1 where blob_hash in (?, ?)",
                (blob1, blob2)
            )
            to_announce = await self.storage.get_blobs_to_announce()
            self.assertEqual(2, len(to_announce))
            self.blob_announcer.start()
            # Advance past the announce interval so the queue drains.
            await self.advance(61.0)
            to_announce = await self.storage.get_blobs_to_announce()
            self.assertEqual(0, len(to_announce))
            self.blob_announcer.stop()

            # test that we can route from a poorly connected peer all the way to the announced blob
            await self.chain_peer(constants.generate_id(10), '1.2.3.10')
            await self.chain_peer(constants.generate_id(11), '1.2.3.11')
            await self.chain_peer(constants.generate_id(12), '1.2.3.12')
            await self.chain_peer(constants.generate_id(13), '1.2.3.13')
            await self.chain_peer(constants.generate_id(14), '1.2.3.14')
            last = self.nodes[len(self.nodes) - 1]
            search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(loop=self.loop)
            search_q.put_nowait(blob1)
            _, task = last.accumulate_peers(search_q, peer_q)
            found_peers = await peer_q.get()
            task.cancel()
            # The only peer found for blob1 should be the announcing node itself.
            self.assertEqual(1, len(found_peers))
            self.assertEqual(self.node.protocol.node_id, found_peers[0].node_id)
            self.assertEqual(self.node.protocol.external_ip, found_peers[0].address)
            self.assertEqual(self.node.protocol.peer_port, found_peers[0].tcp_port)
class NodeRPC(AuthJSONRPCServer):
    """JSON-RPC wrapper around a standalone DHT node (legacy Python 2 / twisted).

    Exposes jsonrpc_* methods to inspect and drive the node: change its id,
    find peers, list peers for a blob, ping contacts, and dump the routing table.
    """

    def __init__(self, lbryid, seeds, node_port, rpc_port):
        AuthJSONRPCServer.__init__(self, False)
        self.root = None
        self.port = None
        self.seeds = seeds
        self.node_port = node_port
        self.rpc_port = rpc_port
        # Use the supplied hex id if given, otherwise generate a random one.
        if lbryid:
            lbryid = lbryid.decode('hex')
        else:
            lbryid = generate_id()
        self.node_id = lbryid
        self.external_ip = get_external_ip_and_setup_upnp()
        self.node_port = node_port

    @defer.inlineCallbacks
    def setup(self):
        """Create the node, resolve the seed hostnames, and join the DHT."""
        self.node = Node(node_id=self.node_id, udpPort=self.node_port,
                         externalIP=self.external_ip)
        hosts = []
        for hostname, hostport in self.seeds:
            host_ip = yield reactor.resolve(hostname)
            hosts.append((host_ip, hostport))
        log.info("connecting to dht")
        yield self.node.joinNetwork(tuple(hosts))
        log.info("connected to dht")
        # Start the HTTP API only once, on the first successful setup.
        if not self.announced_startup:
            self.announced_startup = True
            self.start_api()
        log.info("lbry id: %s (%i bytes)", self.node.node_id.encode('hex'),
                 len(self.node.node_id))

    def start_api(self):
        """Bind the JSON-RPC site on localhost at rpc_port."""
        root = resource.Resource()
        root.putChild('', self)
        self.port = reactor.listenTCP(self.rpc_port, Site(root), interface='localhost')
        log.info("started jsonrpc server")

    @defer.inlineCallbacks
    def jsonrpc_node_id_set(self, node_id):
        """Replace the node id (hex) and rejoin the DHT with the new identity."""
        old_id = self.node.node_id
        self.node.stop()
        del self.node
        self.node_id = node_id.decode('hex')
        yield self.setup()
        msg = "changed dht id from %s to %s" % (
            old_id.encode('hex'), self.node.node_id.encode('hex'))
        defer.returnValue(msg)

    def jsonrpc_node_id_get(self):
        """Return the current node id as hex."""
        return self._render_response(self.node.node_id.encode('hex'))

    @defer.inlineCallbacks
    def jsonrpc_peer_find(self, node_id):
        """Look up a contact by hex node id; returns (address, port) or None."""
        node_id = node_id.decode('hex')
        contact = yield self.node.findContact(node_id)
        result = None
        if contact:
            result = (contact.address, contact.port)
        defer.returnValue(result)

    @defer.inlineCallbacks
    def jsonrpc_peer_list_for_blob(self, blob_hash):
        """Return the DHT peers currently storing the given blob hash (hex)."""
        peers = yield self.node.getPeersForBlob(blob_hash.decode('hex'))
        defer.returnValue(peers)

    @defer.inlineCallbacks
    def jsonrpc_ping(self, node_id):
        """Find and ping a node; on timeout, evict it from table and datastore."""
        contact_host = yield self.jsonrpc_peer_find(node_id=node_id)
        if not contact_host:
            defer.returnValue("failed to find node")
        contact_ip, contact_port = contact_host
        contact = Contact(node_id.decode('hex'), contact_ip, contact_port,
                          self.node._protocol)
        try:
            result = yield contact.ping()
        except TimeoutError:
            self.node.removeContact(contact.id)
            self.node._dataStore.removePeer(contact.id)
            result = {'error': 'timeout'}
        defer.returnValue(result)

    def get_routing_table(self):
        """Build a diagnostic snapshot of the routing table and datastore.

        Returns a dict with per-bucket contacts (each with its announced blobs),
        publishers present in the datastore but absent from the routing table,
        the full contact set, all blob hashes seen, and this node's id.
        """
        result = {}
        data_store = deepcopy(self.node._dataStore._dict)
        datastore_len = len(data_store)
        hosts = {}
        missing_contacts = []
        if datastore_len:
            # Map each known publisher contact -> list of blob hashes it stores.
            for k, v in data_store.iteritems():
                for value, lastPublished, originallyPublished, originalPublisherID in v:
                    try:
                        contact = self.node._routingTable.getContact(
                            originalPublisherID)
                    except ValueError:
                        # Publisher not in the routing table; record it once.
                        if originalPublisherID.encode(
                                'hex') not in missing_contacts:
                            missing_contacts.append(
                                originalPublisherID.encode('hex'))
                        continue
                    if contact in hosts:
                        blobs = hosts[contact]
                    else:
                        blobs = []
                    blobs.append(k.encode('hex'))
                    hosts[contact] = blobs
        contact_set = []
        blob_hashes = []
        result['buckets'] = {}
        # Walk every bucket, attaching each contact's blob list (if any).
        for i in range(len(self.node._routingTable._buckets)):
            for contact in self.node._routingTable._buckets[i]._contacts:
                contacts = result['buckets'].get(i, [])
                if contact in hosts:
                    blobs = hosts[contact]
                    del hosts[contact]
                else:
                    blobs = []
                host = {
                    "address": contact.address,
                    "id": contact.id.encode("hex"),
                    "blobs": blobs,
                }
                for blob_hash in blobs:
                    if blob_hash not in blob_hashes:
                        blob_hashes.append(blob_hash)
                contacts.append(host)
                result['buckets'][i] = contacts
                contact_set.append(contact.id.encode("hex"))
        # Anything left in hosts was in the datastore but not in any bucket.
        if hosts:
            result['datastore extra'] = [{
                "id": host.id.encode('hex'),
                "blobs": hosts[host],
            } for host in hosts]
        result['missing contacts'] = missing_contacts
        result['contacts'] = contact_set
        result['blob hashes'] = blob_hashes
        result['node id'] = self.node_id.encode('hex')
        return result

    def jsonrpc_routing_table_get(self):
        """Return the routing table snapshot produced by get_routing_table()."""
        return self._render_response(self.get_routing_table())
class KademliaProtocolTest(unittest.TestCase):
    """ Test case for the Protocol class """

    udpPort = 9182

    def setUp(self):
        # Two in-process nodes on a simulated twisted Clock; no real network.
        self._reactor = Clock()
        self.node = Node(node_id=b'1' * 48, udpPort=self.udpPort, externalIP="127.0.0.1",
                         listenUDP=listenUDP, resolve=resolve, clock=self._reactor,
                         callLater=self._reactor.callLater)
        self.remote_node = Node(node_id=b'2' * 48, udpPort=self.udpPort, externalIP="127.0.0.2",
                                listenUDP=listenUDP, resolve=resolve, clock=self._reactor,
                                callLater=self._reactor.callLater)
        # Each node's view of the other, built through its own contact manager.
        self.remote_contact = self.node.contact_manager.make_contact(
            b'2' * 48, '127.0.0.2', 9182, self.node._protocol)
        self.us_from_them = self.remote_node.contact_manager.make_contact(
            b'1' * 48, '127.0.0.1', 9182, self.remote_node._protocol)
        self.node.start_listening()
        self.remote_node.start_listening()

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.node.stop()
        yield self.remote_node.stop()
        del self._reactor

    @defer.inlineCallbacks
    def testReactor(self):
        """ Tests if the reactor can start/stop the protocol correctly """
        d = defer.Deferred()
        self._reactor.callLater(1, d.callback, True)
        self._reactor.advance(1)
        result = yield d
        self.assertTrue(result)

    @defer.inlineCallbacks
    def testRPCTimeout(self):
        """ Tests if a RPC message sent to a dead remote node times out correctly """
        yield self.remote_node.stop()
        self._reactor.pump([1 for _ in range(10)])
        self.node.addContact(self.remote_contact)

        # Replacement ping that outlives the RPC timeout, forcing a failure.
        @rpcmethod
        def fake_ping(*args, **kwargs):
            time.sleep(lbrynet.dht.constants.rpcTimeout + 1)
            return 'pong'

        # Shrink the timeout/attempt constants so the test stays fast; the
        # originals are restored by reset_values() below.
        real_ping = self.node.ping
        real_timeout = lbrynet.dht.constants.rpcTimeout
        real_attempts = lbrynet.dht.constants.rpcAttempts
        lbrynet.dht.constants.rpcAttempts = 1
        lbrynet.dht.constants.rpcTimeout = 1
        self.node.ping = fake_ping
        # Make sure the contact was added
        self.assertFalse(
            self.remote_contact not in self.node.contacts,
            'Contact not added to fake node (error in test code)')
        self.node.start_listening()
        # Run the PING RPC (which should raise a timeout error)
        df = self.remote_contact.ping()

        def check_timeout(err):
            self.assertEqual(err.type, TimeoutError)

        df.addErrback(check_timeout)

        def reset_values():
            self.node.ping = real_ping
            lbrynet.dht.constants.rpcTimeout = real_timeout
            lbrynet.dht.constants.rpcAttempts = real_attempts

        # See if the contact was removed due to the timeout
        def check_removed_contact():
            self.assertFalse(
                self.remote_contact in self.node.contacts,
                'Contact was not removed after RPC timeout; check exception types.'
            )

        df.addCallback(lambda _: reset_values())
        # Stop the reactor if a result arrives (timeout or not)
        df.addCallback(lambda _: check_removed_contact())
        self._reactor.pump([1 for _ in range(20)])

    @defer.inlineCallbacks
    def testRPCRequest(self):
        """ Tests if a valid RPC request is executed and responded to correctly """
        yield self.node.addContact(self.remote_contact)
        self.error = None

        def handleError(f):
            self.error = 'An RPC error occurred: %s' % f.getErrorMessage()

        def handleResult(result):
            expectedResult = b'pong'
            if result != expectedResult:
                self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' \
                             % (expectedResult, result)

        # Simulate the RPC
        df = self.remote_contact.ping()
        df.addCallback(handleResult)
        df.addErrback(handleError)
        self._reactor.advance(2)
        yield df
        self.assertFalse(self.error, self.error)
        # The list of sent RPC messages should be empty at this stage
        self.assertEqual(
            len(self.node._protocol._sentMessages), 0,
            'The protocol is still waiting for a RPC result, '
            'but the transaction is already done!')

    def testRPCAccess(self):
        """ Tests invalid RPC requests
        Verifies that a RPC request for an existing but unpublished method is denied,
        and that the associated (remote) exception gets raised locally """
        self.assertRaises(AttributeError, getattr, self.remote_contact,
                          "not_a_rpc_function")

    def testRPCRequestArgs(self):
        """ Tests if an RPC requiring arguments is executed correctly """
        self.node.addContact(self.remote_contact)
        self.error = None

        def handleError(f):
            self.error = 'An RPC error occurred: %s' % f.getErrorMessage()

        def handleResult(result):
            expectedResult = b'pong'
            if result != expectedResult:
                self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % \
                             (expectedResult, result)

        # Publish the "local" node on the network
        self.node.start_listening()
        # Simulate the RPC
        df = self.remote_contact.ping()
        df.addCallback(handleResult)
        df.addErrback(handleError)
        self._reactor.pump([1 for _ in range(10)])
        self.assertFalse(self.error, self.error)
        # The list of sent RPC messages should be empty at this stage
        self.assertEqual(
            len(self.node._protocol._sentMessages), 0,
            'The protocol is still waiting for a RPC result, '
            'but the transaction is already done!')

    @defer.inlineCallbacks
    def testDetectProtocolVersion(self):
        """Contacts should detect whether a peer speaks protocol version 0 or 1
        from the presence of b'protocolVersion' in findValue responses."""
        original_findvalue = self.remote_node.findValue
        fake_blob = unhexlify("AB" * 48)

        # A findValue that strips the version key, emulating a v0 peer.
        @rpcmethod
        def findValue(contact, key):
            result = original_findvalue(contact, key)
            result.pop(b'protocolVersion')
            return result

        self.remote_node.findValue = findValue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue('protocolVersion' not in find_value_response)

        # Restore the original handler: contact should flip back to version 1.
        self.remote_node.findValue = original_findvalue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 1)
        self.assertTrue('protocolVersion' not in find_value_response)

        # And back down to version 0 again when the key disappears.
        self.remote_node.findValue = findValue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue('protocolVersion' not in find_value_response)

    @defer.inlineCallbacks
    def testStoreToPre_0_20_0_Node(self):
        """Storing to a peer that speaks the pre-0.20.0 argument format."""

        # Disable incoming-argument migration so the remote node behaves
        # like an old peer receiving old-style arguments.
        def _dont_migrate(contact, method, *args):
            return args, {}

        self.remote_node._protocol._migrate_incoming_rpc_args = _dont_migrate
        original_findvalue = self.remote_node.findValue
        original_store = self.remote_node.store

        # Old-style findValue: no b'protocolVersion' in the response.
        @rpcmethod
        def findValue(contact, key):
            result = original_findvalue(contact, key)
            if b'protocolVersion' in result:
                result.pop(b'protocolVersion')
            return result

        # Old-style store: value arrives as a dict of token/lbryid/port.
        @rpcmethod
        def store(contact, key, value, originalPublisherID=None, self_store=False,
                  **kwargs):
            self.assertTrue(len(key) == 48)
            self.assertSetEqual(set(value.keys()), {b'token', b'lbryid', b'port'})
            self.assertFalse(self_store)
            self.assertDictEqual(kwargs, {})
            return original_store(  # pylint: disable=too-many-function-args
                contact, key, value[b'token'], value[b'port'], originalPublisherID, 0)

        self.remote_node.findValue = findValue
        self.remote_node.store = store
        fake_blob = unhexlify("AB" * 48)
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(b'protocolVersion' not in find_value_response)
        token = find_value_response[b'token']
        d = self.remote_contact.store(fake_blob, token, 3333, self.node.node_id, 0)
        self._reactor.advance(3)
        response = yield d
        self.assertEqual(response, b'OK')
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(self.remote_node._dataStore.hasPeersForBlob(fake_blob))
        self.assertEqual(len(self.remote_node._dataStore.getStoringContacts()), 1)

    @defer.inlineCallbacks
    def testStoreFromPre_0_20_0_Node(self):
        """Receiving a store from a peer that sends pre-0.20.0 arguments."""

        # Disable outgoing-argument migration so the remote node sends
        # old-style arguments over the wire.
        def _dont_migrate(contact, method, *args):
            return args

        self.remote_node._protocol._migrate_outgoing_rpc_args = _dont_migrate
        us_from_them = self.remote_node.contact_manager.make_contact(
            b'1' * 48, '127.0.0.1', self.udpPort, self.remote_node._protocol)
        fake_blob = unhexlify("AB" * 48)
        d = us_from_them.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(b'protocolVersion' not in find_value_response)
        token = find_value_response[b'token']
        us_from_them.update_protocol_version(0)
        # Send a raw old-format store RPC (single dict argument).
        d = self.remote_node._protocol.sendRPC(
            us_from_them, b"store", (fake_blob, {
                b'lbryid': self.remote_node.node_id,
                b'token': token,
                b'port': 3333
            }))
        self._reactor.advance(3)
        response = yield d
        self.assertEqual(response, b'OK')
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(self.node._dataStore.hasPeersForBlob(fake_blob))
        self.assertEqual(len(self.node._dataStore.getStoringContacts()), 1)
        self.assertIs(self.node._dataStore.getStoringContacts()[0],
                      self.remote_contact)