def _setup_other_components(self):
    """Lazily construct and start the remaining session components.

    Creates defaults for any component the caller did not inject (rate
    limiter, blob manager, blob availability tracker, payment rate manager),
    then starts the rate limiter, blob manager, and wallet.

    @return: a Deferred that fires once the blob manager and wallet have
        started and the blob tracker has been started; any startup error
        propagates through the DeferredList (fireOnOneErrback=True).
    @raise Exception: if no blob manager was supplied and blob_dir is None —
        in-memory blob storage (TempBlobManager) is no longer supported.
    """
    log.debug("Setting up the rest of the components")
    if self.rate_limiter is None:
        self.rate_limiter = RateLimiter()
    if self.blob_manager is None:
        if self.blob_dir is None:
            # The branch tests blob_dir, so that is the setting the user must
            # provide (the message previously said "db_dir").
            raise Exception(
                "TempBlobManager is no longer supported, specify BlobManager or blob_dir")
        self.blob_manager = DiskBlobManager(self.hash_announcer, self.blob_dir, self.db_dir)
    if self.blob_tracker is None:
        self.blob_tracker = self.blob_tracker_class(
            self.blob_manager, self.peer_finder, self.dht_node)
    if self.payment_rate_manager is None:
        self.payment_rate_manager = self.payment_rate_manager_class(
            self.base_payment_rate_manager, self.blob_tracker, self.is_generous)
    self.rate_limiter.start()
    d1 = self.blob_manager.setup()
    d2 = self.wallet.start()
    dl = defer.DeferredList([d1, d2], fireOnOneErrback=True, consumeErrors=True)
    # Only start the availability tracker once its dependencies are up.
    dl.addCallback(lambda _: self.blob_tracker.start())
    return dl
def _setup_other_components(self):
    """Create default rate limiter / blob manager if absent, then start them.

    @return: a Deferred firing when both the blob manager and the wallet have
        started; on failure the real failure is unwrapped from the
        DeferredList's FirstError via its subFailure attribute.
    """
    log.debug("Setting up the rest of the components")

    if self.rate_limiter is None:
        self.rate_limiter = RateLimiter()

    if self.blob_manager is None:
        if self.blob_dir is not None:
            self.blob_manager = DiskBlobManager(
                self.hash_announcer, self.blob_dir, self.db_dir)
        else:
            # No blob directory configured: fall back to in-memory storage.
            self.blob_manager = TempBlobManager(self.hash_announcer)

    self.rate_limiter.start()

    startup_deferreds = [self.blob_manager.setup(), self.wallet.start()]
    dl = defer.DeferredList(startup_deferreds,
                            fireOnOneErrback=True, consumeErrors=True)
    dl.addErrback(lambda err: err.value.subFailure)
    return dl
def start(self):
    """Bring up the uploader process.

    Applies platform tweaks, builds a Session backed entirely by fake
    networking components (no real peers or payments), creates the
    EncryptedFileManager, schedules start_all, and runs the Twisted reactor.
    """
    use_epoll_on_linux()
    init_conf_windows()
    from twisted.internet import reactor
    self.reactor = reactor
    logging.debug("Starting the uploader")
    # Re-seed crypto RNG after fork, and seed a deterministic PRNG for tests.
    Random.atfork()
    rng = random.Random()
    rng.seed("start_lbry_uploader")
    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = RateLimiter()
    self.sd_identifier = StreamDescriptorIdentifier()
    self.db_dir, self.blob_dir = mk_db_and_blob_dir()
    default_data_rate = conf.ADJUSTABLE_SETTINGS['data_rate'][1]
    self.session = Session(
        default_data_rate,
        db_dir=self.db_dir,
        blob_dir=self.blob_dir,
        node_id="abcd",
        peer_finder=peer_finder,
        hash_announcer=hash_announcer,
        peer_port=5553,
        dht_node_port=4445,
        use_upnp=False,
        rate_limiter=rate_limiter,
        wallet=wallet,
        blob_tracker_class=DummyBlobAvailabilityTracker,
        dht_node_class=Node,
        is_generous=self.is_generous,
        external_ip="127.0.0.1",
    )
    self.lbry_file_manager = EncryptedFileManager(self.session, self.sd_identifier)
    if self.ul_rate_limit is not None:
        self.session.rate_limiter.set_ul_limit(self.ul_rate_limit)
    # Defer the real work until after the reactor is running.
    reactor.callLater(1, self.start_all)
    if not reactor.running:
        reactor.run()
def start(self):
    """Bring up the uploader process (older Session API variant).

    Builds a Session backed by fake networking components, creates the
    EncryptedFileManager with a temporary metadata manager, schedules
    start_all, and runs the Twisted reactor.

    NOTE(review): db_dir is the fixed relative path "server" created with
    os.mkdir, so a second run from the same working directory would fail —
    confirm callers always start in a fresh directory.
    """
    use_epoll_on_linux()
    from twisted.internet import reactor
    self.reactor = reactor
    logging.debug("Starting the uploader")
    # Re-seed the crypto RNG after fork; seed a deterministic PRNG for tests.
    Random.atfork()
    r = random.Random()
    r.seed("start_lbry_uploader")
    # Fake network stack: no real peers, announcements, or payments.
    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = RateLimiter()
    self.sd_identifier = StreamDescriptorIdentifier()
    db_dir = "server"
    os.mkdir(db_dir)
    # NOTE(review): settings.data_rate is the older config API — confirm it
    # matches the conf.ADJUSTABLE_SETTINGS value used elsewhere in this file.
    self.session = Session(settings.data_rate, db_dir=db_dir, lbryid="abcd",
                           peer_finder=peer_finder, hash_announcer=hash_announcer,
                           peer_port=5553, use_upnp=False, rate_limiter=rate_limiter,
                           wallet=wallet,
                           blob_tracker_class=DummyBlobAvailabilityTracker,
                           dht_node_class=Node, is_generous=self.is_generous)
    stream_info_manager = TempEncryptedFileMetadataManager()
    self.lbry_file_manager = EncryptedFileManager(self.session, stream_info_manager,
                                                  self.sd_identifier)
    if self.ul_rate_limit is not None:
        self.session.rate_limiter.set_ul_limit(self.ul_rate_limit)
    # Defer the real work until after the reactor is running.
    reactor.callLater(1, self.start_all)
    if not reactor.running:
        reactor.run()
class LBRYSession(object):
    """This class manages all important services common to any application that
    uses the network:

      - the hash announcer, which informs other peers that this peer is
        associated with some hash. Usually, this means this peer has a blob
        identified by the hash in question, but it can be used for other
        purposes.
      - the peer finder, which finds peers that are associated with some hash.
      - the blob manager, which keeps track of which blobs have been
        downloaded and provides access to them.
      - the rate limiter, which attempts to ensure download and upload rates
        stay below a set maximum.
      - upnp, which opens holes in compatible firewalls so that remote peers
        can connect to this peer.
    """

    def __init__(self, blob_data_payment_rate, db_dir=None, lbryid=None, peer_manager=None,
                 dht_node_port=None, known_dht_nodes=None, peer_finder=None, hash_announcer=None,
                 blob_dir=None, blob_manager=None, peer_port=None, use_upnp=True,
                 rate_limiter=None, wallet=None):
        """
        @param blob_data_payment_rate: The default payment rate for blob data

        @param db_dir: The directory in which levelDB files should be stored

        @param lbryid: The unique ID of this node

        @param peer_manager: An object which keeps track of all known peers.
            If None, a PeerManager will be created

        @param dht_node_port: The port on which the dht node should listen for
            incoming connections

        @param known_dht_nodes: A list of (host, port) nodes which the dht node
            should use to bootstrap into the dht

        @param peer_finder: An object which is used to look up peers that are
            associated with some hash. If None, a DHTPeerFinder will be used,
            which looks for peers in the distributed hash table.

        @param hash_announcer: An object which announces to other peers that
            this peer is associated with some hash. If None, and peer_port is
            not None, a DHTHashAnnouncer will be used. If None and peer_port is
            None, a DummyHashAnnouncer will be used, which will not actually
            announce anything.

        @param blob_dir: The directory in which blobs will be stored. If None
            and blob_manager is None, blobs will be stored in memory only.

        @param blob_manager: An object which keeps track of downloaded blobs
            and provides access to them. If None, and blob_dir is not None, a
            DiskBlobManager will be used, with the given blob_dir. If None and
            blob_dir is None, a TempBlobManager will be used, which stores
            blobs in memory only.

        @param peer_port: The port on which other peers should connect to this
            peer

        @param use_upnp: Whether or not to try to open a hole in the firewall
            so that outside peers can connect to this peer's peer_port and
            dht_node_port

        @param rate_limiter: An object which keeps track of the amount of data
            transferred to and from this peer, and can limit that rate if
            desired

        @param wallet: An object which will be used to keep track of expected
            payments and which will pay peers. If None, a wallet which uses
            the Point Trader system will be used, which is meant for testing
            only

        @return:
        """
        self.db_dir = db_dir
        self.lbryid = lbryid
        self.peer_manager = peer_manager
        self.dht_node_port = dht_node_port
        self.known_dht_nodes = known_dht_nodes
        if self.known_dht_nodes is None:
            self.known_dht_nodes = []
        self.peer_finder = peer_finder
        self.hash_announcer = hash_announcer
        self.blob_dir = blob_dir
        self.blob_manager = blob_manager
        self.peer_port = peer_port
        self.use_upnp = use_upnp
        self.rate_limiter = rate_limiter
        # Assume loopback until UPnP discovery reports a real external address.
        self.external_ip = '127.0.0.1'
        # (port, protocol) pairs this session created, torn down in _unset_upnp.
        self.upnp_redirects = []
        self.wallet = wallet
        # Created in _setup_dht when no peer_finder was supplied.
        self.dht_node = None
        self.base_payment_rate_manager = BasePaymentRateManager(blob_data_payment_rate)

    def setup(self):
        """Create the blob directory and database if necessary, start all
        desired services.

        @return: a Deferred firing once UPnP (if enabled), the DHT (if no
            peer_finder was supplied), and the remaining components are set up.
        """
        log.debug("Setting up the lbry session")
        if self.lbryid is None:
            self.lbryid = generate_id()
        if self.wallet is None:
            self.wallet = PTCWallet(self.db_dir)
        if self.peer_manager is None:
            self.peer_manager = PeerManager()
        if self.use_upnp is True:
            d = self._try_upnp()
        else:
            d = defer.succeed(True)
        if self.peer_finder is None:
            d.addCallback(lambda _: self._setup_dht())
        else:
            if self.hash_announcer is None and self.peer_port is not None:
                # A custom peer_finder without an announcer means our blobs
                # cannot be advertised to the network.
                log.warning("The server has no way to advertise its available blobs.")
                self.hash_announcer = DummyHashAnnouncer()
        d.addCallback(lambda _: self._setup_other_components())
        return d

    def shut_down(self):
        """Stop all services.

        @return: a DeferredList over every component's (maybeDeferred) stop.
        """
        ds = []
        if self.dht_node is not None:
            ds.append(defer.maybeDeferred(self.dht_node.stop))
        if self.rate_limiter is not None:
            ds.append(defer.maybeDeferred(self.rate_limiter.stop))
        if self.peer_finder is not None:
            ds.append(defer.maybeDeferred(self.peer_finder.stop))
        if self.hash_announcer is not None:
            ds.append(defer.maybeDeferred(self.hash_announcer.stop))
        if self.wallet is not None:
            ds.append(defer.maybeDeferred(self.wallet.stop))
        if self.blob_manager is not None:
            ds.append(defer.maybeDeferred(self.blob_manager.stop))
        if self.use_upnp is True:
            ds.append(defer.maybeDeferred(self._unset_upnp))
        return defer.DeferredList(ds)

    def _try_upnp(self):
        """Attempt to discover a UPnP gateway and map peer/DHT ports.

        Runs the blocking miniupnpc calls in a thread. On success records the
        created redirects in self.upnp_redirects and may update
        self.external_ip. Any error is logged and swallowed (returns False).
        """
        log.debug("In _try_upnp")

        def threaded_try_upnp():
            if self.use_upnp is False:
                log.debug("Not using upnp")
                return False
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                external_ip = u.externalipaddress()
                if external_ip != '0.0.0.0':
                    # '0.0.0.0' means the gateway doesn't know its address yet.
                    self.external_ip = external_ip
                if self.peer_port is not None:
                    if u.getspecificportmapping(self.peer_port, 'TCP') is None:
                        u.addportmapping(self.peer_port, 'TCP', u.lanaddr, self.peer_port,
                                         'LBRY peer port', '')
                        self.upnp_redirects.append((self.peer_port, 'TCP'))
                        log.info("Set UPnP redirect for TCP port %d", self.peer_port)
                    else:
                        # Existing mapping: not recorded, so we won't tear it down.
                        log.warning("UPnP redirect already set for TCP port %d",
                                    self.peer_port)
                if self.dht_node_port is not None:
                    if u.getspecificportmapping(self.dht_node_port, 'UDP') is None:
                        u.addportmapping(self.dht_node_port, 'UDP', u.lanaddr,
                                         self.dht_node_port, 'LBRY DHT port', '')
                        self.upnp_redirects.append((self.dht_node_port, 'UDP'))
                        # NOTE(review): "UPD" is a typo for "UDP" in this log
                        # message (runtime string left unchanged here).
                        log.info("Set UPnP redirect for UPD port %d", self.dht_node_port)
                    else:
                        log.warning("UPnP redirect already set for UDP port %d",
                                    self.dht_node_port)
                return True
            return False

        def upnp_failed(err):
            # Best effort: UPnP failure must not break session setup.
            log.warning("UPnP failed. Reason: %s", err.getErrorMessage())
            return False

        d = threads.deferToThread(threaded_try_upnp)
        d.addErrback(upnp_failed)
        return d

    def _setup_dht(self):
        """Create the DHT node, peer finder, and (if needed) hash announcer,
        resolve the bootstrap hosts, and join the network.

        @return: a Deferred firing True once the node has joined and the
            peer finder / hash announcer manage loops are running.
        """
        from twisted.internet import reactor
        log.debug("Starting the dht")

        def match_port(h, p):
            # Pair each resolved address back with its port.
            return h, p

        def join_resolved_addresses(result):
            # Keep only successfully resolved bootstrap addresses.
            addresses = []
            for success, value in result:
                if success is True:
                    addresses.append(value)
            return addresses

        def start_dht(addresses):
            self.dht_node.joinNetwork(addresses)
            self.peer_finder.run_manage_loop()
            self.hash_announcer.run_manage_loop()
            return True

        ds = []
        for host, port in self.known_dht_nodes:
            d = reactor.resolve(host)
            d.addCallback(match_port, port)
            ds.append(d)

        self.dht_node = node.Node(udpPort=self.dht_node_port, lbryid=self.lbryid,
                                  externalIP=self.external_ip)
        self.peer_finder = DHTPeerFinder(self.dht_node, self.peer_manager)
        if self.hash_announcer is None:
            self.hash_announcer = DHTHashAnnouncer(self.dht_node, self.peer_port)

        dl = defer.DeferredList(ds)
        dl.addCallback(join_resolved_addresses)
        dl.addCallback(start_dht)
        return dl

    def _setup_other_components(self):
        """Create default rate limiter / blob manager if absent, then start
        the rate limiter, blob manager, and wallet.

        @return: a Deferred firing when the blob manager and wallet have
            started; failures are unwrapped from the DeferredList's FirstError.
        """
        log.debug("Setting up the rest of the components")
        if self.rate_limiter is None:
            self.rate_limiter = RateLimiter()
        if self.blob_manager is None:
            if self.blob_dir is None:
                # No blob directory configured: store blobs in memory only.
                self.blob_manager = TempBlobManager(self.hash_announcer)
            else:
                self.blob_manager = DiskBlobManager(self.hash_announcer,
                                                    self.blob_dir, self.db_dir)
        self.rate_limiter.start()
        d1 = self.blob_manager.setup()
        d2 = self.wallet.start()
        dl = defer.DeferredList([d1, d2], fireOnOneErrback=True, consumeErrors=True)
        # Surface the underlying failure rather than the FirstError wrapper.
        dl.addErrback(lambda err: err.value.subFailure)
        return dl

    def _unset_upnp(self):
        """Remove the UPnP redirects this session created (best effort).

        Runs the blocking miniupnpc calls in a thread; errors are converted
        to strings and otherwise ignored.
        """
        def threaded_unset_upnp():
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                for port, protocol in self.upnp_redirects:
                    if u.getspecificportmapping(port, protocol) is None:
                        # Something external already removed our mapping.
                        log.warning("UPnP redirect for %s %d was removed by something else.",
                                    protocol, port)
                    else:
                        u.deleteportmapping(port, protocol)
                        log.info("Removed UPnP redirect for %s %d.", protocol, port)
                self.upnp_redirects = []

        d = threads.deferToThread(threaded_unset_upnp)
        d.addErrback(lambda err: str(err))
        return d
def start_blob_uploader(blob_hash_queue, kill_event, dead_event, slow, is_generous=False):
    """Run a standalone blob-uploader process for functional tests.

    Builds a Session with fake networking components, serves blob requests
    over TCP, creates a single 2MB blob, and pushes its hash onto
    blob_hash_queue so the client process can start downloading. Polls
    kill_event once per second; when set, shuts everything down, sets
    dead_event, and stops the reactor.

    @param blob_hash_queue: queue the created blob's hash is put on
    @param kill_event: event signalling this process to shut down
    @param dead_event: event set once shutdown has been initiated
    @param slow: if True, serve on port 5553 from "server1" with a ~2KB/s
        upload cap; otherwise serve on port 5554 from "server2"
    @param is_generous: unused here — the Session reads the configured
        'is_generous_host' setting instead
    """
    use_epoll_on_linux()
    from twisted.internet import reactor
    logging.debug("Starting the uploader")
    # Re-seed the crypto RNG after fork.
    Random.atfork()
    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = RateLimiter()
    if slow is True:
        peer_port = 5553
        db_dir = "server1"
    else:
        peer_port = 5554
        db_dir = "server2"
    blob_dir = os.path.join(db_dir, "blobfiles")
    os.mkdir(db_dir)
    os.mkdir(blob_dir)
    session = Session(
        conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=db_dir, lbryid="efgh",
        peer_finder=peer_finder, hash_announcer=hash_announcer,
        blob_dir=blob_dir, peer_port=peer_port,
        use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
        blob_tracker_class=DummyBlobAvailabilityTracker,
        is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1])
    if slow is True:
        # Throttle uploads to 2KB/s so the client exercises slow-peer handling.
        session.rate_limiter.set_ul_limit(2**11)

    def start_all():
        # Chain: set up session -> serve blobs -> make a blob -> publish hash.
        d = session.setup()
        d.addCallback(lambda _: start_server())
        d.addCallback(lambda _: create_single_blob())
        d.addCallback(put_blob_hash_on_queue)

        def print_error(err):
            logging.critical("Server error: %s", err.getErrorMessage())

        d.addErrback(print_error)
        return d

    def start_server():
        server_port = None
        # Query handlers answer blob availability, blob requests, and wallet info.
        query_handler_factories = {
            1: BlobAvailabilityHandlerFactory(session.blob_manager),
            2: BlobRequestHandlerFactory(session.blob_manager, session.wallet,
                                         session.payment_rate_manager, None),
            3: session.wallet.get_wallet_info_query_handler_factory(),
        }
        server_factory = ServerProtocolFactory(session.rate_limiter,
                                               query_handler_factories,
                                               session.peer_manager)
        server_port = reactor.listenTCP(peer_port, server_factory)
        logging.debug("Started listening")

        def kill_server():
            ds = []
            ds.append(session.shut_down())
            if server_port:
                ds.append(server_port.stopListening())
            kill_check.stop()
            dead_event.set()
            dl = defer.DeferredList(ds)
            dl.addCallback(lambda _: reactor.stop())
            return dl

        def check_for_kill():
            if kill_event.is_set():
                kill_server()

        # Poll for the kill signal once per second.
        kill_check = task.LoopingCall(check_for_kill)
        kill_check.start(1.0)
        return True

    def create_single_blob():
        blob_creator = session.blob_manager.get_blob_creator()
        # One 2MB blob of zero characters.
        blob_creator.write("0" * 2**21)
        return blob_creator.close()

    def put_blob_hash_on_queue(blob_hash):
        logging.debug("Telling the client to start running. Blob hash: %s", str(blob_hash))
        blob_hash_queue.put(blob_hash)
        logging.debug("blob hash has been added to the queue")

    # Defer the real work until after the reactor is running.
    reactor.callLater(1, start_all)
    if not reactor.running:
        reactor.run()
def start_lbry_reuploader(sd_hash, kill_event, dead_event, ready_event, n,
                          ul_rate_limit=None, is_generous=False):
    """Run a re-uploader process for functional tests: download the stream
    identified by sd_hash, then serve its blobs back to other peers.

    Each instance uses its own "server_<n>" directory and peer port 5553+n.
    Sets ready_event once its server is listening; polls kill_event once per
    second and on shutdown sets dead_event and stops the reactor.

    @param sd_hash: hash of the stream descriptor blob to download
    @param kill_event: event signalling this process to shut down
    @param dead_event: event set once shutdown has been initiated
    @param ready_event: event set once the blob server is listening
    @param n: index of this reuploader (determines port and directories)
    @param ul_rate_limit: optional upload rate cap applied to the session
    @param is_generous: unused here — the Session reads the configured
        'is_generous_host' setting instead
    """
    use_epoll_on_linux()
    from twisted.internet import reactor
    logging.debug("Starting the uploader")
    # Re-seed the crypto RNG after fork; seed a deterministic PRNG for tests.
    Random.atfork()
    r = random.Random()
    r.seed("start_lbry_reuploader")
    wallet = FakeWallet()
    peer_port = 5553 + n
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = RateLimiter()
    sd_identifier = StreamDescriptorIdentifier()
    db_dir = "server_" + str(n)
    blob_dir = os.path.join(db_dir, "blobfiles")
    os.mkdir(db_dir)
    os.mkdir(blob_dir)
    # NOTE(review): blob_dir is created above but the Session is given
    # blob_dir=None — confirm whether the on-disk directory is intentional.
    session = Session(
        conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=db_dir,
        lbryid="abcd" + str(n),
        peer_finder=peer_finder, hash_announcer=hash_announcer,
        blob_dir=None, peer_port=peer_port,
        use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
        blob_tracker_class=DummyBlobAvailabilityTracker,
        is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1])
    stream_info_manager = TempEncryptedFileMetadataManager()
    lbry_file_manager = EncryptedFileManager(session, stream_info_manager, sd_identifier)
    if ul_rate_limit is not None:
        session.rate_limiter.set_ul_limit(ul_rate_limit)

    def make_downloader(metadata, prm):
        # Build a downloader using each option's default value.
        info_validator = metadata.validator
        options = metadata.options
        factories = metadata.factories
        chosen_options = [
            o.default_value for o in options.get_downloader_options(info_validator, prm)
        ]
        return factories[0].make_downloader(metadata, chosen_options, prm)

    def download_file():
        # Fetch the sd blob, parse its metadata, then start the download.
        prm = session.payment_rate_manager
        d = download_sd_blob(session, sd_hash, prm)
        d.addCallback(sd_identifier.get_metadata_for_sd_blob)
        d.addCallback(make_downloader, prm)
        d.addCallback(lambda downloader: downloader.start())
        return d

    def start_transfer():
        logging.debug("Starting the transfer")
        d = session.setup()
        d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
        d.addCallback(lambda _: lbry_file_manager.setup())
        d.addCallback(lambda _: download_file())
        return d

    def start_server():
        server_port = None
        # Query handlers answer blob availability, blob requests, and wallet info.
        query_handler_factories = {
            1: BlobAvailabilityHandlerFactory(session.blob_manager),
            2: BlobRequestHandlerFactory(session.blob_manager, session.wallet,
                                         session.payment_rate_manager, None),
            3: session.wallet.get_wallet_info_query_handler_factory(),
        }
        server_factory = ServerProtocolFactory(session.rate_limiter,
                                               query_handler_factories,
                                               session.peer_manager)
        server_port = reactor.listenTCP(peer_port, server_factory)
        logging.debug("Started listening")

        def kill_server():
            ds = []
            ds.append(session.shut_down())
            ds.append(lbry_file_manager.stop())
            if server_port:
                ds.append(server_port.stopListening())
            kill_check.stop()
            dead_event.set()
            dl = defer.DeferredList(ds)
            dl.addCallback(lambda _: reactor.stop())
            return dl

        def check_for_kill():
            if kill_event.is_set():
                kill_server()

        # Poll for the kill signal once per second.
        kill_check = task.LoopingCall(check_for_kill)
        kill_check.start(1.0)
        # Tell the parent process we are serving.
        ready_event.set()
        logging.debug("set the ready event")

    # Give the reactor a moment to start, then download and begin serving.
    d = task.deferLater(reactor, 1.0, start_transfer)
    d.addCallback(lambda _: start_server())
    if not reactor.running:
        reactor.run()
def __init__(self, component_manager):
    """Initialize the base Component, then create this component's RateLimiter.

    @param component_manager: the manager this component registers with
        (passed through to Component.__init__)
    """
    Component.__init__(self, component_manager)
    # The rate limiter instance this component wraps and exposes.
    self.rate_limiter = RateLimiter()
class LBRYSession(object):
    """This class manages all important services common to any application that
    uses the network:

      - the hash announcer, which informs other peers that this peer is
        associated with some hash. Usually, this means this peer has a blob
        identified by the hash in question, but it can be used for other
        purposes.
      - the peer finder, which finds peers that are associated with some hash.
      - the blob manager, which keeps track of which blobs have been
        downloaded and provides access to them.
      - the rate limiter, which attempts to ensure download and upload rates
        stay below a set maximum.
      - upnp, which opens holes in compatible firewalls so that remote peers
        can connect to this peer.
    """

    def __init__(self, blob_data_payment_rate, db_dir=None, lbryid=None, peer_manager=None,
                 dht_node_port=None, known_dht_nodes=None, peer_finder=None, hash_announcer=None,
                 blob_dir=None, blob_manager=None, peer_port=None, use_upnp=True,
                 rate_limiter=None, wallet=None, dht_node_class=node.Node):
        """
        @param blob_data_payment_rate: The default payment rate for blob data

        @param db_dir: The directory in which levelDB files should be stored

        @param lbryid: The unique ID of this node

        @param peer_manager: An object which keeps track of all known peers.
            If None, a PeerManager will be created

        @param dht_node_port: The port on which the dht node should listen for
            incoming connections

        @param known_dht_nodes: A list of (host, port) nodes which the dht node
            should use to bootstrap into the dht

        @param peer_finder: An object which is used to look up peers that are
            associated with some hash. If None, a DHTPeerFinder will be used,
            which looks for peers in the distributed hash table.

        @param hash_announcer: An object which announces to other peers that
            this peer is associated with some hash. If None, and peer_port is
            not None, a DHTHashAnnouncer will be used. If None and peer_port is
            None, a DummyHashAnnouncer will be used, which will not actually
            announce anything.

        @param blob_dir: The directory in which blobs will be stored. If None
            and blob_manager is None, blobs will be stored in memory only.

        @param blob_manager: An object which keeps track of downloaded blobs
            and provides access to them. If None, and blob_dir is not None, a
            DiskBlobManager will be used, with the given blob_dir. If None and
            blob_dir is None, a TempBlobManager will be used, which stores
            blobs in memory only.

        @param peer_port: The port on which other peers should connect to this
            peer

        @param use_upnp: Whether or not to try to open a hole in the firewall
            so that outside peers can connect to this peer's peer_port and
            dht_node_port

        @param rate_limiter: An object which keeps track of the amount of data
            transferred to and from this peer, and can limit that rate if
            desired

        @param wallet: An object which will be used to keep track of expected
            payments and which will pay peers. If None, a wallet which uses
            the Point Trader system will be used, which is meant for testing
            only

        @param dht_node_class: The class used to construct the dht node in
            _setup_dht (defaults to node.Node; tests may inject a fake)

        @return:
        """
        self.db_dir = db_dir
        self.lbryid = lbryid
        self.peer_manager = peer_manager
        self.dht_node_port = dht_node_port
        self.known_dht_nodes = known_dht_nodes
        if self.known_dht_nodes is None:
            self.known_dht_nodes = []
        self.peer_finder = peer_finder
        self.hash_announcer = hash_announcer
        self.blob_dir = blob_dir
        self.blob_manager = blob_manager
        self.peer_port = peer_port
        self.use_upnp = use_upnp
        self.rate_limiter = rate_limiter
        # Assume loopback until UPnP discovery reports a real external address.
        self.external_ip = '127.0.0.1'
        # (port, protocol) pairs this session manages, torn down in _unset_upnp.
        self.upnp_redirects = []
        self.wallet = wallet
        self.dht_node_class = dht_node_class
        # Created in _setup_dht when no peer_finder was supplied.
        self.dht_node = None
        self.base_payment_rate_manager = BasePaymentRateManager(
            blob_data_payment_rate)

    def setup(self):
        """Create the blob directory and database if necessary, start all
        desired services.

        @return: a Deferred firing once UPnP (if enabled), the DHT (if no
            peer_finder was supplied), and the remaining components are set up.
        """
        log.debug("Setting up the lbry session")
        if self.lbryid is None:
            self.lbryid = generate_id()
        if self.wallet is None:
            self.wallet = PTCWallet(self.db_dir)
        if self.peer_manager is None:
            self.peer_manager = PeerManager()
        if self.use_upnp is True:
            d = self._try_upnp()
        else:
            d = defer.succeed(True)
        if self.peer_finder is None:
            d.addCallback(lambda _: self._setup_dht())
        else:
            if self.hash_announcer is None and self.peer_port is not None:
                # A custom peer_finder without an announcer means our blobs
                # cannot be advertised to the network.
                log.warning(
                    "The server has no way to advertise its available blobs.")
                self.hash_announcer = DummyHashAnnouncer()
        d.addCallback(lambda _: self._setup_other_components())
        return d

    def shut_down(self):
        """Stop all services.

        @return: a DeferredList over every component's (maybeDeferred) stop.
        """
        ds = []
        if self.dht_node is not None:
            ds.append(defer.maybeDeferred(self.dht_node.stop))
        if self.rate_limiter is not None:
            ds.append(defer.maybeDeferred(self.rate_limiter.stop))
        if self.peer_finder is not None:
            ds.append(defer.maybeDeferred(self.peer_finder.stop))
        if self.hash_announcer is not None:
            ds.append(defer.maybeDeferred(self.hash_announcer.stop))
        if self.wallet is not None:
            ds.append(defer.maybeDeferred(self.wallet.stop))
        if self.blob_manager is not None:
            ds.append(defer.maybeDeferred(self.blob_manager.stop))
        if self.use_upnp is True:
            ds.append(defer.maybeDeferred(self._unset_upnp))
        return defer.DeferredList(ds)

    def _try_upnp(self):
        """Attempt to discover a UPnP gateway and map peer/DHT ports.

        Runs the blocking miniupnpc calls in a thread. Unlike the older
        variant of this class, pre-existing mappings are also recorded in
        self.upnp_redirects (adopted), so they will be torn down on shutdown.
        Any error is logged and swallowed (returns False).
        """
        log.debug("In _try_upnp")

        def threaded_try_upnp():
            if self.use_upnp is False:
                log.debug("Not using upnp")
                return False
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                external_ip = u.externalipaddress()
                if external_ip != '0.0.0.0':
                    # '0.0.0.0' means the gateway doesn't know its address yet.
                    self.external_ip = external_ip
                if self.peer_port is not None:
                    if u.getspecificportmapping(self.peer_port, 'TCP') is None:
                        u.addportmapping(self.peer_port, 'TCP', u.lanaddr, self.peer_port,
                                         'LBRY peer port', '')
                        self.upnp_redirects.append((self.peer_port, 'TCP'))
                        log.info("Set UPnP redirect for TCP port %d", self.peer_port)
                    else:
                        # see comment below
                        log.warning(
                            "UPnP redirect already set for TCP port %d", self.peer_port)
                        self.upnp_redirects.append((self.peer_port, 'TCP'))
                if self.dht_node_port is not None:
                    if u.getspecificportmapping(self.dht_node_port, 'UDP') is None:
                        u.addportmapping(self.dht_node_port, 'UDP', u.lanaddr,
                                         self.dht_node_port, 'LBRY DHT port', '')
                        self.upnp_redirects.append((self.dht_node_port, 'UDP'))
                        # NOTE(review): "UPD" is a typo for "UDP" in this log
                        # message (runtime string left unchanged here).
                        log.info("Set UPnP redirect for UPD port %d", self.dht_node_port)
                    else:
                        # TODO: check that the existing redirect was put up by an old
                        # lbrynet session before grabbing it. If such a disconnected
                        # redirect exists, then upnp won't work unless the redirect is
                        # appended or is torn down and set back up. A bad shutdown of
                        # lbrynet could leave such a redirect up and cause problems on
                        # the next start. This could be problematic if a previous
                        # lbrynet session didn't make the redirect, and it was made by
                        # another application.
                        log.warning(
                            "UPnP redirect already set for UDP port %d", self.dht_node_port)
                        self.upnp_redirects.append((self.dht_node_port, 'UDP'))
                return True
            return False

        def upnp_failed(err):
            # Best effort: UPnP failure must not break session setup.
            log.warning("UPnP failed. Reason: %s", err.getErrorMessage())
            return False

        d = threads.deferToThread(threaded_try_upnp)
        d.addErrback(upnp_failed)
        return d

    def _setup_dht(self):
        """Create the DHT node (via the injectable dht_node_class), the peer
        finder, and (if needed) the hash announcer, resolve bootstrap hosts,
        and join the network.

        @return: a Deferred firing True once the node has joined and the
            peer finder / hash announcer manage loops are running.
        """
        from twisted.internet import reactor
        log.debug("Starting the dht")

        def match_port(h, p):
            # Pair each resolved address back with its port.
            return h, p

        def join_resolved_addresses(result):
            # Keep only successfully resolved bootstrap addresses.
            addresses = []
            for success, value in result:
                if success is True:
                    addresses.append(value)
            return addresses

        def start_dht(addresses):
            self.dht_node.joinNetwork(addresses)
            self.peer_finder.run_manage_loop()
            self.hash_announcer.run_manage_loop()
            return True

        ds = []
        for host, port in self.known_dht_nodes:
            d = reactor.resolve(host)
            d.addCallback(match_port, port)
            ds.append(d)

        self.dht_node = self.dht_node_class(udpPort=self.dht_node_port, lbryid=self.lbryid,
                                            externalIP=self.external_ip)
        self.peer_finder = DHTPeerFinder(self.dht_node, self.peer_manager)
        if self.hash_announcer is None:
            self.hash_announcer = DHTHashAnnouncer(self.dht_node, self.peer_port)

        dl = defer.DeferredList(ds)
        dl.addCallback(join_resolved_addresses)
        dl.addCallback(start_dht)
        return dl

    def _setup_other_components(self):
        """Create default rate limiter / blob manager if absent, then start
        the rate limiter, blob manager, and wallet.

        @return: a Deferred firing when the blob manager and wallet have
            started; failures are unwrapped from the DeferredList's FirstError.
        """
        log.debug("Setting up the rest of the components")
        if self.rate_limiter is None:
            self.rate_limiter = RateLimiter()
        if self.blob_manager is None:
            if self.blob_dir is None:
                # No blob directory configured: store blobs in memory only.
                self.blob_manager = TempBlobManager(self.hash_announcer)
            else:
                self.blob_manager = DiskBlobManager(self.hash_announcer,
                                                    self.blob_dir, self.db_dir)
        self.rate_limiter.start()
        d1 = self.blob_manager.setup()
        d2 = self.wallet.start()
        dl = defer.DeferredList([d1, d2], fireOnOneErrback=True, consumeErrors=True)
        # Surface the underlying failure rather than the FirstError wrapper.
        dl.addErrback(lambda err: err.value.subFailure)
        return dl

    def _unset_upnp(self):
        """Remove the UPnP redirects this session manages (best effort).

        Runs the blocking miniupnpc calls in a thread; errors are converted
        to strings and otherwise ignored.
        """
        def threaded_unset_upnp():
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                for port, protocol in self.upnp_redirects:
                    if u.getspecificportmapping(port, protocol) is None:
                        # Something external already removed our mapping.
                        log.warning(
                            "UPnP redirect for %s %d was removed by something else.",
                            protocol, port)
                    else:
                        u.deleteportmapping(port, protocol)
                        log.info("Removed UPnP redirect for %s %d.", protocol, port)
                self.upnp_redirects = []

        d = threads.deferToThread(threaded_unset_upnp)
        d.addErrback(lambda err: str(err))
        return d
class Session(object):
    """This class manages all important services common to any application that
    uses the network.

    It encapsulates:
      - the hash announcer, which informs other peers that this peer is associated
        with some hash. Usually, this means this peer has a blob identified by the
        hash in question, but it can be used for other purposes.
      - the peer finder, which finds peers that are associated with some hash.
      - the blob manager, which keeps track of which blobs have been downloaded
        and provides access to them.
      - the rate limiter, which attempts to ensure download and upload rates stay
        below a set maximum.
      - upnp, which opens holes in compatible firewalls so that remote peers can
        connect to this peer.
    """

    def __init__(self, blob_data_payment_rate, db_dir=None, node_id=None,
                 peer_manager=None, dht_node_port=None, known_dht_nodes=None,
                 peer_finder=None, hash_announcer=None, blob_dir=None,
                 blob_manager=None, peer_port=None, use_upnp=True,
                 rate_limiter=None, wallet=None, dht_node_class=node.Node,
                 blob_tracker_class=None, payment_rate_manager_class=None,
                 is_generous=True, external_ip=None, storage=None):
        """
        @param blob_data_payment_rate: The default payment rate for blob data
        @param db_dir: The directory in which levelDB files should be stored
        @param node_id: The unique ID of this node
        @param peer_manager: An object which keeps track of all known peers.
            If None, a PeerManager will be created
        @param dht_node_port: The port on which the dht node should listen for
            incoming connections
        @param known_dht_nodes: A list of nodes which the dht node should use to
            bootstrap into the dht
        @param peer_finder: An object which is used to look up peers that are
            associated with some hash. If None, a DHTPeerFinder will be used,
            which looks for peers in the distributed hash table.
        @param hash_announcer: An object which announces to other peers that this
            peer is associated with some hash. If None, and peer_port is not
            None, a DHTHashAnnouncer will be used. If None and peer_port is None,
            a DummyHashAnnouncer will be used, which will not actually announce
            anything.
        @param blob_dir: The directory in which blobs will be stored.
        @param blob_manager: An object which keeps track of downloaded blobs and
            provides access to them. If None, and blob_dir is not None, a
            DiskBlobManager will be used, with the given blob_dir.
        @param peer_port: The port on which other peers should connect to this
            peer
        @param use_upnp: Whether or not to try to open a hole in the firewall so
            that outside peers can connect to this peer's peer_port and
            dht_node_port
        @param rate_limiter: An object which keeps track of the amount of data
            transferred to and from this peer, and can limit that rate if desired
        @param wallet: An object which will be used to keep track of expected
            payments and which will pay peers. If None, a wallet which uses the
            Point Trader system will be used, which is meant for testing only
        @param storage: SQLiteStorage backing the blob manager; created from
            db_dir if not supplied
        """
        self.db_dir = db_dir
        self.node_id = node_id
        self.peer_manager = peer_manager
        self.dht_node_port = dht_node_port
        self.known_dht_nodes = known_dht_nodes
        if self.known_dht_nodes is None:
            self.known_dht_nodes = []
        self.peer_finder = peer_finder
        self.hash_announcer = hash_announcer
        self.blob_dir = blob_dir
        self.blob_manager = blob_manager
        self.blob_tracker = None
        self.blob_tracker_class = blob_tracker_class or BlobAvailabilityTracker
        self.peer_port = peer_port
        self.use_upnp = use_upnp
        self.rate_limiter = rate_limiter
        self.external_ip = external_ip
        self.upnp_redirects = []
        self.wallet = wallet
        self.dht_node_class = dht_node_class
        self.dht_node = None
        self.base_payment_rate_manager = BasePaymentRateManager(
            blob_data_payment_rate)
        self.payment_rate_manager = None
        self.payment_rate_manager_class = \
            payment_rate_manager_class or NegotiatedPaymentRateManager
        self.is_generous = is_generous
        self.storage = storage or SQLiteStorage(self.db_dir)

    def setup(self):
        """Create the blob directory and database if necessary, start all desired services"""
        log.debug("Starting session.")
        if self.node_id is None:
            self.node_id = generate_id()
        if self.wallet is None:
            from lbrynet.core.PTCWallet import PTCWallet
            self.wallet = PTCWallet(self.db_dir)
        if self.peer_manager is None:
            self.peer_manager = PeerManager()
        if self.use_upnp is True:
            d = self._try_upnp()
        else:
            d = defer.succeed(True)
        if self.peer_finder is None:
            d.addCallback(lambda _: self._setup_dht())
        else:
            if self.hash_announcer is None and self.peer_port is not None:
                log.warning(
                    "The server has no way to advertise its available blobs.")
                self.hash_announcer = DummyHashAnnouncer()
        d.addCallback(lambda _: self._setup_other_components())
        return d

    def shut_down(self):
        """Stop all services"""
        log.info('Stopping session.')
        ds = []
        if self.blob_tracker is not None:
            ds.append(defer.maybeDeferred(self.blob_tracker.stop))
        if self.dht_node is not None:
            ds.append(defer.maybeDeferred(self.dht_node.stop))
        if self.rate_limiter is not None:
            ds.append(defer.maybeDeferred(self.rate_limiter.stop))
        if self.peer_finder is not None:
            ds.append(defer.maybeDeferred(self.peer_finder.stop))
        if self.hash_announcer is not None:
            ds.append(defer.maybeDeferred(self.hash_announcer.stop))
        if self.wallet is not None:
            ds.append(defer.maybeDeferred(self.wallet.stop))
        if self.blob_manager is not None:
            ds.append(defer.maybeDeferred(self.blob_manager.stop))
        if self.use_upnp is True:
            ds.append(defer.maybeDeferred(self._unset_upnp))
        return defer.DeferredList(ds)

    def _try_upnp(self):
        """Try to create UPnP port redirects for the peer and DHT ports.

        Runs the blocking miniupnpc calls in a thread; returns a deferred that
        fires True if redirects were set, False otherwise (UPnP failures are
        swallowed — the session works without it).
        """
        log.debug("In _try_upnp")

        def get_free_port(upnp, port, protocol):
            # returns an existing mapping if it exists
            mapping = upnp.getspecificportmapping(port, protocol)
            if not mapping:
                return port
            if upnp.lanaddr == mapping[0]:
                return mapping
            return get_free_port(upnp, port + 1, protocol)

        def get_port_mapping(upnp, internal_port, protocol, description):
            # try to map to the requested port, if there is already a mapping
            # use the next external port available
            if protocol not in ['UDP', 'TCP']:
                raise Exception("invalid protocol")
            external_port = get_free_port(upnp, internal_port, protocol)
            if isinstance(external_port, tuple):
                log.info(
                    "Found existing UPnP redirect %s:%i (%s) to %s:%i, using it",
                    self.external_ip, external_port[1], protocol, upnp.lanaddr,
                    internal_port)
                return external_port[1], protocol
            upnp.addportmapping(external_port, protocol, upnp.lanaddr,
                                internal_port, description, '')
            log.info("Set UPnP redirect %s:%i (%s) to %s:%i", self.external_ip,
                     external_port, protocol, upnp.lanaddr, internal_port)
            return external_port, protocol

        def threaded_try_upnp():
            if self.use_upnp is False:
                log.debug("Not using upnp")
                return False
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                external_ip = u.externalipaddress()
                if external_ip != '0.0.0.0' and not self.external_ip:
                    # best not to rely on this external ip, the router can be
                    # behind layers of NATs
                    self.external_ip = external_ip
                if self.peer_port:
                    self.upnp_redirects.append(
                        get_port_mapping(u, self.peer_port, 'TCP', 'LBRY peer port'))
                if self.dht_node_port:
                    self.upnp_redirects.append(
                        get_port_mapping(u, self.dht_node_port, 'UDP', 'LBRY DHT port'))
                return True
            return False

        def upnp_failed(err):
            log.warning("UPnP failed. Reason: %s", err.getErrorMessage())
            return False

        d = threads.deferToThread(threaded_try_upnp)
        d.addErrback(upnp_failed)
        return d

    # the callback, if any, will be invoked once the joining procedure
    # has terminated
    def join_dht(self, cb=None):
        from twisted.internet import reactor

        def join_resolved_addresses(result):
            # DeferredList yields (success, value) pairs; keep only successes.
            addresses = []
            for success, value in result:
                if success is True:
                    addresses.append(value)
            return addresses

        @defer.inlineCallbacks
        def join_network(knownNodes):
            log.debug("join DHT using known nodes: " + str(knownNodes))
            result = yield self.dht_node.joinNetwork(knownNodes)
            defer.returnValue(result)

        ds = []
        for host, port in self.known_dht_nodes:
            d = reactor.resolve(host)
            # FIX: bind the current port as a default argument. The original
            # `lambda h: (h, port)` closed over the loop variable, so by the time
            # the resolve callbacks fired every host was paired with the port of
            # the *last* known node (classic late-binding closure bug).
            d.addCallback(lambda h, p=port: (h, p))
            ds.append(d)

        dl = defer.DeferredList(ds)
        dl.addCallback(join_resolved_addresses)
        dl.addCallback(join_network)
        if cb:
            dl.addCallback(cb)
        return dl

    def _setup_dht(self):
        """Create the DHT node, peer finder and hash announcer, then join the DHT."""
        log.info("Starting DHT")

        def start_dht(join_network_result):
            self.hash_announcer.run_manage_loop()
            return True

        self.dht_node = self.dht_node_class(
            udpPort=self.dht_node_port,
            node_id=self.node_id,
            externalIP=self.external_ip,
            peerPort=self.peer_port
        )
        self.peer_finder = DHTPeerFinder(self.dht_node, self.peer_manager)
        if self.hash_announcer is None:
            self.hash_announcer = DHTHashAnnouncer(self.dht_node, self.peer_port)
        self.dht_node.startNetwork()

        # pass start_dht() as callback to start the remaining components
        # after joining the DHT
        return self.join_dht(start_dht)

    def _setup_other_components(self):
        """Instantiate remaining components (rate limiter, blob manager, blob
        tracker, payment rate manager) and start storage, wallet and tracker.
        """
        log.debug("Setting up the rest of the components")

        if self.rate_limiter is None:
            self.rate_limiter = RateLimiter()

        if self.blob_manager is None:
            if self.blob_dir is None:
                raise Exception(
                    "TempBlobManager is no longer supported, specify BlobManager or db_dir")
            else:
                self.blob_manager = DiskBlobManager(self.hash_announcer,
                                                    self.blob_dir, self.storage)

        if self.blob_tracker is None:
            self.blob_tracker = self.blob_tracker_class(
                self.blob_manager, self.peer_finder, self.dht_node)
        if self.payment_rate_manager is None:
            self.payment_rate_manager = self.payment_rate_manager_class(
                self.base_payment_rate_manager, self.blob_tracker, self.is_generous)

        self.rate_limiter.start()
        # NOTE(review): unlike the older variant, this path never calls
        # blob_manager.setup(); presumably the storage-backed DiskBlobManager
        # needs no separate setup — confirm against DiskBlobManager.
        d = self.storage.setup()
        d.addCallback(lambda _: self.wallet.start())
        d.addCallback(lambda _: self.blob_tracker.start())
        return d

    def _unset_upnp(self):
        """Tear down the UPnP port redirects created by _try_upnp (best-effort)."""
        log.info("Unsetting upnp for session")

        def threaded_unset_upnp():
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                for port, protocol in self.upnp_redirects:
                    if u.getspecificportmapping(port, protocol) is None:
                        log.warning(
                            "UPnP redirect for %s %d was removed by something else.",
                            protocol, port)
                    else:
                        u.deleteportmapping(port, protocol)
                        log.info("Removed UPnP redirect for %s %d.", protocol, port)
                self.upnp_redirects = []

        d = threads.deferToThread(threaded_unset_upnp)
        # Best-effort cleanup: convert any failure to a string so shutdown proceeds.
        d.addErrback(lambda err: str(err))
        return d
class Session(object):
    """This class manages all important services common to any application that
    uses the network.

    It encapsulates:
      - the hash announcer, which informs other peers that this peer is associated
        with some hash. Usually, this means this peer has a blob identified by the
        hash in question, but it can be used for other purposes.
      - the peer finder, which finds peers that are associated with some hash.
      - the blob manager, which keeps track of which blobs have been downloaded
        and provides access to them.
      - the rate limiter, which attempts to ensure download and upload rates stay
        below a set maximum.
      - upnp, which opens holes in compatible firewalls so that remote peers can
        connect to this peer.
    """

    def __init__(self, blob_data_payment_rate, db_dir=None, node_id=None,
                 peer_manager=None, dht_node_port=None, known_dht_nodes=None,
                 peer_finder=None, hash_announcer=None, blob_dir=None,
                 blob_manager=None, peer_port=None, use_upnp=True,
                 rate_limiter=None, wallet=None, dht_node_class=node.Node,
                 blob_tracker_class=None, payment_rate_manager_class=None,
                 is_generous=True, external_ip=None):
        """
        @param blob_data_payment_rate: The default payment rate for blob data
        @param db_dir: The directory in which levelDB files should be stored
        @param node_id: The unique ID of this node
        @param peer_manager: An object which keeps track of all known peers.
            If None, a PeerManager will be created
        @param dht_node_port: The port on which the dht node should listen for
            incoming connections
        @param known_dht_nodes: A list of nodes which the dht node should use to
            bootstrap into the dht
        @param peer_finder: An object which is used to look up peers that are
            associated with some hash. If None, a DHTPeerFinder will be used,
            which looks for peers in the distributed hash table.
        @param hash_announcer: An object which announces to other peers that this
            peer is associated with some hash. If None, and peer_port is not
            None, a DHTHashAnnouncer will be used. If None and peer_port is None,
            a DummyHashAnnouncer will be used, which will not actually announce
            anything.
        @param blob_dir: The directory in which blobs will be stored. If None and
            blob_manager is None, blobs will be stored in memory only.
        @param blob_manager: An object which keeps track of downloaded blobs and
            provides access to them. If None, and blob_dir is not None, a
            DiskBlobManager will be used, with the given blob_dir. If None and
            blob_dir is None, a TempBlobManager will be used, which stores blobs
            in memory only.
        @param peer_port: The port on which other peers should connect to this
            peer
        @param use_upnp: Whether or not to try to open a hole in the firewall so
            that outside peers can connect to this peer's peer_port and
            dht_node_port
        @param rate_limiter: An object which keeps track of the amount of data
            transferred to and from this peer, and can limit that rate if desired
        @param wallet: An object which will be used to keep track of expected
            payments and which will pay peers. If None, a wallet which uses the
            Point Trader system will be used, which is meant for testing only
        """
        self.db_dir = db_dir
        self.node_id = node_id
        self.peer_manager = peer_manager
        self.dht_node_port = dht_node_port
        self.known_dht_nodes = known_dht_nodes
        if self.known_dht_nodes is None:
            self.known_dht_nodes = []
        self.peer_finder = peer_finder
        self.hash_announcer = hash_announcer
        self.blob_dir = blob_dir
        self.blob_manager = blob_manager
        self.blob_tracker = None
        self.blob_tracker_class = blob_tracker_class or BlobAvailabilityTracker
        self.peer_port = peer_port
        self.use_upnp = use_upnp
        self.rate_limiter = rate_limiter
        self.external_ip = external_ip
        self.upnp_redirects = []
        self.wallet = wallet
        self.dht_node_class = dht_node_class
        self.dht_node = None
        self.base_payment_rate_manager = BasePaymentRateManager(blob_data_payment_rate)
        self.payment_rate_manager = None
        self.payment_rate_manager_class = \
            payment_rate_manager_class or NegotiatedPaymentRateManager
        self.is_generous = is_generous

    def setup(self):
        """Create the blob directory and database if necessary, start all desired services"""
        log.debug("Starting session.")
        if self.node_id is None:
            self.node_id = generate_id()
        if self.wallet is None:
            from lbrynet.core.PTCWallet import PTCWallet
            self.wallet = PTCWallet(self.db_dir)
        if self.peer_manager is None:
            self.peer_manager = PeerManager()
        if self.use_upnp is True:
            d = self._try_upnp()
        else:
            d = defer.succeed(True)
        if self.peer_finder is None:
            d.addCallback(lambda _: self._setup_dht())
        else:
            if self.hash_announcer is None and self.peer_port is not None:
                log.warning("The server has no way to advertise its available blobs.")
                self.hash_announcer = DummyHashAnnouncer()
        d.addCallback(lambda _: self._setup_other_components())
        return d

    def shut_down(self):
        """Stop all services"""
        log.info('Stopping session.')
        ds = []
        if self.blob_tracker is not None:
            ds.append(defer.maybeDeferred(self.blob_tracker.stop))
        if self.dht_node is not None:
            ds.append(defer.maybeDeferred(self.dht_node.stop))
        if self.rate_limiter is not None:
            ds.append(defer.maybeDeferred(self.rate_limiter.stop))
        if self.peer_finder is not None:
            ds.append(defer.maybeDeferred(self.peer_finder.stop))
        if self.hash_announcer is not None:
            ds.append(defer.maybeDeferred(self.hash_announcer.stop))
        if self.wallet is not None:
            ds.append(defer.maybeDeferred(self.wallet.stop))
        if self.blob_manager is not None:
            ds.append(defer.maybeDeferred(self.blob_manager.stop))
        if self.use_upnp is True:
            ds.append(defer.maybeDeferred(self._unset_upnp))
        return defer.DeferredList(ds)

    def _try_upnp(self):
        """Try to create UPnP port redirects for the peer and DHT ports.

        Runs the blocking miniupnpc calls in a thread; returns a deferred that
        fires True if redirects were set, False otherwise (UPnP failures are
        swallowed — the session works without it).
        """
        log.debug("In _try_upnp")

        def get_free_port(upnp, port, protocol):
            # returns an existing mapping if it exists
            mapping = upnp.getspecificportmapping(port, protocol)
            if not mapping:
                return port
            if upnp.lanaddr == mapping[0]:
                return mapping
            return get_free_port(upnp, port + 1, protocol)

        def get_port_mapping(upnp, internal_port, protocol, description):
            # try to map to the requested port, if there is already a mapping
            # use the next external port available
            if protocol not in ['UDP', 'TCP']:
                raise Exception("invalid protocol")
            external_port = get_free_port(upnp, internal_port, protocol)
            if isinstance(external_port, tuple):
                log.info("Found existing UPnP redirect %s:%i (%s) to %s:%i, using it",
                         self.external_ip, external_port[1], protocol, upnp.lanaddr,
                         internal_port)
                return external_port[1], protocol
            upnp.addportmapping(external_port, protocol, upnp.lanaddr,
                                internal_port, description, '')
            log.info("Set UPnP redirect %s:%i (%s) to %s:%i", self.external_ip,
                     external_port, protocol, upnp.lanaddr, internal_port)
            return external_port, protocol

        def threaded_try_upnp():
            if self.use_upnp is False:
                log.debug("Not using upnp")
                return False
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                external_ip = u.externalipaddress()
                if external_ip != '0.0.0.0' and not self.external_ip:
                    # best not to rely on this external ip, the router can be
                    # behind layers of NATs
                    self.external_ip = external_ip
                if self.peer_port:
                    self.upnp_redirects.append(
                        get_port_mapping(u, self.peer_port, 'TCP', 'LBRY peer port'))
                if self.dht_node_port:
                    self.upnp_redirects.append(
                        get_port_mapping(u, self.dht_node_port, 'UDP', 'LBRY DHT port'))
                return True
            return False

        def upnp_failed(err):
            log.warning("UPnP failed. Reason: %s", err.getErrorMessage())
            return False

        d = threads.deferToThread(threaded_try_upnp)
        d.addErrback(upnp_failed)
        return d

    def _setup_dht(self):
        """Create the DHT node, peer finder and hash announcer, resolve the
        bootstrap nodes, and join the network.
        """
        from twisted.internet import reactor
        log.info("Starting DHT")

        def join_resolved_addresses(result):
            # DeferredList yields (success, value) pairs; keep only successes.
            addresses = []
            for success, value in result:
                if success is True:
                    addresses.append(value)
            return addresses

        def start_dht(join_network_result):
            self.peer_finder.run_manage_loop()
            self.hash_announcer.run_manage_loop()
            return True

        ds = []
        for host, port in self.known_dht_nodes:
            d = reactor.resolve(host)
            # FIX: bind the current port as a default argument. The original
            # `lambda h: (h, port)` closed over the loop variable, so by the time
            # the resolve callbacks fired every host was paired with the port of
            # the *last* known node (classic late-binding closure bug).
            d.addCallback(lambda h, p=port: (h, p))
            ds.append(d)

        self.dht_node = self.dht_node_class(
            udpPort=self.dht_node_port,
            node_id=self.node_id,
            externalIP=self.external_ip,
            peerPort=self.peer_port
        )
        self.peer_finder = DHTPeerFinder(self.dht_node, self.peer_manager)
        if self.hash_announcer is None:
            self.hash_announcer = DHTHashAnnouncer(self.dht_node, self.peer_port)

        dl = defer.DeferredList(ds)
        dl.addCallback(join_resolved_addresses)
        dl.addCallback(self.dht_node.joinNetwork)
        dl.addCallback(start_dht)
        return dl

    def _setup_other_components(self):
        """Instantiate remaining components (rate limiter, blob manager, blob
        tracker, payment rate manager) and start them.
        """
        log.debug("Setting up the rest of the components")

        if self.rate_limiter is None:
            self.rate_limiter = RateLimiter()

        if self.blob_manager is None:
            if self.blob_dir is None:
                raise Exception(
                    "TempBlobManager is no longer supported, specify BlobManager or db_dir")
            else:
                self.blob_manager = DiskBlobManager(self.hash_announcer,
                                                    self.blob_dir, self.db_dir)

        if self.blob_tracker is None:
            self.blob_tracker = self.blob_tracker_class(self.blob_manager,
                                                        self.peer_finder,
                                                        self.dht_node)
        if self.payment_rate_manager is None:
            self.payment_rate_manager = self.payment_rate_manager_class(
                self.base_payment_rate_manager, self.blob_tracker, self.is_generous)

        self.rate_limiter.start()
        d1 = self.blob_manager.setup()
        d2 = self.wallet.start()
        dl = defer.DeferredList([d1, d2], fireOnOneErrback=True, consumeErrors=True)
        dl.addCallback(lambda _: self.blob_tracker.start())
        return dl

    def _unset_upnp(self):
        """Tear down the UPnP port redirects created by _try_upnp (best-effort)."""
        log.info("Unsetting upnp for session")

        def threaded_unset_upnp():
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
            if num_devices_found > 0:
                u.selectigd()
                for port, protocol in self.upnp_redirects:
                    if u.getspecificportmapping(port, protocol) is None:
                        log.warning(
                            "UPnP redirect for %s %d was removed by something else.",
                            protocol, port)
                    else:
                        u.deleteportmapping(port, protocol)
                        log.info("Removed UPnP redirect for %s %d.", protocol, port)
                self.upnp_redirects = []

        d = threads.deferToThread(threaded_unset_upnp)
        # Best-effort cleanup: convert any failure to a string so shutdown proceeds.
        d.addErrback(lambda err: str(err))
        return d
class LbryUploader(object):
    """Test helper that serves a single generated stream over TCP port 5553.

    `setup()` builds a throw-away session (fake wallet/peer finder, on-disk
    blob store), publishes a generated file, starts a blob server, and returns
    the stream's sd_hash. `stop()` tears everything down again.
    """

    def __init__(self, file_size, ul_rate_limit=None):
        # size of the generated test file (bytes) and optional upload cap
        self.file_size = file_size
        self.ul_rate_limit = ul_rate_limit
        self.kill_check = None
        # these attributes get defined in `start`
        self.db_dir = None
        self.blob_dir = None
        self.wallet = None
        self.peer_manager = None
        self.rate_limiter = None
        self.prm = None
        self.storage = None
        self.blob_manager = None
        self.lbry_file_manager = None
        self.server_port = None

    @defer.inlineCallbacks
    def setup(self):
        """Create all components, publish the test file and start serving.

        Returns (via deferred) the sd_hash of the published stream.
        """
        init_conf_windows()

        self.db_dir, self.blob_dir = mk_db_and_blob_dir()
        self.wallet = FakeWallet()
        self.peer_manager = PeerManager()
        self.rate_limiter = RateLimiter()
        if self.ul_rate_limit is not None:
            self.rate_limiter.set_ul_limit(self.ul_rate_limit)
        self.prm = OnlyFreePaymentsManager()
        self.storage = SQLiteStorage(self.db_dir)
        self.blob_manager = DiskBlobManager(self.blob_dir, self.storage)
        self.lbry_file_manager = EncryptedFileManager(
            FakePeerFinder(5553, self.peer_manager, 1), self.rate_limiter,
            self.blob_manager, self.wallet, self.prm, self.storage,
            StreamDescriptorIdentifier())

        yield self.storage.setup()
        yield self.blob_manager.setup()
        yield self.lbry_file_manager.setup()

        # wire up the three query handlers the blob server answers
        handler_factories = {
            1: BlobAvailabilityHandlerFactory(self.blob_manager),
            2: BlobRequestHandlerFactory(self.blob_manager, self.wallet,
                                         self.prm, None),
            3: self.wallet.get_wallet_info_query_handler_factory(),
        }
        factory = ServerProtocolFactory(self.rate_limiter, handler_factories,
                                        self.peer_manager)
        self.server_port = reactor.listenTCP(5553, factory, interface="localhost")

        # publish a deterministic generated file and hand back its sd hash
        payload = GenFile(self.file_size, bytes(i for i in range(0, 64, 6)))
        stream = yield create_lbry_file(self.blob_manager, self.storage, self.prm,
                                        self.lbry_file_manager, "test_file", payload)
        defer.returnValue(stream.sd_hash)

    @defer.inlineCallbacks
    def stop(self):
        """Delete published streams, stop all components and remove temp state."""
        lbry_files = self.lbry_file_manager.lbry_files
        for stream in lbry_files:
            yield self.lbry_file_manager.delete_lbry_file(stream)
        yield self.lbry_file_manager.stop()
        yield self.blob_manager.stop()
        yield self.storage.stop()
        self.server_port.stopListening()
        rm_db_and_blob_dir(self.db_dir, self.blob_dir)
        if os.path.exists("test_file"):
            os.remove("test_file")
def start_lbry_reuploader(sd_hash, kill_event, dead_event,
                          ready_event, n, ul_rate_limit=None, is_generous=False):
    """Run a re-uploader process: download the stream `sd_hash` from the seed
    node, then serve it on port 5553+n until `kill_event` is set.

    Signals `ready_event` once running and `dead_event` after teardown.
    NOTE(review): the `is_generous` parameter is never used — the session is
    built with the conf default instead; confirm whether that is intentional.
    """
    use_epoll_on_linux()
    init_conf_windows()
    from twisted.internet import reactor

    logging.debug("Starting the uploader")

    wallet = FakeWallet()
    peer_port = 5553 + n
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = RateLimiter()
    sd_identifier = StreamDescriptorIdentifier()

    db_dir, blob_dir = mk_db_and_blob_dir()
    session = Session(
        conf.ADJUSTABLE_SETTINGS['data_rate'][1],
        db_dir=db_dir,
        node_id="abcd" + str(n),
        dht_node_port=4446,
        dht_node_class=FakeNode,
        peer_finder=peer_finder,
        hash_announcer=hash_announcer,
        blob_dir=blob_dir,
        peer_port=peer_port,
        use_upnp=False,
        rate_limiter=rate_limiter,
        wallet=wallet,
        blob_tracker_class=DummyBlobAvailabilityTracker,
        is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
        external_ip="127.0.0.1")
    lbry_file_manager = EncryptedFileManager(session, sd_identifier)
    if ul_rate_limit is not None:
        session.rate_limiter.set_ul_limit(ul_rate_limit)

    def make_downloader(metadata, prm, download_directory):
        # first registered factory is the plain file downloader
        return metadata.factories[0].make_downloader(
            metadata, prm.min_blob_data_payment_rate, prm, download_directory)

    def download_file():
        prm = session.payment_rate_manager
        d = download_sd_blob(session, sd_hash, prm)
        d.addCallback(sd_identifier.get_metadata_for_sd_blob)
        d.addCallback(make_downloader, prm, db_dir)
        d.addCallback(lambda downloader: downloader.start())
        return d

    def start_transfer():
        logging.debug("Starting the transfer")
        d = session.setup()
        d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
        d.addCallback(lambda _: lbry_file_manager.setup())
        d.addCallback(lambda _: download_file())
        return d

    def start_server():
        server_port = None
        handler_factories = {
            1: BlobAvailabilityHandlerFactory(session.blob_manager),
            2: BlobRequestHandlerFactory(session.blob_manager, session.wallet,
                                         session.payment_rate_manager, None),
            3: session.wallet.get_wallet_info_query_handler_factory(),
        }
        factory = ServerProtocolFactory(session.rate_limiter, handler_factories,
                                        session.peer_manager)
        server_port = reactor.listenTCP(peer_port, factory)
        logging.debug("Started listening")

        def kill_server():
            # tear everything down, signal the parent, then stop the reactor
            ds = [session.shut_down(), lbry_file_manager.stop()]
            if server_port:
                ds.append(server_port.stopListening())
            ds.append(rm_db_and_blob_dir(db_dir, blob_dir))
            kill_check.stop()
            dead_event.set()
            dl = defer.DeferredList(ds)
            dl.addCallback(lambda _: reactor.stop())
            return dl

        def check_for_kill():
            if kill_event.is_set():
                kill_server()

        kill_check = task.LoopingCall(check_for_kill)
        kill_check.start(1.0)
        ready_event.set()
        logging.debug("set the ready event")

    d = task.deferLater(reactor, 1.0, start_transfer)
    d.addCallback(lambda _: start_server())
    if not reactor.running:
        reactor.run()