class TriblerLaunchMany(TaskManager):

    def __init__(self):
        """ Called only once (unless we have multiple Sessions) by MainThread """
        super(TriblerLaunchMany, self).__init__()

        self.initComplete = False
        self.registered = False
        self.dispersy = None
        self.ipv8 = None
        self.state_cb_count = 0
        self.previous_active_downloads = []
        self.download_states_lc = None
        self.get_peer_list = []

        self._logger = logging.getLogger(self.__class__.__name__)

        self.downloads = {}
        self.upnp_ports = []

        self.session = None
        self.session_lock = None
        self.sessdoneflag = Event()

        self.shutdownstarttime = None

        # modules
        self.torrent_store = None
        self.metadata_store = None
        self.rtorrent_handler = None
        self.tftp_handler = None
        self.api_manager = None
        self.watch_folder = None
        self.version_check_manager = None
        self.resource_monitor = None

        self.category = None
        self.peer_db = None
        self.torrent_db = None
        self.mypref_db = None
        self.votecast_db = None
        self.channelcast_db = None

        self.search_manager = None
        self.channel_manager = None

        self.video_server = None

        self.mainline_dht = None
        self.ltmgr = None
        self.tracker_manager = None
        self.torrent_checker = None
        self.tunnel_community = None
        self.triblerchain_community = None

        self.startup_deferred = Deferred()

        self.credit_mining_manager = None
        self.market_community = None

    def register(self, session, session_lock):
        assert isInIOThread()
        if not self.registered:
            self.registered = True

            self.session = session
            self.session_lock = session_lock

            # On Mac, we bundle the root certificate for the SSL validation since Twisted is not using the root
            # certificates provided by the system trust store.
            if sys.platform == 'darwin':
                os.environ['SSL_CERT_FILE'] = os.path.join(get_lib_path(), 'root_certs_mac.pem')

            if self.session.config.get_torrent_store_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.torrent_store = LevelDbStore(self.session.config.get_torrent_store_dir())
                if not self.torrent_store.get_db():
                    raise RuntimeError("Torrent store (leveldb) is None which should not normally happen")

            if self.session.config.get_metadata_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.metadata_store = LevelDbStore(self.session.config.get_metadata_store_dir())
                if not self.metadata_store.get_db():
                    raise RuntimeError("Metadata store (leveldb) is None which should not normally happen")

            # torrent collecting: RemoteTorrentHandler
            if self.session.config.get_torrent_collecting_enabled():
                from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler
                self.rtorrent_handler = RemoteTorrentHandler(self.session)

            # TODO(emilon): move this to a megacache component or smth
            if self.session.config.get_megacache_enabled():
                from Tribler.Core.CacheDB.SqliteCacheDBHandler import (PeerDBHandler, TorrentDBHandler,
                                                                       MyPreferenceDBHandler, VoteCastDBHandler,
                                                                       ChannelCastDBHandler)
                from Tribler.Core.Category.Category import Category

                self._logger.debug('tlm: Reading Session state from %s', self.session.config.get_state_dir())

                self.category = Category()

                # create DBHandlers
                self.peer_db = PeerDBHandler(self.session)
                self.torrent_db = TorrentDBHandler(self.session)
                self.mypref_db = MyPreferenceDBHandler(self.session)
                self.votecast_db = VoteCastDBHandler(self.session)
                self.channelcast_db = ChannelCastDBHandler(self.session)

                # initializes DBHandlers
                self.peer_db.initialize()
                self.torrent_db.initialize()
                self.mypref_db.initialize()
                self.votecast_db.initialize()
                self.channelcast_db.initialize()

                from Tribler.Core.Modules.tracker_manager import TrackerManager
                self.tracker_manager = TrackerManager(self.session)
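
            # Each of the components below is optional: it is only constructed when its
            # configuration flag (video server, IPv8, Dispersy, torrent/channel search) is enabled.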
            if self.session.config.get_video_server_enabled():
                self.video_server = VideoServer(self.session.config.get_video_server_port(), self.session)
                self.video_server.start()

            # IPv8
            if self.session.config.get_ipv8_enabled():
                from Tribler.pyipv8.ipv8.configuration import get_default_configuration
                ipv8_config = get_default_configuration()
                ipv8_config['port'] = self.session.config.get_dispersy_port()
                ipv8_config['address'] = self.session.config.get_ipv8_address()
                ipv8_config['overlays'] = []
                ipv8_config['keys'] = []  # We load the keys ourselves

                if self.session.config.get_ipv8_bootstrap_override():
                    import Tribler.pyipv8.ipv8.deprecated.community as community_file
                    community_file._DEFAULT_ADDRESSES = [self.session.config.get_ipv8_bootstrap_override()]
                    community_file._DNS_ADDRESSES = []

                self.ipv8 = IPv8(ipv8_config)

                self.session.config.set_anon_proxy_settings(
                    2, ("127.0.0.1", self.session.config.get_tunnel_community_socks5_listen_ports()))

            # Dispersy
            self.tftp_handler = None
            if self.session.config.get_dispersy_enabled():
                from Tribler.dispersy.dispersy import Dispersy
                from Tribler.dispersy.endpoint import MIMEndpoint
                from Tribler.dispersy.endpoint import IPv8toDispersyAdapter

                # set communication endpoint
                if self.session.config.get_ipv8_enabled():
                    dispersy_endpoint = IPv8toDispersyAdapter(self.ipv8.endpoint)
                else:
                    dispersy_endpoint = MIMEndpoint(self.session.config.get_dispersy_port())

                working_directory = unicode(self.session.config.get_state_dir())
                self.dispersy = Dispersy(dispersy_endpoint, working_directory)
                self.dispersy.statistics.enable_debug_statistics(False)

                # register TFTP service
                from Tribler.Core.TFTP.handler import TftpHandler
                self.tftp_handler = TftpHandler(self.session, dispersy_endpoint, "fffffffd".decode('hex'),
                                                block_size=1024)
                self.tftp_handler.initialize()

            # Torrent search
            if self.session.config.get_torrent_search_enabled() or self.session.config.get_channel_search_enabled():
                self.search_manager = SearchManager(self.session)
                self.search_manager.initialize()

        if not self.initComplete:
            self.init()

        self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER, [NTFY_STARTED])
        self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None)
        return self.startup_deferred

    def on_tribler_started(self, subject, changetype, objectID, *args):
        reactor.callFromThread(self.startup_deferred.callback, None)

    @blocking_call_on_reactor_thread
    def load_ipv8_overlays(self):
        # Discovery Community
        with open(self.session.config.get_permid_keypair_filename(), 'r') as key_file:
            content = key_file.read()
        content = content[31:-30].replace('\n', '').decode("BASE64")
        peer = Peer(M2CryptoSK(keystring=content))
        discovery_community = DiscoveryCommunity(peer, self.ipv8.endpoint, self.ipv8.network)
        discovery_community.resolve_dns_bootstrap_addresses()
        self.ipv8.overlays.append(discovery_community)
        self.ipv8.strategies.append((RandomChurn(discovery_community), -1))

        if not self.session.config.get_dispersy_enabled():
            self.ipv8.strategies.append((RandomWalk(discovery_community), 20))

        # TriblerChain Community
        if self.session.config.get_trustchain_enabled():
            triblerchain_peer = Peer(self.session.trustchain_keypair)

            from Tribler.community.triblerchain.community import TriblerChainCommunity
            self.triblerchain_community = TriblerChainCommunity(
                triblerchain_peer, self.ipv8.endpoint, self.ipv8.network,
                tribler_session=self.session,
                working_directory=self.session.config.get_state_dir())
            self.ipv8.overlays.append(self.triblerchain_community)
            self.ipv8.strategies.append((EdgeWalk(self.triblerchain_community), 20))
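
        # Note: the tunnel community created below is handed the TriblerChain community
        # constructed above (as triblerchain_community); this is why the TriblerChain
        # overlay is loaded first, and early_shutdown() unloads them in the reverse order.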
        # Tunnel Community
        if self.session.config.get_tunnel_community_enabled():
            tunnel_peer = Peer(self.session.trustchain_keypair)

            from Tribler.community.triblertunnel.community import TriblerTunnelCommunity
            self.tunnel_community = TriblerTunnelCommunity(
                tunnel_peer, self.ipv8.endpoint, self.ipv8.network,
                tribler_session=self.session,
                dht_provider=MainlineDHTProvider(self.mainline_dht, self.session.config.get_dispersy_port()),
                triblerchain_community=self.triblerchain_community)
            self.ipv8.overlays.append(self.tunnel_community)
            self.ipv8.strategies.append((RandomWalk(self.tunnel_community), 20))

        # Market Community
        if self.session.config.get_market_community_enabled():
            wallets = {}
            try:
                from Tribler.community.market.wallet.btc_wallet import BitcoinWallet, BitcoinTestnetWallet
                wallet_type = BitcoinTestnetWallet if self.session.config.get_btc_testnet() else BitcoinWallet
                btc_wallet = wallet_type(os.path.join(self.session.config.get_state_dir(), 'wallet'))
                wallets[btc_wallet.get_identifier()] = btc_wallet
            except ImportError:
                self._logger.error("Electrum wallet cannot be found, Bitcoin trading not available!")

            mc_wallet = TrustchainWallet(self.triblerchain_community)
            wallets[mc_wallet.get_identifier()] = mc_wallet

            if self.session.config.get_dummy_wallets_enabled():
                # For debugging purposes, we create dummy wallets
                dummy_wallet1 = DummyWallet1()
                wallets[dummy_wallet1.get_identifier()] = dummy_wallet1

                dummy_wallet2 = DummyWallet2()
                wallets[dummy_wallet2.get_identifier()] = dummy_wallet2

            from Tribler.community.market.community import MarketCommunity
            market_peer = Peer(self.session.tradechain_keypair)

            self.market_community = MarketCommunity(
                market_peer, self.ipv8.endpoint, self.ipv8.network,
                tribler_session=self.session,
                wallets=wallets,
                working_directory=self.session.config.get_state_dir())

            self.ipv8.overlays.append(self.market_community)
            self.ipv8.strategies.append((RandomWalk(self.market_community), 20))

    @blocking_call_on_reactor_thread
    def load_dispersy_communities(self):
        self._logger.info("tribler: Preparing Dispersy communities...")
        now_time = timemod.time()
        default_kwargs = {'tribler_session': self.session}

        # Search Community
        if self.session.config.get_torrent_search_enabled() and self.dispersy:
            from Tribler.community.search.community import SearchCommunity
            self.dispersy.define_auto_load(SearchCommunity, self.session.dispersy_member, load=True,
                                           kargs=default_kwargs)

        # AllChannel Community
        if self.session.config.get_channel_search_enabled() and self.dispersy:
            from Tribler.community.allchannel.community import AllChannelCommunity
            self.dispersy.define_auto_load(AllChannelCommunity, self.session.dispersy_member, load=True,
                                           kargs=default_kwargs)

        # Channel Community
        if self.session.config.get_channel_community_enabled() and self.dispersy:
            from Tribler.community.channel.community import ChannelCommunity
            self.dispersy.define_auto_load(ChannelCommunity, self.session.dispersy_member, load=True,
                                           kargs=default_kwargs)

        # PreviewChannel Community
        if self.session.config.get_preview_channel_community_enabled() and self.dispersy:
            from Tribler.community.channel.preview import PreviewChannelCommunity
            self.dispersy.define_auto_load(PreviewChannelCommunity, self.session.dispersy_member,
                                           kargs=default_kwargs)

        self._logger.info("tribler: communities are ready in %.2f seconds", timemod.time() - now_time)

    def init(self):
        if self.dispersy:
            from Tribler.dispersy.community import HardKilledCommunity

            self._logger.info("lmc: Starting Dispersy...")

            self.session.readable_status = STATE_STARTING_DISPERSY
            now = timemod.time()
            success = self.dispersy.start(self.session.autoload_discovery)

            diff = timemod.time() - now
            if success:
                self._logger.info("lmc: Dispersy started successfully in %.2f seconds [port: %d]",
                                  diff, self.dispersy.wan_address[1])
            else:
                self._logger.info("lmc: Dispersy failed to start in %.2f seconds", diff)

            self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP'))

            from Tribler.dispersy.crypto import M2CryptoSK
            private_key = self.dispersy.crypto.key_to_bin(
                M2CryptoSK(filename=self.session.config.get_permid_keypair_filename()))
            self.session.dispersy_member = blockingCallFromThread(reactor, self.dispersy.get_member,
                                                                  private_key=private_key)

            blockingCallFromThread(reactor, self.dispersy.define_auto_load, HardKilledCommunity,
                                   self.session.dispersy_member, load=True)

            if self.session.config.get_megacache_enabled():
                self.dispersy.database.attach_commit_callback(self.session.sqlite_db.commit_now)

            # notify dispersy finished loading
            self.session.notifier.notify(NTFY_DISPERSY, NTFY_STARTED, None)

        self.session.readable_status = STATE_LOADING_COMMUNITIES

        # We should load the mainline DHT before loading the IPv8 overlays since the DHT is used for the tunnel overlay.
        if self.session.config.get_mainline_dht_enabled():
            self.session.readable_status = STATE_START_MAINLINE_DHT
            from Tribler.Core.DecentralizedTracking import mainlineDHT
            self.mainline_dht = mainlineDHT.init(('127.0.0.1', self.session.config.get_mainline_dht_port()),
                                                 self.session.config.get_state_dir())
            self.upnp_ports.append((self.session.config.get_mainline_dht_port(), 'UDP'))

        if self.ipv8:
            self.load_ipv8_overlays()

        if self.dispersy:
            self.load_dispersy_communities()

        tunnel_community_ports = self.session.config.get_tunnel_community_socks5_listen_ports()
        self.session.config.set_anon_proxy_settings(2, ("127.0.0.1", tunnel_community_ports))

        if self.session.config.get_channel_search_enabled() and self.session.config.get_dispersy_enabled():
            self.session.readable_status = STATE_INITIALIZE_CHANNEL_MGR
            from Tribler.Core.Modules.channel.channel_manager import ChannelManager
            self.channel_manager = ChannelManager(self.session)
            self.channel_manager.initialize()

        if self.session.config.get_libtorrent_enabled():
            self.session.readable_status = STATE_START_LIBTORRENT
            from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr
            self.ltmgr = LibtorrentMgr(self.session)
            self.ltmgr.initialize()
            for port, protocol in self.upnp_ports:
                self.ltmgr.add_upnp_mapping(port, protocol)

        # add task for tracker checking
        if self.session.config.get_torrent_checking_enabled():
            self.session.readable_status = STATE_START_TORRENT_CHECKER
            self.torrent_checker = TorrentChecker(self.session)
            self.torrent_checker.initialize()

        if self.rtorrent_handler and self.session.config.get_dispersy_enabled():
            self.session.readable_status = STATE_START_REMOTE_TORRENT_HANDLER
            self.rtorrent_handler.initialize()

        if self.api_manager:
            self.session.readable_status = STATE_START_API_ENDPOINTS
            self.api_manager.root_endpoint.start_endpoints()

        if self.session.config.get_watch_folder_enabled():
            self.session.readable_status = STATE_START_WATCH_FOLDER
            self.watch_folder = WatchFolder(self.session)
            self.watch_folder.start()

        if self.session.config.get_credit_mining_enabled():
            self.session.readable_status = STATE_START_CREDIT_MINING
            from Tribler.Core.CreditMining.CreditMiningManager import CreditMiningManager
            self.credit_mining_manager = CreditMiningManager(self.session)

        if self.session.config.get_resource_monitor_enabled():
            self.resource_monitor = ResourceMonitor(self.session)
            self.resource_monitor.start()
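
        # The version check manager and the periodic download-state callback below are
        # always set up, independent of the optional components above.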
        self.version_check_manager = VersionCheckManager(self.session)
        self.session.set_download_states_callback(self.sesscb_states_callback)

        self.initComplete = True

    def add(self, tdef, dscfg, pstate=None, setupDelay=0, hidden=False,
            share_mode=False, checkpoint_disabled=False):
        """ Called by any thread """
        d = None
        with self.session_lock:
            if not isinstance(tdef, TorrentDefNoMetainfo) and not tdef.is_finalized():
                raise ValueError("TorrentDef not finalized")

            infohash = tdef.get_infohash()

            # Create the destination directory if it does not exist yet
            try:
                if not os.path.isdir(dscfg.get_dest_dir()):
                    os.makedirs(dscfg.get_dest_dir())
            except OSError:
                self._logger.error("Unable to create the download destination directory.")

            if dscfg.get_time_added() == 0:
                dscfg.set_time_added(int(timemod.time()))

            # Check if running or saved on disk
            if infohash in self.downloads:
                self._logger.info("Torrent already exists in the downloads. Infohash:%s", infohash.encode('hex'))

            from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl
            d = LibtorrentDownloadImpl(self.session, tdef)

            if pstate is None:  # not already resuming
                pstate = self.load_download_pstate_noexc(infohash)
                if pstate is not None:
                    self._logger.debug("tlm: add: pstate is %s %s",
                                       pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress'))

            # Store in list of Downloads, always.
            self.downloads[infohash] = d
            setup_deferred = d.setup(dscfg, pstate, wrapperDelay=setupDelay,
                                     share_mode=share_mode, checkpoint_disabled=checkpoint_disabled)
            setup_deferred.addCallback(self.on_download_handle_created)

        if d and not hidden and self.session.config.get_megacache_enabled():
            @forceDBThread
            def write_my_pref():
                torrent_id = self.torrent_db.getTorrentID(infohash)
                data = {'destination_path': d.get_dest_dir()}
                self.mypref_db.addMyPreference(torrent_id, data)

            if isinstance(tdef, TorrentDefNoMetainfo):
                self.torrent_db.addOrGetTorrentID(tdef.get_infohash())
                self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name_as_unicode())
                write_my_pref()
            elif self.rtorrent_handler:
                self.rtorrent_handler.save_torrent(tdef, write_my_pref)
            else:
                self.torrent_db.addExternalTorrent(tdef, extra_info={'status': 'good'})
                write_my_pref()

        return d

    def on_download_handle_created(self, download):
        """
        This method is called when the download handle has been created.
        Immediately checkpoint the download and write the resume data.
""" return download.checkpoint() def remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ out = None with self.session_lock: out = d.stop_remove(removestate=removestate, removecontent=removecontent) infohash = d.get_def().get_infohash() if infohash in self.downloads: del self.downloads[infohash] if not hidden: self.remove_id(infohash) if self.tunnel_community: self.tunnel_community.on_download_removed(d) return out or succeed(None) def remove_id(self, infohash): @forceDBThread def do_db(): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id: self.mypref_db.deletePreference(torrent_id) if self.session.config.get_megacache_enabled(): do_db() def get_downloads(self): """ Called by any thread """ with self.session_lock: return self.downloads.values() # copy, is mutable def get_download(self, infohash): """ Called by any thread """ with self.session_lock: return self.downloads.get(infohash, None) def download_exists(self, infohash): with self.session_lock: return infohash in self.downloads @blocking_call_on_reactor_thread @inlineCallbacks def update_download_hops(self, download, new_hops): """ Update the amount of hops for a specified download. This can be done on runtime. """ infohash = binascii.hexlify(download.tdef.get_infohash()) self._logger.info("Updating the amount of hops of download %s", infohash) yield self.session.remove_download(download) # copy the old download_config and change the hop count dscfg = download.copy() dscfg.set_hops(new_hops) self.session.start_download_from_tdef(download.tdef, dscfg) def update_trackers(self, infohash, trackers): """ Update the trackers for a download. :param infohash: infohash of the torrent that needs to be updated :param trackers: A list of tracker urls. """ dl = self.get_download(infohash) old_def = dl.get_def() if dl else None if old_def: old_trackers = old_def.get_trackers_as_single_tuple() new_trackers = list(set(trackers) - set(old_trackers)) all_trackers = list(old_trackers) + new_trackers if new_trackers: # Add new trackers to the download dl.add_trackers(new_trackers) # Create a new TorrentDef if isinstance(old_def, TorrentDefNoMetainfo): new_def = TorrentDefNoMetainfo(old_def.get_infohash(), old_def.get_name(), dl.get_magnet_link()) else: metainfo = old_def.get_metainfo() if len(all_trackers) > 1: metainfo["announce-list"] = [all_trackers] else: metainfo["announce"] = all_trackers[0] new_def = TorrentDef.load_from_dict(metainfo) # Set TorrentDef + checkpoint dl.set_def(new_def) dl.checkpoint() if isinstance(old_def, TorrentDefNoMetainfo): @forceDBThread def update_trackers_db(infohash, new_trackers): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id is not None: self.torrent_db.addTorrentTrackerMappingInBatch( torrent_id, new_trackers) self.session.notifier.notify( NTFY_TORRENTS, NTFY_UPDATE, infohash) if self.session.config.get_megacache_enabled(): update_trackers_db(infohash, new_trackers) elif not isinstance( old_def, TorrentDefNoMetainfo) and self.rtorrent_handler: # Update collected torrents self.rtorrent_handler.save_torrent(new_def) # # State retrieval # def stop_download_states_callback(self): """ Stop any download states callback if present. """ if self.is_pending_task_active("download_states_lc"): self.cancel_pending_task("download_states_lc") def set_download_states_callback(self, user_callback, interval=1.0): """ Set the download state callback. Remove any old callback if it's present. 
""" self.stop_download_states_callback() self._logger.debug( "Starting the download state callback with interval %f", interval) self.download_states_lc = self.register_task( "download_states_lc", LoopingCall(self._invoke_states_cb, user_callback)) self.download_states_lc.start(interval) def _invoke_states_cb(self, callback): """ Invoke the download states callback with a list of the download states. """ dslist = [] for d in self.downloads.values(): d.set_moreinfo_stats( True in self.get_peer_list or d.get_def().get_infohash() in self.get_peer_list) ds = d.network_get_state(None) dslist.append(ds) def on_cb_done(new_get_peer_list): self.get_peer_list = new_get_peer_list return deferToThread(callback, dslist).addCallback(on_cb_done) def sesscb_states_callback(self, states_list): """ This method is periodically (every second) called with a list of the download states of the active downloads. """ self.state_cb_count += 1 # Check to see if a download has finished new_active_downloads = [] do_checkpoint = False seeding_download_list = [] for ds in states_list: state = ds.get_status() download = ds.get_download() tdef = download.get_def() safename = tdef.get_name_as_unicode() if state == DLSTATUS_DOWNLOADING: new_active_downloads.append(safename) elif state == DLSTATUS_STOPPED_ON_ERROR: self._logger.error("Error during download: %s", repr(ds.get_error())) if self.download_exists(tdef.get_infohash()): self.get_download(tdef.get_infohash()).stop() self.session.notifier.notify(NTFY_TORRENT, NTFY_ERROR, tdef.get_infohash(), repr(ds.get_error())) elif state == DLSTATUS_SEEDING: seeding_download_list.append({ u'infohash': tdef.get_infohash(), u'download': download }) if safename in self.previous_active_downloads: self.session.notifier.notify(NTFY_TORRENT, NTFY_FINISHED, tdef.get_infohash(), safename) do_checkpoint = True elif download.get_hops() == 0 and download.get_safe_seeding(): # Re-add the download with anonymity enabled hops = self.session.config.get_default_number_hops() self.update_download_hops(download, hops) self.previous_active_downloads = new_active_downloads if do_checkpoint: self.session.checkpoint_downloads() if self.state_cb_count % 4 == 0: if self.tunnel_community: self.tunnel_community.monitor_downloads(states_list) if self.credit_mining_manager: self.credit_mining_manager.monitor_downloads(states_list) return [] # # Persistence methods # def load_checkpoint(self): """ Called by any thread """ def do_load_checkpoint(): with self.session_lock: for i, filename in enumerate( iglob( os.path.join( self.session.get_downloads_pstate_dir(), '*.state'))): self.resume_download(filename, setupDelay=i * 0.1) if self.initComplete: do_load_checkpoint() else: self.register_task("load_checkpoint", reactor.callLater(1, do_load_checkpoint)) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume session_lock already held """ try: basename = binascii.hexlify(infohash) + '.state' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if os.path.exists(filename): return self.load_download_pstate(filename) else: self._logger.info("%s not found", basename) except Exception: self._logger.exception("Exception while loading pstate: %s", infohash) def resume_download(self, filename, setupDelay=0): tdef = dscfg = pstate = None try: pstate = self.load_download_pstate(filename) # SWIFTPROC metainfo = pstate.get('state', 'metainfo') if 'infohash' in metainfo: tdef = TorrentDefNoMetainfo(metainfo['infohash'], metainfo['name'], metainfo.get('url', None)) else: tdef = 
                tdef = TorrentDef.load_from_dict(metainfo)

            if pstate.has_option('download_defaults', 'saveas') and \
                    isinstance(pstate.get('download_defaults', 'saveas'), tuple):
                pstate.set('download_defaults', 'saveas', pstate.get('download_defaults', 'saveas')[-1])

            dscfg = DownloadStartupConfig(pstate)

        except:
            # pstate is invalid or non-existing
            _, file = os.path.split(filename)

            infohash = binascii.unhexlify(file[:-6])

            torrent_data = self.torrent_store.get(infohash)
            if torrent_data:
                try:
                    tdef = TorrentDef.load_from_memory(torrent_data)
                    defaultDLConfig = DefaultDownloadStartupConfig.getInstance()
                    dscfg = defaultDLConfig.copy()

                    if self.mypref_db is not None:
                        dest_dir = self.mypref_db.getMyPrefStatsInfohash(infohash)
                        if dest_dir and os.path.isdir(dest_dir):
                            dscfg.set_dest_dir(dest_dir)
                except ValueError:
                    self._logger.warning("tlm: torrent data invalid")

        if pstate is not None:
            has_resume_data = pstate.get('state', 'engineresumedata') is not None
            self._logger.debug("tlm: load_checkpoint: resumedata %s",
                               'len %s ' % len(pstate.get('state', 'engineresumedata')) if has_resume_data else 'None')

        if tdef and dscfg:
            if dscfg.get_dest_dir() != '':  # removed torrent ignoring
                try:
                    if not self.download_exists(tdef.get_infohash()):
                        self.add(tdef, dscfg, pstate, setupDelay=setupDelay)
                    else:
                        self._logger.info("tlm: not resuming checkpoint because download has already been added")
                except Exception as e:
                    self._logger.exception("tlm: load check_point: exception while adding download %s", tdef)
            else:
                self._logger.info("tlm: removing checkpoint %s destdir is %s", filename, dscfg.get_dest_dir())
                os.remove(filename)
        else:
            self._logger.info("tlm: could not resume checkpoint %s %s %s", filename, tdef, dscfg)

    def checkpoint_downloads(self):
        """
        Checkpoints all running downloads in Tribler.
        Even if the list of Downloads changes in the meantime this is no problem.
        For removals, the local downloads list will still hold a pointer to the download, and additions
        are no problem (they just won't be included in the list of states returned via callback).
        """
        downloads = self.downloads.values()
        deferred_list = []
        self._logger.debug("tlm: checkpointing %s downloads", len(downloads))
        for download in downloads:
            deferred_list.append(download.checkpoint())

        return DeferredList(deferred_list)

    def shutdown_downloads(self):
        """
        Shutdown all downloads in Tribler.
        """
        for download in self.downloads.values():
            download.stop()

    def remove_pstate(self, infohash):
        def do_remove():
            if not self.download_exists(infohash):
                dlpstatedir = self.session.get_downloads_pstate_dir()

                # Remove checkpoint
                hexinfohash = binascii.hexlify(infohash)
                try:
                    basename = hexinfohash + '.state'
                    filename = os.path.join(dlpstatedir, basename)
                    self._logger.debug("remove pstate: removing dlcheckpoint entry %s", filename)
                    if os.access(filename, os.F_OK):
                        os.remove(filename)
                except:
                    # Show must go on
                    self._logger.exception("Could not remove state")
            else:
                self._logger.warning("remove pstate: download is back, restarted? Canceling removal! %s",
                                     repr(infohash))
        reactor.callFromThread(do_remove)

    @inlineCallbacks
    def early_shutdown(self):
        """
        Called as soon as Session shutdown is initiated. Used to start shutdown tasks that take some time
        and that can run in parallel with checkpointing, etc.
        :returns a Deferred that will fire once all dependencies acknowledge they have shut down.
""" self._logger.info("tlm: early_shutdown") self.shutdown_task_manager() # Note: session_lock not held self.shutdownstarttime = timemod.time() if self.credit_mining_manager: yield self.credit_mining_manager.shutdown() self.credit_mining_manager = None if self.torrent_checker: yield self.torrent_checker.shutdown() self.torrent_checker = None if self.channel_manager: yield self.channel_manager.shutdown() self.channel_manager = None if self.search_manager: yield self.search_manager.shutdown() self.search_manager = None if self.rtorrent_handler: yield self.rtorrent_handler.shutdown() self.rtorrent_handler = None if self.video_server: yield self.video_server.shutdown_server() self.video_server = None if self.version_check_manager: self.version_check_manager.stop() self.version_check_manager = None if self.resource_monitor: self.resource_monitor.stop() self.resource_monitor = None self.tracker_manager = None if self.tunnel_community and self.triblerchain_community: # We unload these overlays manually since the triblerchain has to be unloaded after the tunnel overlay. yield self.ipv8.unload_overlay(self.tunnel_community) yield self.ipv8.unload_overlay(self.triblerchain_community) if self.dispersy: self._logger.info("lmc: Shutting down Dispersy...") now = timemod.time() try: success = yield self.dispersy.stop() except: print_exc() success = False diff = timemod.time() - now if success: self._logger.info( "lmc: Dispersy successfully shutdown in %.2f seconds", diff) else: self._logger.info( "lmc: Dispersy failed to shutdown in %.2f seconds", diff) if self.ipv8: yield self.ipv8.stop(stop_reactor=False) if self.metadata_store is not None: yield self.metadata_store.close() self.metadata_store = None if self.tftp_handler is not None: yield self.tftp_handler.shutdown() self.tftp_handler = None if self.channelcast_db is not None: yield self.channelcast_db.close() self.channelcast_db = None if self.votecast_db is not None: yield self.votecast_db.close() self.votecast_db = None if self.mypref_db is not None: yield self.mypref_db.close() self.mypref_db = None if self.torrent_db is not None: yield self.torrent_db.close() self.torrent_db = None if self.peer_db is not None: yield self.peer_db.close() self.peer_db = None if self.mainline_dht is not None: from Tribler.Core.DecentralizedTracking import mainlineDHT yield mainlineDHT.deinit(self.mainline_dht) self.mainline_dht = None if self.torrent_store is not None: yield self.torrent_store.close() self.torrent_store = None if self.watch_folder is not None: yield self.watch_folder.stop() self.watch_folder = None # We close the API manager as late as possible during shutdown. 
        if self.api_manager is not None:
            yield self.api_manager.stop()
            self.api_manager = None

    def network_shutdown(self):
        try:
            self._logger.info("tlm: network_shutdown")

            ts = enumerate_threads()
            self._logger.info("tlm: Number of threads still running %d", len(ts))
            for t in ts:
                self._logger.info("tlm: Thread still running=%s, daemon=%s, instance=%s",
                                  t.getName(), t.isDaemon(), t)
        except:
            print_exc()

        # Stop network thread
        self.sessdoneflag.set()

        # Shutdown libtorrent session after checkpoints have been made
        if self.ltmgr is not None:
            self.ltmgr.shutdown()
            self.ltmgr = None

    def save_download_pstate(self, infohash, pstate):
        """ Called by network thread """
        self.downloads[infohash].pstate_for_restart = pstate
        self.register_task("save_pstate %f" % timemod.clock(), self.downloads[infohash].save_resume_data())

    def load_download_pstate(self, filename):
        """ Called by any thread """
        pstate = CallbackConfigParser()
        pstate.read_file(filename)
        return pstate
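
# Lifecycle sketch, inferred from the methods above: a Session calls register(session,
# session_lock), which constructs the enabled components and runs init() once; downloads
# are then managed through add(), remove() and load_checkpoint(); shutdown happens in two
# phases, early_shutdown() (components, overlays, databases) followed by
# network_shutdown() (network-thread flag and the libtorrent session).
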
class TestLibtorrentMgr(AbstractServer):

    FILE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    LIBTORRENT_FILES_DIR = os.path.abspath(os.path.join(FILE_DIR, u"../data/libtorrent/"))

    @inlineCallbacks
    def setUp(self):
        yield super(TestLibtorrentMgr, self).setUp()

        self.tribler_session = MockObject()
        self.tribler_session.lm = MockObject()
        self.tribler_session.notifier = Notifier()
        self.tribler_session.state_dir = self.session_base_dir
        self.tribler_session.trustchain_keypair = MockObject()
        self.tribler_session.trustchain_keypair.key_to_hash = lambda: 'a' * 20
        self.tribler_session.notify_shutdown_state = lambda _: None

        self.tribler_session.config = MockObject()
        self.tribler_session.config.get_libtorrent_utp = lambda: True
        self.tribler_session.config.get_libtorrent_proxy_settings = lambda: (0, None, None)
        self.tribler_session.config.get_anon_proxy_settings = lambda: (2, ('127.0.0.1', [1338]), None)
        self.tribler_session.config.get_libtorrent_port = lambda: 1337
        self.tribler_session.config.get_anon_listen_port = lambda: 1338
        self.tribler_session.config.get_state_dir = lambda: self.session_base_dir
        self.tribler_session.config.set_listen_port_runtime = lambda: None
        self.tribler_session.config.get_libtorrent_max_upload_rate = lambda: 100
        self.tribler_session.config.get_libtorrent_max_download_rate = lambda: 120
        self.tribler_session.config.get_libtorrent_dht_enabled = lambda: False
        self.tribler_session.config.set_libtorrent_port_runtime = lambda _: None

        self.ltmgr = LibtorrentMgr(self.tribler_session)

    @inlineCallbacks
    def tearDown(self):
        self.ltmgr.shutdown(timeout=0)
        self.assertTrue(os.path.exists(os.path.join(self.session_base_dir, 'lt.state')))
        yield super(TestLibtorrentMgr, self).tearDown()

    def test_get_session_zero_hops(self):
        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(0)
        self.assertTrue(ltsession)

    def test_get_session_one_hop(self):
        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(1)
        self.assertTrue(ltsession)

    def test_get_session_zero_hops_corrupt_lt_state(self):
        file = open(os.path.join(self.session_base_dir, 'lt.state'), "w")
        file.write("Lorem ipsum")
        file.close()

        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(0)
        self.assertTrue(ltsession)

    def test_get_session_zero_hops_working_lt_state(self):
        shutil.copy(os.path.join(self.LIBTORRENT_FILES_DIR, 'lt.state'),
                    os.path.join(self.session_base_dir, 'lt.state'))
        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(0)
        self.assertTrue(ltsession)

    def test_get_metainfo_not_ready(self):
        """
        Testing the metainfo fetching method when the DHT is not ready
        """
        self.ltmgr.initialize()
        self.assertFalse(self.ltmgr.get_metainfo("a" * 20, None))

    @trial_timeout(20)
    def test_get_metainfo(self):
        """
        Testing the metainfo fetching method
        """
        test_deferred = Deferred()

        def metainfo_cb(metainfo):
            self.assertEqual(metainfo, {'info': {'pieces': ['a']}, 'leechers': 0,
                                        'nodes': [], 'seeders': 0, 'initial peers': []})
            test_deferred.callback(None)

        infohash = "a" * 20

        self.ltmgr.initialize()

        torrent_info = MockObject()
        torrent_info.metadata = lambda: bencode({'pieces': ['a']})
        torrent_info.trackers = lambda: []

        fake_handle = MockObject()
        fake_handle.is_valid = lambda: True
        fake_handle.has_metadata = lambda: True
        fake_handle.get_peer_info = lambda: []
        fake_handle.torrent_file = lambda: torrent_info

        self.ltmgr.ltsession_metainfo.add_torrent = lambda *_: fake_handle
        self.ltmgr.ltsession_metainfo.remove_torrent = lambda *_: None

        fake_alert = type('lt.metadata_received_alert', (object,), dict(handle=fake_handle))
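        # The manager is expected to pick this alert up when it polls the metainfo
        # session's alert queue (via the pop_alerts stub below); that is what ends up
        # driving metainfo_cb in this test.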
        self.ltmgr.ltsession_metainfo.pop_alerts = lambda: [fake_alert]

        self.ltmgr.get_metainfo(unhexlify(infohash), metainfo_cb)
        return test_deferred

    @trial_timeout(20)
    def test_get_metainfo_cache(self):
        """
        Testing metainfo caching
        """
        test_deferred = Deferred()

        def metainfo_cb(metainfo):
            self.assertEqual(metainfo, "test")
            test_deferred.callback(None)

        self.ltmgr.initialize()
        self.ltmgr.metainfo_cache[hexlify("a" * 20)] = {'meta_info': 'test'}
        self.ltmgr.get_metainfo("a" * 20, metainfo_cb)

        return test_deferred

    @trial_timeout(20)
    def test_got_metainfo(self):
        """
        Testing whether the callback is correctly invoked when we received metainfo
        """
        test_deferred = Deferred()
        self.ltmgr.initialize()

        def metainfo_cb(metainfo):
            self.assertDictEqual(metainfo, {'info': {'pieces': ['a']}, 'leechers': 0,
                                            'nodes': [], 'seeders': 0, 'initial peers': []})
            test_deferred.callback(None)

        fake_handle = MockObject()

        torrent_info = MockObject()
        torrent_info.metadata = lambda: bencode({'pieces': ['a']})
        torrent_info.trackers = lambda: []

        fake_handle.get_peer_info = lambda: []
        fake_handle.torrent_file = lambda: torrent_info

        self.ltmgr.ltsession_metainfo.remove_torrent = lambda *_: None

        self.ltmgr.metainfo_requests['a' * 20] = {
            'handle': fake_handle,
            'timeout_callbacks': [],
            'callbacks': [metainfo_cb],
            'notify': False
        }
        self.ltmgr.got_metainfo("a" * 20)

        return test_deferred

    @trial_timeout(20)
    def test_got_metainfo_timeout(self):
        """
        Testing whether the callback is correctly invoked when we received metainfo after timeout
        """
        test_deferred = Deferred()

        def metainfo_timeout_cb(metainfo):
            self.assertEqual(metainfo, 'a' * 20)
            test_deferred.callback(None)

        fake_handle = MockObject()

        self.ltmgr.initialize()
        self.ltmgr.metainfo_requests[hexlify('a' * 20)] = {'handle': fake_handle,
                                                           'timeout_callbacks': [metainfo_timeout_cb],
                                                           'callbacks': [],
                                                           'notify': True}
        self.ltmgr.ltsession_metainfo.remove_torrent = lambda _dummy1, _dummy2: None

        self.ltmgr.got_metainfo(hexlify('a' * 20), timeout=True)

        return test_deferred

    @trial_timeout(20)
    def test_get_metainfo_with_already_added_torrent(self):
        """
        Testing metainfo fetching for a torrent which is already in session.
        got_metainfo() should be called with timeout=False.
""" magnet_link = "magnet:?xt=urn:btih:f72636475a375653083e49d501601675ce3e6619&dn=ubuntu-16.04.3-server-i386.iso" test_deferred = Deferred() def fake_got_metainfo(_, timeout): self.assertFalse(timeout, "Timeout should not be True") test_deferred.callback(None) mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: True mock_handle.has_metadata = lambda: True mock_ltsession = MockObject() mock_ltsession.add_torrent = lambda _: mock_handle mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [] mock_ltsession.start_upnp = lambda: None mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.ltsession_metainfo = mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir') self.ltmgr.got_metainfo = fake_got_metainfo self.ltmgr.get_metainfo(magnet_link, lambda _: None) return test_deferred @trial_timeout(20) def test_add_torrent(self): """ Testing the addition of a torrent to the libtorrent manager """ test_deferred = Deferred() mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: False mock_error = MockObject() mock_error.value = lambda: None mock_alert = type('add_torrent_alert', (object,), dict(handle=mock_handle, error=mock_error))() mock_ltsession = MockObject() mock_ltsession.async_add_torrent = lambda _: reactor.callLater(0.1, self.ltmgr.process_alert, mock_alert) mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [] mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.get_session = lambda *_: mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir') infohash = MockObject() infohash.info_hash = lambda: 'a' * 20 mock_download = MockObject() mock_download.deferred_added = Deferred() def cb_torrent_added(handle): self.assertEqual(handle, mock_handle) test_deferred.callback(None) self.ltmgr.add_torrent(mock_download, {'ti': infohash}).addCallback(cb_torrent_added) return test_deferred @trial_timeout(20) def test_add_torrent_desync(self): """ Testing the addition of a torrent to the libtorrent manager, if it already exists in the session. 
""" mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: True mock_alert = type('add_torrent_alert', (object,), dict(handle=mock_handle)) mock_ltsession = MockObject() mock_ltsession.async_add_torrent = lambda _: self.ltmgr.process_alert(mock_alert) mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [mock_handle] mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.get_session = lambda *_: mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir') infohash = MockObject() infohash.info_hash = lambda: 'a' * 20 mock_download = MockObject() mock_download.deferred_added = Deferred() return self.ltmgr.add_torrent(mock_download, {'ti': infohash}).addCallback( lambda handle: self.assertEqual(handle, mock_handle) ) def test_remove_invalid_torrent(self): """ Tests a successful removal status of torrents without a handle """ self.ltmgr.initialize() mock_dl = MockObject() mock_dl.handle = None self.assertTrue(self.ltmgr.remove_torrent(mock_dl).called) def test_remove_invalid_handle_torrent(self): """ Tests a successful removal status of torrents with an invalid handle """ self.ltmgr.initialize() mock_handle = MockObject() mock_handle.is_valid = lambda: False mock_dl = MockObject() mock_dl.handle = mock_handle self.assertTrue(self.ltmgr.remove_torrent(mock_dl).called) def test_remove_unregistered_torrent(self): """ Tests a successful removal status of torrents which aren't known """ self.ltmgr.initialize() mock_handle = MockObject() mock_handle.is_valid = lambda: False alert = type('torrent_removed_alert', (object, ), dict(handle=mock_handle, info_hash='0'*20)) self.ltmgr.process_alert(alert()) self.assertNotIn('0'*20, self.ltmgr.torrents) def test_start_download_corrupt(self): """ Testing whether starting the download of a corrupt torrent file raises an exception """ self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir') corrupt_file = os.path.join(self.LIBTORRENT_FILES_DIR, 'corrupt_torrent.torrent') self.assertRaises(TorrentFileException, self.ltmgr.start_download, torrentfilename=corrupt_file) def test_start_download_duplicate(self): """ Test the starting of a download when there are no new trackers """ mock_tdef = MockObject() mock_tdef.get_infohash = lambda: 'a' * 20 mock_tdef.get_trackers_as_single_tuple = lambda: tuple() mock_download = MockObject() mock_download.get_def = lambda: mock_tdef mock_download.get_credit_mining = lambda: False self.tribler_session.get_download = lambda _: mock_download self.tribler_session.start_download_from_tdef = lambda tdef, _: MockObject() self.ltmgr.tribler_session = self.tribler_session self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir') self.ltmgr.start_download(infohash='a' * 20, tdef=mock_tdef) def test_set_proxy_settings(self): """ Test setting the proxy settings """ def on_proxy_set(settings): self.assertTrue(settings) self.assertEqual(settings.hostname, 'a') self.assertEqual(settings.port, 1234) self.assertEqual(settings.username, 'abc') self.assertEqual(settings.password, 'def') def on_set_settings(settings): self.assertTrue(settings) self.assertEqual(settings['proxy_hostname'], 'a') self.assertEqual(settings['proxy_port'], 1234) self.assertEqual(settings['proxy_username'], 'abc') self.assertEqual(settings['proxy_password'], 'def') self.assertEqual(settings['proxy_peer_connections'], True) self.assertEqual(settings['proxy_hostnames'], True) 
        mock_lt_session = MockObject()
        mock_lt_session.get_settings = lambda: {}
        mock_lt_session.set_settings = on_set_settings
        mock_lt_session.set_proxy = on_proxy_set  # Libtorrent < 1.1.0 uses set_proxy to set proxy settings

        self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir')
        self.ltmgr.set_proxy_settings(mock_lt_session, 0, ('a', "1234"), ('abc', 'def'))

    def test_save_resume_preresolved_magnet(self):
        """
        Test whether a magnet link correctly writes save-resume data before it is resolved.
        This can happen when a magnet link is added when the user does not have internet.
        """
        self.ltmgr.initialize()
        self.ltmgr.trsession = self.tribler_session
        self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir')

        mock_tdef = MockObject()
        mock_tdef.get_infohash = lambda: 'a' * 20

        self.tribler_session.get_download = lambda _: None
        self.tribler_session.get_downloads_pstate_dir = lambda: self.ltmgr.metadata_tmpdir

        mock_lm = MockObject()
        mock_lm.ltmgr = self.ltmgr
        mock_lm.tunnel_community = None
        self.tribler_session.lm = mock_lm

        def dl_from_tdef(tdef, _):
            dl = LibtorrentDownloadImpl(self.tribler_session, tdef)
            dl.setup()
            dl.cancel_all_pending_tasks()
            return dl
        self.tribler_session.start_download_from_tdef = dl_from_tdef

        download = self.ltmgr.start_download_from_magnet("magnet:?xt=urn:btih:" + ('1' * 40))
        basename = hexlify(download.get_def().get_infohash()) + '.state'
        filename = os.path.join(download.session.get_downloads_pstate_dir(), basename)
        self.assertTrue(os.path.isfile(filename))

    @trial_timeout(5)
    def test_callback_on_alert(self):
        """
        Test whether the alert callback is called when a libtorrent alert is posted
        """
        self.ltmgr.default_alert_mask = 0xffffffff
        test_deferred = Deferred()

        def callback(*args):
            self.ltmgr.alert_callback = None
            test_deferred.callback(None)
        callback.called = False
        self.ltmgr.alert_callback = callback
        self.ltmgr.initialize()
        self.ltmgr._task_process_alerts()
        return test_deferred

    def test_payout_on_disconnect(self):
        """
        Test whether a payout is initialized when a peer disconnects
        """
        class peer_disconnected_alert(object):
            def __init__(self):
                self.pid = MockObject()
                self.pid.to_string = lambda: 'a' * 20

        def mocked_do_payout(mid):
            self.assertEqual(mid, 'a' * 20)
            mocked_do_payout.called = True
        mocked_do_payout.called = False

        disconnect_alert = peer_disconnected_alert()
        self.ltmgr.tribler_session.lm.payout_manager = MockObject()
        self.ltmgr.tribler_session.lm.payout_manager.do_payout = mocked_do_payout
        self.ltmgr.initialize()
        self.ltmgr.get_session(0).pop_alerts = lambda: [disconnect_alert]
        self.ltmgr._task_process_alerts()
        self.assertTrue(mocked_do_payout.called)

    def test_post_session_stats(self):
        """
        Test whether post_session_stats actually updates the state of libtorrent readiness for clean shutdown.
        """
        def check_if_session_shutdown_is_ready():
            self.ltmgr._task_process_alerts()
            self.assertTrue(self.ltmgr.lt_session_shutdown_ready[0])

        self.ltmgr.default_alert_mask = 0xffffffff
        self.ltmgr.initialize()

        # Zero hop session should be initialized
        self.assertFalse(self.ltmgr.lt_session_shutdown_ready[0])

        # Check for status with session stats alert
        self.ltmgr.post_session_stats(hops=0)

        # Wait sometime to get the alert and check the status
        return deferLater(reactor, 0.01, check_if_session_shutdown_is_ready)
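
# A minimal, self-contained illustration (a sketch, not part of the test suite) of the
# stubbing pattern used throughout the tests above: a bare object whose attributes are
# filled in with lambdas, so only the calls a test actually exercises need to exist.
# The MockObject helper used by the real tests is assumed to behave essentially like this.
if __name__ == '__main__':
    class _BareMock(object):
        pass

    fake_handle = _BareMock()
    fake_handle.info_hash = lambda: 'a' * 20   # stub only what the code under test calls
    fake_handle.is_valid = lambda: True
    assert fake_handle.info_hash() == 'a' * 20
    assert fake_handle.is_valid()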
class TestLibtorrentMgr(TriblerCoreTest): FILE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) LIBTORRENT_FILES_DIR = os.path.abspath( os.path.join(FILE_DIR, u"../data/libtorrent/")) @blocking_call_on_reactor_thread @inlineCallbacks def setUp(self, annotate=True): yield super(TestLibtorrentMgr, self).setUp(annotate) self.tribler_session = FakeTriblerSession(self.session_base_dir) self.ltmgr = LibtorrentMgr(self.tribler_session) @blocking_call_on_reactor_thread @inlineCallbacks def tearDown(self, annotate=True): self.ltmgr.shutdown() self.assertTrue( os.path.exists(os.path.join(self.session_base_dir, 'lt.state'))) yield super(TestLibtorrentMgr, self).tearDown(annotate) def test_get_session_zero_hops(self): self.ltmgr.initialize() ltsession = self.ltmgr.get_session(0) self.assertTrue(ltsession) def test_get_session_one_hop(self): self.ltmgr.initialize() ltsession = self.ltmgr.get_session(1) self.assertTrue(ltsession) def test_get_session_zero_hops_corrupt_lt_state(self): file = open(os.path.join(self.session_base_dir, 'lt.state'), "w") file.write("Lorem ipsum") file.close() self.ltmgr.initialize() ltsession = self.ltmgr.get_session(0) self.assertTrue(ltsession) def test_get_session_zero_hops_working_lt_state(self): shutil.copy(os.path.join(self.LIBTORRENT_FILES_DIR, 'lt.state'), os.path.join(self.session_base_dir, 'lt.state')) self.ltmgr.initialize() ltsession = self.ltmgr.get_session(0) self.assertTrue(ltsession) def test_get_metainfo_not_ready(self): """ Testing the metainfo fetching method when the DHT is not ready """ self.ltmgr.initialize() self.assertFalse(self.ltmgr.get_metainfo("a" * 20, None)) @deferred(timeout=20) def test_get_metainfo(self): """ Testing the metainfo fetching method """ test_deferred = Deferred() def metainfo_cb(metainfo): self.assertEqual( metainfo, { 'info': { 'pieces': ['a'] }, 'leechers': 0, 'nodes': [], 'seeders': 0, 'initial peers': [] }) test_deferred.callback(None) infohash = "a" * 20 self.ltmgr.initialize() torrent_info = MockObject() torrent_info.metadata = lambda: bencode({'pieces': ['a']}) torrent_info.trackers = lambda: [] fake_handle = MockObject() fake_handle.is_valid = lambda: True fake_handle.has_metadata = lambda: True fake_handle.get_peer_info = lambda: [] fake_handle.torrent_file = lambda: torrent_info self.ltmgr.ltsession_metainfo.add_torrent = lambda *_: fake_handle self.ltmgr.ltsession_metainfo.remove_torrent = lambda *_: None fake_alert = type('lt.metadata_received_alert', (object, ), dict(handle=fake_handle)) self.ltmgr.ltsession_metainfo.pop_alerts = lambda: [fake_alert] self.ltmgr.is_dht_ready = lambda: True self.ltmgr.get_metainfo(infohash.decode('hex'), metainfo_cb) return test_deferred @deferred(timeout=20) def test_get_metainfo_cache(self): """ Testing metainfo caching """ test_deferred = Deferred() def metainfo_cb(metainfo): self.assertEqual(metainfo, "test") test_deferred.callback(None) self.ltmgr.initialize() self.ltmgr.is_dht_ready = lambda: True self.ltmgr.metainfo_cache[("a" * 20).encode('hex')] = { 'meta_info': 'test' } self.ltmgr.get_metainfo("a" * 20, metainfo_cb) return test_deferred @deferred(timeout=20) def test_got_metainfo(self): """ Testing whether the callback is correctly invoked when we received metainfo """ test_deferred = Deferred() self.ltmgr.initialize() def metainfo_cb(metainfo): self.assertDictEqual( metainfo, { 'info': { 'pieces': ['a'] }, 'leechers': 0, 'nodes': [], 'seeders': 0, 'initial peers': [] }) test_deferred.callback(None) fake_handle = MockObject() torrent_info = MockObject() 
torrent_info.metadata = lambda: bencode({'pieces': ['a']}) torrent_info.trackers = lambda: [] fake_handle.get_peer_info = lambda: [] fake_handle.torrent_file = lambda: torrent_info self.ltmgr.ltsession_metainfo.remove_torrent = lambda *_: None self.ltmgr.metainfo_requests['a' * 20] = { 'handle': fake_handle, 'timeout_callbacks': [], 'callbacks': [metainfo_cb], 'notify': False } self.ltmgr.got_metainfo("a" * 20) return test_deferred @deferred(timeout=20) def test_got_metainfo_timeout(self): """ Testing whether the callback is correctly invoked when we received metainfo after timeout """ test_deferred = Deferred() def metainfo_timeout_cb(metainfo): self.assertEqual(metainfo, 'a' * 20) test_deferred.callback(None) fake_handle = MockObject() self.ltmgr.initialize() self.ltmgr.metainfo_requests[('a' * 20).encode('hex')] = { 'handle': fake_handle, 'timeout_callbacks': [metainfo_timeout_cb], 'callbacks': [], 'notify': True } self.ltmgr.ltsession_metainfo.remove_torrent = lambda _dummy1, _dummy2: None self.ltmgr.got_metainfo(('a' * 20).encode('hex'), timeout=True) return test_deferred @deferred(timeout=20) def test_get_metainfo_with_already_added_torrent(self): """ Testing metainfo fetching for a torrent which is already in session. got_metainfo() should be called with timeout=False. """ magnet_link = "magnet:?xt=urn:btih:f72636475a375653083e49d501601675ce3e6619&dn=ubuntu-16.04.3-server-i386.iso" test_deferred = Deferred() def fake_got_metainfo(_, timeout): self.assertFalse(timeout, "Timeout should not be True") test_deferred.callback(None) mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: True mock_handle.has_metadata = lambda: True mock_ltsession = MockObject() mock_ltsession.add_torrent = lambda _: mock_handle mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [] mock_ltsession.start_upnp = lambda: None mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.ltsession_metainfo = mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') self.ltmgr.is_dht_ready = lambda: True self.ltmgr.got_metainfo = fake_got_metainfo self.ltmgr.get_metainfo(magnet_link, lambda _: None) return test_deferred def test_add_torrent(self): """ Testing the addition of a torrent to the libtorrent manager """ mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: False mock_ltsession = MockObject() mock_ltsession.add_torrent = lambda _: mock_handle mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [] mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.get_session = lambda *_: mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') infohash = MockObject() infohash.info_hash = lambda: 'a' * 20 self.assertEqual(self.ltmgr.add_torrent(None, {'ti': infohash}), mock_handle) self.assertRaises(DuplicateDownloadException, self.ltmgr.add_torrent, None, {'ti': infohash}) def test_add_torrent_desync(self): """ Testing the addition of a torrent to the libtorrent manager, if it already exists in the session. 
""" mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: True mock_ltsession = MockObject() mock_ltsession.add_torrent = lambda _: mock_handle mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [mock_handle] mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.get_session = lambda *_: mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') infohash = MockObject() infohash.info_hash = lambda: 'a' * 20 self.assertEqual(self.ltmgr.add_torrent(None, {'ti': infohash}), mock_handle) def test_remove_invalid_torrent(self): """ Tests a successful removal status of torrents without a handle """ self.ltmgr.initialize() mock_dl = MockObject() mock_dl.handle = None self.assertTrue(self.ltmgr.remove_torrent(mock_dl).called) def test_remove_invalid_handle_torrent(self): """ Tests a successful removal status of torrents with an invalid handle """ self.ltmgr.initialize() mock_handle = MockObject() mock_handle.is_valid = lambda: False mock_dl = MockObject() mock_dl.handle = mock_handle self.assertTrue(self.ltmgr.remove_torrent(mock_dl).called) def test_remove_unregistered_torrent(self): """ Tests a successful removal status of torrents which aren't known """ self.ltmgr.initialize() mock_handle = MockObject() mock_handle.is_valid = lambda: False alert = type('torrent_removed_alert', (object, ), dict(handle=mock_handle, info_hash='0' * 20)) self.ltmgr.process_alert(alert()) self.assertNotIn('0' * 20, self.ltmgr.torrents) def test_start_download_corrupt(self): """ Testing whether starting the download of a corrupt torrent file raises an exception """ self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') corrupt_file = os.path.join(self.LIBTORRENT_FILES_DIR, 'corrupt_torrent.torrent') self.assertRaises(TorrentFileException, self.ltmgr.start_download, torrentfilename=corrupt_file) def test_start_download_duplicate(self): """ Test the starting of a download when there are no new trackers """ mock_tdef = MockObject() mock_tdef.get_infohash = lambda: 'a' * 20 mock_tdef.get_trackers_as_single_tuple = lambda: tuple() mock_download = MockObject() mock_download.get_def = lambda: mock_tdef self.tribler_session.get_download = lambda _: mock_download self.ltmgr.trsession = self.tribler_session self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') self.assertRaises(DuplicateDownloadException, self.ltmgr.start_download, infohash='a' * 20, tdef=mock_tdef) def test_set_proxy_settings(self): """ Test setting the proxy settings """ def on_proxy_set(settings): self.assertTrue(settings) self.assertEqual(settings.hostname, 'a') self.assertEqual(settings.port, 1234) self.assertEqual(settings.username, 'abc') self.assertEqual(settings.password, 'def') def on_set_settings(settings): self.assertTrue(settings) self.assertEqual(settings['proxy_hostname'], 'a') self.assertEqual(settings['proxy_port'], 1234) self.assertEqual(settings['proxy_username'], 'abc') self.assertEqual(settings['proxy_password'], 'def') self.assertEqual(settings['proxy_peer_connections'], True) self.assertEqual(settings['proxy_hostnames'], True) mock_lt_session = MockObject() mock_lt_session.get_settings = lambda: {} mock_lt_session.set_settings = on_set_settings mock_lt_session.set_proxy = on_proxy_set # Libtorrent < 1.1.0 uses set_proxy to set proxy settings self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') 
self.ltmgr.set_proxy_settings(mock_lt_session, 0, ('a', "1234"), ('abc', 'def')) def test_save_resume_preresolved_magnet(self): """ Test whether a magnet link correctly writes save-resume data before it is resolved. This can happen when a magnet link is added when the user does not have internet. """ self.ltmgr.initialize() self.ltmgr.trsession = self.tribler_session self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') mock_tdef = MockObject() mock_tdef.get_infohash = lambda: 'a' * 20 self.tribler_session.get_download = lambda _: None self.tribler_session.get_downloads_pstate_dir = lambda: self.ltmgr.metadata_tmpdir mock_lm = MockObject() mock_lm.ltmgr = self.ltmgr mock_lm.tunnel_community = None self.tribler_session.lm = mock_lm def dl_from_tdef(tdef, _): dl = LibtorrentDownloadImpl(self.tribler_session, tdef) dl.setup() dl.cancel_all_pending_tasks() return dl self.tribler_session.start_download_from_tdef = dl_from_tdef download = self.ltmgr.start_download_from_magnet( "magnet:?xt=urn:btih:" + ('1' * 40)) basename = binascii.hexlify( download.get_def().get_infohash()) + '.state' filename = os.path.join(download.session.get_downloads_pstate_dir(), basename) self.assertTrue(os.path.isfile(filename))
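# --- Illustrative sketch (not part of the Tribler sources): the attribute-stubbing pattern the
# tests above rely on. The real MockObject lives in Tribler's test utilities; this stand-in only
# assumes it is an empty object whose attributes can be assigned freely, and make_fake_ltsession()
# is a hypothetical helper showing how the tests fake a libtorrent session with plain lambdas.

class _SketchMockObject(object):
    """Bare object that accepts arbitrary attribute assignment, like the MockObject used above."""
    pass


def make_fake_ltsession(handle):
    # Expose only the calls the manager touches in these tests:
    # adding/finding torrents plus the shutdown hooks.
    ltsession = _SketchMockObject()
    ltsession.add_torrent = lambda _: handle
    ltsession.find_torrent = lambda _: handle
    ltsession.get_torrents = lambda: []
    ltsession.stop_upnp = lambda: None
    ltsession.save_state = lambda: None
    return ltsession


if __name__ == '__main__':
    fake_handle = _SketchMockObject()
    fake_handle.is_valid = lambda: True
    assert make_fake_ltsession(fake_handle).find_torrent('a' * 20) is fake_handle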
class TestLibtorrentMgr(AbstractServer): FILE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) LIBTORRENT_FILES_DIR = os.path.abspath( os.path.join(FILE_DIR, u"../data/libtorrent/")) @inlineCallbacks def setUp(self): yield super(TestLibtorrentMgr, self).setUp() self.tribler_session = MockObject() self.tribler_session.lm = MockObject() self.tribler_session.notifier = Notifier() self.tribler_session.state_dir = self.session_base_dir self.tribler_session.trustchain_keypair = MockObject() self.tribler_session.trustchain_keypair.key_to_hash = lambda: 'a' * 20 self.tribler_session.notify_shutdown_state = lambda _: None self.tribler_session.config = MockObject() self.tribler_session.config.get_libtorrent_utp = lambda: True self.tribler_session.config.get_libtorrent_proxy_settings = lambda: ( 0, None, None) self.tribler_session.config.get_anon_proxy_settings = lambda: (2, ( '127.0.0.1', [1338]), None) self.tribler_session.config.get_libtorrent_port = lambda: 1337 self.tribler_session.config.get_anon_listen_port = lambda: 1338 self.tribler_session.config.get_state_dir = lambda: self.session_base_dir self.tribler_session.config.set_listen_port_runtime = lambda: None self.tribler_session.config.get_libtorrent_max_upload_rate = lambda: 100 self.tribler_session.config.get_libtorrent_max_download_rate = lambda: 120 self.tribler_session.config.get_libtorrent_dht_enabled = lambda: False self.tribler_session.config.set_libtorrent_port_runtime = lambda _: None self.ltmgr = LibtorrentMgr(self.tribler_session) @inlineCallbacks def tearDown(self): self.ltmgr.shutdown(timeout=0) self.assertTrue( os.path.exists(os.path.join(self.session_base_dir, 'lt.state'))) yield super(TestLibtorrentMgr, self).tearDown() def test_get_session_zero_hops(self): self.ltmgr.initialize() ltsession = self.ltmgr.get_session(0) self.assertTrue(ltsession) def test_get_session_one_hop(self): self.ltmgr.initialize() ltsession = self.ltmgr.get_session(1) self.assertTrue(ltsession) def test_get_session_zero_hops_corrupt_lt_state(self): file = open(os.path.join(self.session_base_dir, 'lt.state'), "w") file.write("Lorem ipsum") file.close() self.ltmgr.initialize() ltsession = self.ltmgr.get_session(0) self.assertTrue(ltsession) def test_get_session_zero_hops_working_lt_state(self): shutil.copy(os.path.join(self.LIBTORRENT_FILES_DIR, 'lt.state'), os.path.join(self.session_base_dir, 'lt.state')) self.ltmgr.initialize() ltsession = self.ltmgr.get_session(0) self.assertTrue(ltsession) def test_get_metainfo_not_ready(self): """ Testing the metainfo fetching method when the DHT is not ready """ self.ltmgr.initialize() self.assertFalse(self.ltmgr.get_metainfo("a" * 20, None)) @trial_timeout(20) def test_get_metainfo(self): """ Testing the metainfo fetching method """ test_deferred = Deferred() def metainfo_cb(metainfo): self.assertEqual( metainfo, { 'info': { 'pieces': ['a'] }, 'leechers': 0, 'nodes': [], 'seeders': 0, 'initial peers': [] }) test_deferred.callback(None) infohash = "a" * 20 self.ltmgr.initialize() torrent_info = MockObject() torrent_info.metadata = lambda: bencode({'pieces': ['a']}) torrent_info.trackers = lambda: [] fake_handle = MockObject() fake_handle.is_valid = lambda: True fake_handle.has_metadata = lambda: True fake_handle.get_peer_info = lambda: [] fake_handle.torrent_file = lambda: torrent_info self.ltmgr.ltsession_metainfo.add_torrent = lambda *_: fake_handle self.ltmgr.ltsession_metainfo.remove_torrent = lambda *_: None fake_alert = type('lt.metadata_received_alert', (object, ), dict(handle=fake_handle)) 
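# The fake lt.metadata_received_alert above is returned through the stubbed pop_alerts(), so the
# manager's alert-processing path treats the metadata as received and invokes metainfo_cb with a
# dict assembled from the stubbed torrent_file(), trackers() and get_peer_info() calls.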
self.ltmgr.ltsession_metainfo.pop_alerts = lambda: [fake_alert] self.ltmgr.get_metainfo(unhexlify(infohash), metainfo_cb) return test_deferred @trial_timeout(20) def test_get_metainfo_cache(self): """ Testing metainfo caching """ test_deferred = Deferred() def metainfo_cb(metainfo): self.assertEqual(metainfo, "test") test_deferred.callback(None) self.ltmgr.initialize() self.ltmgr.metainfo_cache[hexlify("a" * 20)] = {'meta_info': 'test'} self.ltmgr.get_metainfo("a" * 20, metainfo_cb) return test_deferred @trial_timeout(20) def test_got_metainfo(self): """ Testing whether the callback is correctly invoked when we received metainfo """ test_deferred = Deferred() self.ltmgr.initialize() def metainfo_cb(metainfo): self.assertDictEqual( metainfo, { 'info': { 'pieces': ['a'] }, 'leechers': 0, 'nodes': [], 'seeders': 0, 'initial peers': [] }) test_deferred.callback(None) fake_handle = MockObject() torrent_info = MockObject() torrent_info.metadata = lambda: bencode({'pieces': ['a']}) torrent_info.trackers = lambda: [] fake_handle.get_peer_info = lambda: [] fake_handle.torrent_file = lambda: torrent_info self.ltmgr.ltsession_metainfo.remove_torrent = lambda *_: None self.ltmgr.metainfo_requests['a' * 20] = { 'handle': fake_handle, 'timeout_callbacks': [], 'callbacks': [metainfo_cb], 'notify': False } self.ltmgr.got_metainfo("a" * 20) return test_deferred @trial_timeout(20) def test_got_metainfo_timeout(self): """ Testing whether the callback is correctly invoked when we received metainfo after timeout """ test_deferred = Deferred() def metainfo_timeout_cb(metainfo): self.assertEqual(metainfo, 'a' * 20) test_deferred.callback(None) fake_handle = MockObject() self.ltmgr.initialize() self.ltmgr.metainfo_requests[hexlify('a' * 20)] = { 'handle': fake_handle, 'timeout_callbacks': [metainfo_timeout_cb], 'callbacks': [], 'notify': True } self.ltmgr.ltsession_metainfo.remove_torrent = lambda _dummy1, _dummy2: None self.ltmgr.got_metainfo(hexlify('a' * 20), timeout=True) return test_deferred @trial_timeout(20) def test_get_metainfo_with_already_added_torrent(self): """ Testing metainfo fetching for a torrent which is already in session. got_metainfo() should be called with timeout=False. 
""" magnet_link = "magnet:?xt=urn:btih:f72636475a375653083e49d501601675ce3e6619&dn=ubuntu-16.04.3-server-i386.iso" test_deferred = Deferred() def fake_got_metainfo(_, timeout): self.assertFalse(timeout, "Timeout should not be True") test_deferred.callback(None) mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: True mock_handle.has_metadata = lambda: True mock_ltsession = MockObject() mock_ltsession.add_torrent = lambda _: mock_handle mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [] mock_ltsession.start_upnp = lambda: None mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.ltsession_metainfo = mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') self.ltmgr.got_metainfo = fake_got_metainfo self.ltmgr.get_metainfo(magnet_link, lambda _: None) return test_deferred @trial_timeout(20) def test_add_torrent(self): """ Testing the addition of a torrent to the libtorrent manager """ test_deferred = Deferred() mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: False mock_error = MockObject() mock_error.value = lambda: None mock_alert = type('add_torrent_alert', (object, ), dict(handle=mock_handle, error=mock_error))() mock_ltsession = MockObject() mock_ltsession.async_add_torrent = lambda _: reactor.callLater( 0.1, self.ltmgr.process_alert, mock_alert) mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [] mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.get_session = lambda *_: mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') infohash = MockObject() infohash.info_hash = lambda: 'a' * 20 mock_download = MockObject() mock_download.deferred_added = Deferred() def cb_torrent_added(handle): self.assertEqual(handle, mock_handle) test_deferred.callback(None) self.ltmgr.add_torrent(mock_download, { 'ti': infohash }).addCallback(cb_torrent_added) return test_deferred @trial_timeout(20) def test_add_torrent_desync(self): """ Testing the addition of a torrent to the libtorrent manager, if it already exists in the session. 
""" mock_handle = MockObject() mock_handle.info_hash = lambda: 'a' * 20 mock_handle.is_valid = lambda: True mock_alert = type('add_torrent_alert', (object, ), dict(handle=mock_handle)) mock_ltsession = MockObject() mock_ltsession.async_add_torrent = lambda _: self.ltmgr.process_alert( mock_alert) mock_ltsession.find_torrent = lambda _: mock_handle mock_ltsession.get_torrents = lambda: [mock_handle] mock_ltsession.stop_upnp = lambda: None mock_ltsession.save_state = lambda: None self.ltmgr.get_session = lambda *_: mock_ltsession self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') infohash = MockObject() infohash.info_hash = lambda: 'a' * 20 mock_download = MockObject() mock_download.deferred_added = Deferred() return self.ltmgr.add_torrent(mock_download, { 'ti': infohash }).addCallback(lambda handle: self.assertEqual(handle, mock_handle)) def test_remove_invalid_torrent(self): """ Tests a successful removal status of torrents without a handle """ self.ltmgr.initialize() mock_dl = MockObject() mock_dl.handle = None self.assertTrue(self.ltmgr.remove_torrent(mock_dl).called) def test_remove_invalid_handle_torrent(self): """ Tests a successful removal status of torrents with an invalid handle """ self.ltmgr.initialize() mock_handle = MockObject() mock_handle.is_valid = lambda: False mock_dl = MockObject() mock_dl.handle = mock_handle self.assertTrue(self.ltmgr.remove_torrent(mock_dl).called) def test_remove_unregistered_torrent(self): """ Tests a successful removal status of torrents which aren't known """ self.ltmgr.initialize() mock_handle = MockObject() mock_handle.is_valid = lambda: False alert = type('torrent_removed_alert', (object, ), dict(handle=mock_handle, info_hash='0' * 20)) self.ltmgr.process_alert(alert()) self.assertNotIn('0' * 20, self.ltmgr.torrents) def test_start_download_corrupt(self): """ Testing whether starting the download of a corrupt torrent file raises an exception """ self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') corrupt_file = os.path.join(self.LIBTORRENT_FILES_DIR, 'corrupt_torrent.torrent') self.assertRaises(TorrentFileException, self.ltmgr.start_download, torrentfilename=corrupt_file) def test_start_download_duplicate(self): """ Test the starting of a download when there are no new trackers """ mock_tdef = MockObject() mock_tdef.get_infohash = lambda: 'a' * 20 mock_tdef.get_trackers_as_single_tuple = lambda: tuple() mock_download = MockObject() mock_download.get_def = lambda: mock_tdef mock_download.get_credit_mining = lambda: False self.tribler_session.get_download = lambda _: mock_download self.tribler_session.start_download_from_tdef = lambda tdef, _: MockObject( ) self.ltmgr.tribler_session = self.tribler_session self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') self.ltmgr.start_download(infohash='a' * 20, tdef=mock_tdef) def test_set_proxy_settings(self): """ Test setting the proxy settings """ def on_proxy_set(settings): self.assertTrue(settings) self.assertEqual(settings.hostname, 'a') self.assertEqual(settings.port, 1234) self.assertEqual(settings.username, 'abc') self.assertEqual(settings.password, 'def') def on_set_settings(settings): self.assertTrue(settings) self.assertEqual(settings['proxy_hostname'], 'a') self.assertEqual(settings['proxy_port'], 1234) self.assertEqual(settings['proxy_username'], 'abc') self.assertEqual(settings['proxy_password'], 'def') self.assertEqual(settings['proxy_peer_connections'], True) 
self.assertEqual(settings['proxy_hostnames'], True) mock_lt_session = MockObject() mock_lt_session.get_settings = lambda: {} mock_lt_session.set_settings = on_set_settings mock_lt_session.set_proxy = on_proxy_set # Libtorrent < 1.1.0 uses set_proxy to set proxy settings self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') self.ltmgr.set_proxy_settings(mock_lt_session, 0, ('a', "1234"), ('abc', 'def')) def test_save_resume_preresolved_magnet(self): """ Test whether a magnet link correctly writes save-resume data before it is resolved. This can happen when a magnet link is added when the user does not have internet. """ self.ltmgr.initialize() self.ltmgr.trsession = self.tribler_session self.ltmgr.metadata_tmpdir = tempfile.mkdtemp( suffix=u'tribler_metainfo_tmpdir') mock_tdef = MockObject() mock_tdef.get_infohash = lambda: 'a' * 20 self.tribler_session.get_download = lambda _: None self.tribler_session.get_downloads_pstate_dir = lambda: self.ltmgr.metadata_tmpdir mock_lm = MockObject() mock_lm.ltmgr = self.ltmgr mock_lm.tunnel_community = None self.tribler_session.lm = mock_lm def dl_from_tdef(tdef, _): dl = LibtorrentDownloadImpl(self.tribler_session, tdef) dl.setup() dl.cancel_all_pending_tasks() return dl self.tribler_session.start_download_from_tdef = dl_from_tdef download = self.ltmgr.start_download_from_magnet( "magnet:?xt=urn:btih:" + ('1' * 40)) basename = hexlify(download.get_def().get_infohash()) + '.state' filename = os.path.join(download.session.get_downloads_pstate_dir(), basename) self.assertTrue(os.path.isfile(filename)) @trial_timeout(5) def test_callback_on_alert(self): """ Test whether the alert callback is called when a libtorrent alert is posted """ self.ltmgr.default_alert_mask = 0xffffffff test_deferred = Deferred() def callback(*args): self.ltmgr.alert_callback = None test_deferred.callback(None) callback.called = False self.ltmgr.alert_callback = callback self.ltmgr.initialize() self.ltmgr._task_process_alerts() return test_deferred def test_payout_on_disconnect(self): """ Test whether a payout is initialized when a peer disconnects """ class peer_disconnected_alert(object): def __init__(self): self.pid = MockObject() self.pid.to_string = lambda: 'a' * 20 def mocked_do_payout(mid): self.assertEqual(mid, 'a' * 20) mocked_do_payout.called = True mocked_do_payout.called = False disconnect_alert = peer_disconnected_alert() self.ltmgr.tribler_session.lm.payout_manager = MockObject() self.ltmgr.tribler_session.lm.payout_manager.do_payout = mocked_do_payout self.ltmgr.initialize() self.ltmgr.get_session(0).pop_alerts = lambda: [disconnect_alert] self.ltmgr._task_process_alerts() self.assertTrue(mocked_do_payout.called) def test_post_session_stats(self): """ Test whether post_session_stats actually updates the state of libtorrent readiness for clean shutdown. """ def check_if_session_shutdown_is_ready(): self.ltmgr._task_process_alerts() self.assertTrue(self.ltmgr.lt_session_shutdown_ready[0]) self.ltmgr.default_alert_mask = 0xffffffff self.ltmgr.initialize() # Zero hop session should be initialized self.assertFalse(self.ltmgr.lt_session_shutdown_ready[0]) # Check for status with session stats alert self.ltmgr.post_session_stats(hops=0) # Wait sometime to get the alert and check the status return deferLater(reactor, 0.01, check_if_session_shutdown_is_ready)
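# --- Illustrative sketch (not part of the Tribler sources): the Deferred-driven test idiom used
# throughout the class above. Each asynchronous test creates a Deferred, fires it from the
# callback under test and returns it, so Twisted trial waits for completion (bounded by the
# trial_timeout decorator). FakeFetcher and fetch_metainfo below are hypothetical names; only
# the Deferred/reactor API is real Twisted.

from twisted.internet import reactor
from twisted.internet.defer import Deferred


class FakeFetcher(object):
    """Toy stand-in that delivers metainfo asynchronously, in the spirit of got_metainfo()."""

    def fetch_metainfo(self, infohash, callback):
        # Deliver a canned result on the next reactor iteration.
        reactor.callLater(0, callback, {'info': {'pieces': ['a']}})


def test_fetch_metainfo_style():
    test_deferred = Deferred()

    def metainfo_cb(metainfo):
        assert metainfo['info'] == {'pieces': ['a']}
        test_deferred.callback(None)

    FakeFetcher().fetch_metainfo('a' * 20, metainfo_cb)
    # trial waits on the returned Deferred before marking the test as passed
    return test_deferred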
class TestLibtorrentMgr(AbstractServer):

    FILE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    LIBTORRENT_FILES_DIR = os.path.abspath(os.path.join(FILE_DIR, u"../data/libtorrent/"))

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def setUp(self, annotate=True):
        yield super(TestLibtorrentMgr, self).setUp(annotate)

        self.tribler_session = MockObject()
        self.tribler_session.notifier = Notifier()
        self.tribler_session.state_dir = self.session_base_dir

        self.tribler_session.config = MockObject()
        self.tribler_session.config.get_libtorrent_utp = lambda: True
        self.tribler_session.config.get_libtorrent_proxy_settings = lambda: (0, None, None)
        self.tribler_session.config.get_anon_proxy_settings = lambda: (2, ('127.0.0.1', [1338]), None)
        self.tribler_session.config.get_libtorrent_port = lambda: 1337
        self.tribler_session.config.get_anon_listen_port = lambda: 1338
        self.tribler_session.config.get_state_dir = lambda: self.session_base_dir
        self.tribler_session.config.set_listen_port_runtime = lambda: None
        self.tribler_session.config.get_libtorrent_max_upload_rate = lambda: 100
        self.tribler_session.config.get_libtorrent_max_download_rate = lambda: 120

        self.ltmgr = LibtorrentMgr(self.tribler_session)

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def tearDown(self, annotate=True):
        self.ltmgr.shutdown()
        self.assertTrue(os.path.exists(os.path.join(self.session_base_dir, 'lt.state')))
        yield super(TestLibtorrentMgr, self).tearDown(annotate)

    def test_get_session_zero_hops(self):
        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(0)
        self.assertTrue(ltsession)

    def test_get_session_one_hop(self):
        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(1)
        self.assertTrue(ltsession)

    def test_get_session_zero_hops_corrupt_lt_state(self):
        file = open(os.path.join(self.session_base_dir, 'lt.state'), "w")
        file.write("Lorem ipsum")
        file.close()

        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(0)
        self.assertTrue(ltsession)

    def test_get_session_zero_hops_working_lt_state(self):
        shutil.copy(os.path.join(self.LIBTORRENT_FILES_DIR, 'lt.state'),
                    os.path.join(self.session_base_dir, 'lt.state'))
        self.ltmgr.initialize()
        ltsession = self.ltmgr.get_session(0)
        self.assertTrue(ltsession)

    def test_get_metainfo_not_ready(self):
        """
        Testing the metainfo fetching method when the DHT is not ready
        """
        self.ltmgr.initialize()
        self.assertFalse(self.ltmgr.get_metainfo("a" * 20, None))

    @deferred(timeout=20)
    def test_get_metainfo(self):
        """
        Testing the metainfo fetching method
        """
        test_deferred = Deferred()

        def metainfo_cb(metainfo):
            self.assertEqual(metainfo, "test")
            test_deferred.callback(None)

        self.ltmgr.initialize()
        self.ltmgr.is_dht_ready = lambda: True
        self.ltmgr.metainfo_cache[("a" * 20).encode('hex')] = {'meta_info': 'test'}
        self.ltmgr.get_metainfo("a" * 20, metainfo_cb)
        return test_deferred

    @deferred(timeout=20)
    def test_got_metainfo(self):
        """
        Testing whether the callback is correctly invoked when we received metainfo
        """
        test_deferred = Deferred()
        self.ltmgr.initialize()

        def metainfo_cb(metainfo):
            self.assertDictEqual(metainfo, {'info': {'pieces': ['a']}, 'leechers': 0,
                                            'nodes': [], 'seeders': 0, 'initial peers': []})
            test_deferred.callback(None)

        fake_handle = MockObject()

        torrent_info = MockObject()
        torrent_info.metadata = lambda: bencode({'pieces': ['a']})
        torrent_info.trackers = lambda: []

        fake_handle.get_peer_info = lambda: []
        fake_handle.torrent_file = lambda: torrent_info

        self.ltmgr.get_session().remove_torrent = lambda *_: None

        self.ltmgr.metainfo_requests['a' * 20] = {
            'handle': fake_handle,
            'timeout_callbacks': [],
            'callbacks': [metainfo_cb],
            'notify': False
        }
        self.ltmgr.got_metainfo("a" * 20)
        return test_deferred

    @deferred(timeout=20)
    def test_got_metainfo_timeout(self):
        """
        Testing whether the callback is correctly invoked when we received metainfo after timeout
        """
        test_deferred = Deferred()

        def metainfo_timeout_cb(metainfo):
            self.assertEqual(metainfo, 'a' * 20)
            test_deferred.callback(None)

        fake_handle = MockObject()

        self.ltmgr.initialize()
        self.ltmgr.metainfo_requests[('a' * 20).encode('hex')] = {'handle': fake_handle,
                                                                  'timeout_callbacks': [metainfo_timeout_cb],
                                                                  'callbacks': [],
                                                                  'notify': True}
        self.ltmgr.get_session().remove_torrent = lambda _dummy1, _dummy2: None
        self.ltmgr.got_metainfo(('a' * 20).encode('hex'), timeout=True)
        return test_deferred

    def test_add_torrent(self):
        """
        Testing the addition of a torrent to the libtorrent manager
        """
        mock_handle = MockObject()
        mock_handle.info_hash = lambda: 'a' * 20

        mock_ltsession = MockObject()
        mock_ltsession.add_torrent = lambda _: mock_handle
        mock_ltsession.stop_upnp = lambda: None
        mock_ltsession.save_state = lambda: None

        self.ltmgr.get_session = lambda *_: mock_ltsession
        self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir')

        infohash = MockObject()
        infohash.info_hash = lambda: 'a' * 20
        self.assertEqual(self.ltmgr.add_torrent(None, {'ti': infohash}), mock_handle)
        self.assertRaises(DuplicateDownloadException, self.ltmgr.add_torrent, None, {'ti': infohash})

    def test_start_download_corrupt(self):
        """
        Testing whether starting the download of a corrupt torrent file raises an exception
        """
        self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir')
        corrupt_file = os.path.join(self.LIBTORRENT_FILES_DIR, 'corrupt_torrent.torrent')
        self.assertRaises(TorrentFileException, self.ltmgr.start_download, torrentfilename=corrupt_file)

    def test_start_download_duplicate(self):
        """
        Test the starting of a download when there are no new trackers
        """
        mock_tdef = MockObject()
        mock_tdef.get_infohash = lambda: 'a' * 20
        mock_tdef.get_trackers_as_single_tuple = lambda: tuple()

        mock_download = MockObject()
        mock_download.get_def = lambda: mock_tdef

        self.tribler_session.get_download = lambda _: mock_download
        self.ltmgr.tribler_session = self.tribler_session
        self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir')
        self.assertRaises(DuplicateDownloadException, self.ltmgr.start_download,
                          infohash='a' * 20, tdef=mock_tdef)

    def test_set_proxy_settings(self):
        """
        Test setting the proxy settings
        """
        def on_proxy_set(settings):
            self.assertTrue(settings)
            self.assertEqual(settings.hostname, 'a')
            self.assertEqual(settings.port, 1234)
            self.assertEqual(settings.username, 'abc')
            self.assertEqual(settings.password, 'def')

        mock_lt_session = MockObject()
        mock_lt_session.set_proxy = on_proxy_set

        self.ltmgr.metadata_tmpdir = tempfile.mkdtemp(suffix=u'tribler_metainfo_tmpdir')
        self.ltmgr.set_proxy_settings(mock_lt_session, 0, ('a', "1234"), ('abc', 'def'))
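# --- Illustrative sketch (not part of the Tribler sources): the two libtorrent proxy APIs the
# test_set_proxy_settings tests above exercise. Older sessions expose set_proxy() taking a
# proxy_settings object, newer ones take a flat dict via get_settings()/set_settings().
# apply_proxy_settings() is a hypothetical helper that only mirrors the fields the assertions
# check; the hasattr() dispatch is an assumption, not LibtorrentMgr's actual code.

def apply_proxy_settings(ltsession, ptype, server=None, auth=None):
    host, port = server if server else (None, None)
    username, password = auth if auth else (None, None)

    if hasattr(ltsession, 'get_settings'):
        # Newer libtorrent: merge proxy fields into the session settings dict.
        settings = ltsession.get_settings()
        settings.update({'proxy_type': ptype,
                         'proxy_hostname': host,
                         'proxy_port': int(port) if port else 0,
                         'proxy_username': username,
                         'proxy_password': password,
                         'proxy_peer_connections': True,
                         'proxy_hostnames': True})
        ltsession.set_settings(settings)
    else:
        # Older libtorrent (< 1.1.0): hand a proxy_settings-like object to set_proxy().
        proxy = type('proxy_settings', (object,), {})()
        proxy.hostname, proxy.port = host, int(port) if port else 0
        proxy.username, proxy.password = username, password
        ltsession.set_proxy(proxy)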
class TriblerLaunchMany(Thread): def __init__(self): """ Called only once (unless we have multiple Sessions) by MainThread """ Thread.__init__(self) self.setDaemon(True) self.setName("Network" + self.getName()) self.initComplete = False self.registered = False self.dispersy = None self.database_thread = None def register(self, session, sesslock): if not self.registered: self.registered = True self.session = session self.sesslock = sesslock self.downloads = {} config = session.sessconfig # Should be safe at startup self.upnp_ports = [] # Orig self.sessdoneflag = Event() self.rawserver = RawServer(self.sessdoneflag, config['timeout_check_interval'], config['timeout'], ipv6_enable=config['ipv6_enabled'], failfunc=self.rawserver_fatalerrorfunc, errorfunc=self.rawserver_nonfatalerrorfunc) self.rawserver.add_task(self.rawserver_keepalive, 1) self.listen_port = config['minport'] self.shutdownstarttime = None self.multihandler = MultiHandler(self.rawserver, self.sessdoneflag) # SWIFTPROC swift_exists = config['swiftproc'] and (os.path.exists(config['swiftpath']) or os.path.exists(config['swiftpath'] + '.exe')) if swift_exists: from Tribler.Core.Swift.SwiftProcessMgr import SwiftProcessMgr self.spm = SwiftProcessMgr(config['swiftpath'], config['swiftcmdlistenport'], config['swiftdlsperproc'], self.session.get_swift_tunnel_listen_port(), self.sesslock) try: self.swift_process = self.spm.get_or_create_sp(self.session.get_swift_working_dir(), self.session.get_torrent_collecting_dir(), self.session.get_swift_tunnel_listen_port(), self.session.get_swift_tunnel_httpgw_listen_port(), self.session.get_swift_tunnel_cmdgw_listen_port()) self.upnp_ports.append((self.session.get_swift_tunnel_listen_port(), 'UDP')) except OSError: # could not find/run swift print >> sys.stderr, "lmc: could not start a swift process" else: self.spm = None self.swift_process = None # Dispersy self.session.dispersy_member = None if config['dispersy']: from Tribler.dispersy.callback import Callback from Tribler.dispersy.dispersy import Dispersy from Tribler.dispersy.endpoint import RawserverEndpoint, TunnelEndpoint from Tribler.dispersy.community import HardKilledCommunity # set communication endpoint if config['dispersy-tunnel-over-swift'] and self.swift_process: endpoint = TunnelEndpoint(self.swift_process) else: endpoint = RawserverEndpoint(self.rawserver, config['dispersy_port']) callback = Callback("Dispersy") # WARNING NAME SIGNIFICANT working_directory = unicode(config['state_dir']) self.dispersy = Dispersy(callback, endpoint, working_directory) # TODO: see if we can postpone dispersy.start to improve GUI responsiveness. # However, for now we must start self.dispersy.callback before running # try_register(nocachedb, self.database_thread)! 
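# Once Dispersy is started below, its WAN port is reported on stderr and queued for UPnP mapping,
# the HardKilledCommunity is auto-loaded, the session is notified via NTFY_DISPERSY/NTFY_STARTED,
# and the permid keypair is converted into the Dispersy member used by the communities.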
self.dispersy.start() print >> sys.stderr, "lmc: Dispersy is listening on port", self.dispersy.wan_address[1], "using", endpoint self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP')) self.dispersy.callback.call(self.dispersy.define_auto_load, args=(HardKilledCommunity,), kargs={'load': True}) # notify dispersy finished loading self.session.uch.notify(NTFY_DISPERSY, NTFY_STARTED, None) from Tribler.Core.permid import read_keypair from Tribler.dispersy.crypto import ec_to_public_bin, ec_to_private_bin keypair = read_keypair(self.session.get_permid_keypair_filename()) self.session.dispersy_member = callback.call(self.dispersy.get_member, (ec_to_public_bin(keypair), ec_to_private_bin(keypair))) self.database_thread = callback else: class FakeCallback(): def __init__(self): from Tribler.Utilities.TimedTaskQueue import TimedTaskQueue self.queue = TimedTaskQueue("FakeCallback") def register(self, call, args=(), kargs=None, delay=0.0, priority=0, id_=u"", callback=None, callback_args=(), callback_kargs=None, include_id=False): def do_task(): if kargs: call(*args, **kargs) else: call(*args) if callback: if callback_kargs: callback(*callback_args, **callback_kargs) else: callback(*callback_args) self.queue.add_task(do_task, t=delay) def shutdown(self, immediately=False): self.queue.shutdown(immediately) self.database_thread = FakeCallback() if config['megacache']: import Tribler.Core.CacheDB.cachedb as cachedb from Tribler.Core.CacheDB.SqliteCacheDBHandler import PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler, VoteCastDBHandler, ChannelCastDBHandler, NetworkBuzzDBHandler, UserEventLogDBHandler from Tribler.Category.Category import Category from Tribler.Core.Tag.Extraction import TermExtraction from Tribler.Core.CacheDB.sqlitecachedb import try_register if DEBUG: print >> sys.stderr, 'tlm: Reading Session state from', config['state_dir'] nocachedb = cachedb.init(config, self.rawserver_fatalerrorfunc) try_register(nocachedb, self.database_thread) self.cat = Category.getInstance(config['install_dir']) self.term = TermExtraction.getInstance(config['install_dir']) self.peer_db = PeerDBHandler.getInstance() self.peer_db.registerConnectionUpdater(self.session) self.torrent_db = TorrentDBHandler.getInstance() self.torrent_db.register(os.path.abspath(config['torrent_collecting_dir'])) self.mypref_db = MyPreferenceDBHandler.getInstance() self.votecast_db = VoteCastDBHandler.getInstance() self.votecast_db.registerSession(self.session) self.channelcast_db = ChannelCastDBHandler.getInstance() self.channelcast_db.registerSession(self.session) self.nb_db = NetworkBuzzDBHandler.getInstance() self.ue_db = UserEventLogDBHandler.getInstance() if self.dispersy: self.dispersy.database.attach_commit_callback(self.channelcast_db._db.commitNow) else: config['torrent_checking'] = 0 self.rtorrent_handler = None if config['torrent_collecting']: from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler self.rtorrent_handler = RemoteTorrentHandler() def init(self): config = self.session.sessconfig # Should be safe at startup self.mainline_dht = None if config['mainline_dht']: from Tribler.Core.DecentralizedTracking import mainlineDHT try: self.mainline_dht = mainlineDHT.init(('127.0.0.1', config['mainline_dht_port']), config['state_dir'], config['swiftdhtport']) self.upnp_ports.append((config['mainline_dht_port'], 'UDP')) except: print_exc() self.ltmgr = None if config['libtorrent']: from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr self.ltmgr = LibtorrentMgr(self.session, 
ignore_singleton=self.session.ignore_singleton) # add task for tracker checking self.torrent_checking = None if config['torrent_checking']: if config['mainline_dht']: # Create torrent-liveliness checker based on DHT from Tribler.Core.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker c = mainlineDHTChecker.getInstance() c.register(self.mainline_dht) try: from Tribler.TrackerChecking.TorrentChecking import TorrentChecking self.torrent_checking_period = config['torrent_checking_period'] self.torrent_checking = TorrentChecking.getInstance(self.torrent_checking_period) self.run_torrent_check() except: print_exc if self.rtorrent_handler: self.rtorrent_handler.register(self.dispersy, self.database_thread, self.session, int(config['torrent_collecting_max_torrents'])) self.initComplete = True def add(self, tdef, dscfg, pstate=None, initialdlstatus=None, commit=True, setupDelay=0, hidden=False): """ Called by any thread """ d = None self.sesslock.acquire() try: if not isinstance(tdef, TorrentDefNoMetainfo) and not tdef.is_finalized(): raise ValueError("TorrentDef not finalized") infohash = tdef.get_infohash() # Check if running or saved on disk if infohash in self.downloads: raise DuplicateDownloadException() from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl d = LibtorrentDownloadImpl(self.session, tdef) if pstate is None and not tdef.get_live(): # not already resuming pstate = self.load_download_pstate_noexc(infohash) if pstate is not None: if DEBUG: print >> sys.stderr, "tlm: add: pstate is", dlstatus_strings[pstate['dlstate']['status']], pstate['dlstate']['progress'] # Store in list of Downloads, always. self.downloads[infohash] = d d.setup(dscfg, pstate, initialdlstatus, self.network_engine_wrapper_created_callback, self.network_vod_event_callback, wrapperDelay=setupDelay) finally: self.sesslock.release() if d and not hidden and self.session.get_megacache(): def write_my_pref(): torrent_id = self.torrent_db.getTorrentID(infohash) data = {'destination_path': d.get_dest_dir()} self.mypref_db.addMyPreference(torrent_id, data, commit=commit) if isinstance(tdef, TorrentDefNoMetainfo): self.torrent_db.addInfohash(tdef.get_infohash(), commit=commit) self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name().encode('utf_8'), commit=commit) write_my_pref() elif self.rtorrent_handler: self.rtorrent_handler.save_torrent(tdef, write_my_pref) else: self.torrent_db.addExternalTorrent(tdef, source='', extra_info={'status': 'good'}, commit=commit) write_my_pref() return d def network_engine_wrapper_created_callback(self, d, pstate): """ Called by network thread """ try: if pstate is None: # Checkpoint at startup (infohash, pstate) = d.network_checkpoint() self.save_download_pstate(infohash, pstate) except: print_exc() def remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ self.sesslock.acquire() try: d.stop_remove(removestate=removestate, removecontent=removecontent) infohash = d.get_def().get_infohash() if infohash in self.downloads: del self.downloads[infohash] finally: self.sesslock.release() if not hidden: self.remove_id(infohash) def remove_id(self, hash): # this is a bit tricky, as we do not know if this "id" is a roothash or infohash # however a restart will re-add the preference to mypreference if we remove the wrong one def do_db(torrent_db, mypref_db, hash): torrent_id = self.torrent_db.getTorrentID(hash) if torrent_id: self.mypref_db.updateDestDir(torrent_id, "") torrent_id = 
self.torrent_db.getTorrentIDRoot(hash) if torrent_id: self.mypref_db.updateDestDir(torrent_id, "") if self.session.get_megacache(): self.database_thread.register(do_db, args=(self.torrent_db, self.mypref_db, hash), priority=1024) def get_downloads(self): """ Called by any thread """ self.sesslock.acquire() try: return self.downloads.values() # copy, is mutable finally: self.sesslock.release() def get_download(self, hash): """ Called by any thread """ self.sesslock.acquire() try: return self.downloads.get(hash, None) finally: self.sesslock.release() def download_exists(self, infohash): self.sesslock.acquire() try: return infohash in self.downloads finally: self.sesslock.release() def rawserver_fatalerrorfunc(self, e): """ Called by network thread """ if DEBUG: print >> sys.stderr, "tlm: RawServer fatal error func called", e print_exc() def rawserver_nonfatalerrorfunc(self, e): """ Called by network thread """ if DEBUG: print >> sys.stderr, "tlm: RawServer non fatal error func called", e print_exc() # Could log this somewhere, or phase it out def _run(self): """ Called only once by network thread """ try: try: self.start_upnp() self.multihandler.listen_forever() except: print_exc() finally: self.stop_upnp() self.rawserver.shutdown() def rawserver_keepalive(self): """ Hack to prevent rawserver sleeping in select() for a long time, not processing any tasks on its queue at startup time Called by network thread """ self.rawserver.add_task(self.rawserver_keepalive, 1) # # State retrieval # def set_download_states_callback(self, usercallback, getpeerlist, when=0.0): """ Called by any thread """ self.sesslock.acquire() try: # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() finally: self.sesslock.release() for d in dllist: # Arno, 2012-05-23: At Niels' request to get total transferred # stats. Causes MOREINFO message to be sent from swift proc # for every initiated dl. # 2012-07-31: Turn MOREINFO on/off on demand for efficiency. # 2013-04-17: Libtorrent now uses set_moreinfo_stats as well. d.set_moreinfo_stats(True in getpeerlist or d.get_def().get_id() in getpeerlist) network_set_download_states_callback_lambda = lambda: self.network_set_download_states_callback(usercallback) self.rawserver.add_task(network_set_download_states_callback_lambda, when) def network_set_download_states_callback(self, usercallback): """ Called by network thread """ self.sesslock.acquire() try: # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() finally: self.sesslock.release() dslist = [] for d in dllist: try: ds = d.network_get_state(None, False, sessioncalling=True) dslist.append(ds) except: # Niels, 2012-10-18: If Swift connection is crashing, it will raise an exception # We're catching it here to continue building the downloadstates print_exc() # Invoke the usercallback function via a new thread. # After the callback is invoked, the return values will be passed to # the returncallback for post-callback processing. 
self.session.uch.perform_getstate_usercallback(usercallback, dslist, self.sesscb_set_download_states_returncallback) def sesscb_set_download_states_returncallback(self, usercallback, when, newgetpeerlist): """ Called by SessionCallbackThread """ if when > 0.0: # reschedule self.set_download_states_callback(usercallback, newgetpeerlist, when=when) # # Persistence methods # def load_checkpoint(self, initialdlstatus=None, initialdlstatus_dict={}): """ Called by any thread """ if not self.initComplete: network_load_checkpoint_callback_lambda = lambda: self.load_checkpoint(initialdlstatus, initialdlstatus_dict) self.rawserver.add_task(network_load_checkpoint_callback_lambda, 1.0) else: self.sesslock.acquire() filelist = [] try: dir = self.session.get_downloads_pstate_dir() filelist = os.listdir(dir) filelist = [os.path.join(dir, filename) for filename in filelist if filename.endswith('.pickle')] finally: self.sesslock.release() for i, filename in enumerate(filelist): shouldCommit = i + 1 == len(filelist) self.resume_download(filename, initialdlstatus, initialdlstatus_dict, commit=shouldCommit, setupDelay=i * 0.1) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume sesslock already held """ try: dir = self.session.get_downloads_pstate_dir() basename = binascii.hexlify(infohash) + '.pickle' filename = os.path.join(dir, basename) return self.load_download_pstate(filename) except Exception as e: # TODO: remove saved checkpoint? # self.rawserver_nonfatalerrorfunc(e) return None def resume_download(self, filename, initialdlstatus=None, initialdlstatus_dict={}, commit=True, setupDelay=0): tdef = sdef = dscfg = pstate = None try: pstate = self.load_download_pstate(filename) # SWIFTPROC if SwiftDef.is_swift_url(pstate['metainfo']): sdef = SwiftDef.load_from_url(pstate['metainfo']) elif 'infohash' in pstate['metainfo']: tdef = TorrentDefNoMetainfo(pstate['metainfo']['infohash'], pstate['metainfo']['name']) else: tdef = TorrentDef.load_from_dict(pstate['metainfo']) dlconfig = pstate['dlconfig'] if isinstance(dlconfig['saveas'], tuple): dlconfig['saveas'] = dlconfig['saveas'][-1] if sdef and 'name' in dlconfig and isinstance(dlconfig['name'], basestring): sdef.set_name(dlconfig['name']) if sdef and sdef.get_tracker().startswith("127.0.0.1:"): current_port = int(sdef.get_tracker().split(":")[1]) if current_port != self.session.get_swift_dht_listen_port(): print >> sys.stderr, "Modified SwiftDef to new tracker port" sdef.set_tracker("127.0.0.1:%d" % self.session.get_swift_dht_listen_port()) dscfg = DownloadStartupConfig(dlconfig) except: print_exc() # pstate is invalid or non-existing _, file = os.path.split(filename) infohash = binascii.unhexlify(file[:-7]) torrent = self.torrent_db.getTorrent(infohash, keys=['name', 'torrent_file_name', 'swift_torrent_hash'], include_mypref=False) torrentfile = None if torrent: torrent_dir = self.session.get_torrent_collecting_dir() if torrent['swift_torrent_hash']: sdef = SwiftDef(torrent['swift_torrent_hash']) save_name = sdef.get_roothash_as_hex() torrentfile = os.path.join(torrent_dir, save_name) if torrentfile and os.path.isfile(torrentfile): # normal torrentfile is not present, see if readable torrent is there save_name = get_readable_torrent_name(infohash, torrent['name']) torrentfile = os.path.join(torrent_dir, save_name) if torrentfile and os.path.isfile(torrentfile): tdef = TorrentDef.load(torrentfile) defaultDLConfig = DefaultDownloadStartupConfig.getInstance() dscfg = defaultDLConfig.copy() if self.mypref_db != None: preferences = 
self.mypref_db.getMyPrefStatsInfohash(infohash) if preferences: if os.path.isdir(preferences[2]) or preferences[2] == '': dscfg.set_dest_dir(preferences[2]) if DEBUG: print >> sys.stderr, "tlm: load_checkpoint: pstate is", dlstatus_strings[pstate['dlstate']['status']], pstate['dlstate']['progress'] if pstate['engineresumedata'] is None: print >> sys.stderr, "tlm: load_checkpoint: resumedata None" else: print >> sys.stderr, "tlm: load_checkpoint: resumedata len", len(pstate['engineresumedata']) if (tdef or sdef) and dscfg: if dscfg.get_dest_dir() != '': # removed torrent ignoring try: if not self.download_exists((tdef or sdef).get_id()): if tdef: initialdlstatus = initialdlstatus_dict.get(tdef.get_id(), initialdlstatus) self.add(tdef, dscfg, pstate, initialdlstatus, commit=commit, setupDelay=setupDelay) else: initialdlstatus = initialdlstatus_dict.get(sdef.get_id(), initialdlstatus) self.swift_add(sdef, dscfg, pstate, initialdlstatus) else: print >> sys.stderr, "tlm: not resuming checkpoint because download has already been added" except Exception as e: self.rawserver_nonfatalerrorfunc(e) else: print >> sys.stderr, "tlm: removing checkpoint", filename, "destdir is", dscfg.get_dest_dir() os.remove(filename) else: print >> sys.stderr, "tlm: could not resume checkpoint", filename, tdef, dscfg def checkpoint(self, stop=False, checkpoint=True, gracetime=2.0): """ Called by any thread, assume sesslock already held """ # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() if DEBUG or stop: print >> sys.stderr, "tlm: checkpointing", len(dllist), "stopping", stop network_checkpoint_callback_lambda = lambda: self.network_checkpoint_callback(dllist, stop, checkpoint, gracetime) self.rawserver.add_task(network_checkpoint_callback_lambda, 0.0) # TODO: checkpoint overlayapps / friendship msg handler def network_checkpoint_callback(self, dllist, stop, checkpoint, gracetime): """ Called by network thread """ if checkpoint: for d in dllist: try: # Tell all downloads to stop, and save their persistent state # in a infohash -> pstate dict which is then passed to the user # for storage. # if stop: (infohash, pstate) = d.network_stop(False, False) else: (infohash, pstate) = d.network_checkpoint() if DEBUG: print >> sys.stderr, "tlm: network checkpointing:", d.get_def().get_name(), pstate self.save_download_pstate(infohash, pstate) except Exception as e: self.rawserver_nonfatalerrorfunc(e) if stop: # Some grace time for early shutdown tasks if self.shutdownstarttime is not None: now = timemod.time() diff = now - self.shutdownstarttime if diff < gracetime: print >> sys.stderr, "tlm: shutdown: delaying for early shutdown tasks", gracetime - diff delay = gracetime - diff network_shutdown_callback_lambda = lambda: self.network_shutdown() self.rawserver.add_task(network_shutdown_callback_lambda, delay) return self.network_shutdown() def early_shutdown(self): """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time and that can run in parallel to checkpointing, etc. 
""" print >> sys.stderr, "tlm: early_shutdown" # Note: sesslock not held self.shutdownstarttime = timemod.time() if self.rtorrent_handler: self.rtorrent_handler.shutdown() self.rtorrent_handler.delInstance() if self.torrent_checking: self.torrent_checking.shutdown() self.torrent_checking.delInstance() if self.dispersy: print >> sys.stderr, "lmc: Dispersy shutdown", "[%d]" % id(self.dispersy) self.dispersy.stop(666.666) else: self.database_thread.shutdown(True) if self.session.get_megacache(): self.peer_db.delInstance() self.torrent_db.delInstance() self.mypref_db.delInstance() self.votecast_db.delInstance() self.channelcast_db.delInstance() self.nb_db.delInstance() self.ue_db.delInstance() self.cat.delInstance() self.term.delInstance() from Tribler.Core.CacheDB.sqlitecachedb import unregister unregister() # SWIFTPROC if self.spm is not None: self.spm.early_shutdown() if self.mainline_dht: from Tribler.Core.DecentralizedTracking import mainlineDHT mainlineDHT.deinit(self.mainline_dht) def network_shutdown(self): try: print >> sys.stderr, "tlm: network_shutdown" # Arno, 2012-07-04: Obsolete, each thread must close the DBHandler # it uses in its own shutdown procedure. There is no global close # of all per-thread cursors/connections. # # cachedb.done() # SWIFTPROC if self.spm is not None: self.spm.network_shutdown() ts = enumerate_threads() print >> sys.stderr, "tlm: Number of threads still running", len(ts) for t in ts: print >> sys.stderr, "tlm: Thread still running", t.getName(), "daemon", t.isDaemon(), "instance:", t except: print_exc() # Stop network thread self.sessdoneflag.set() # Arno, 2010-08-09: Stop Session pool threads only after gracetime self.session.uch.shutdown() # Shutdown libtorrent session after checkpoints have been made if self.ltmgr: self.ltmgr.shutdown() self.ltmgr.delInstance() def save_download_pstate(self, infohash, pstate): """ Called by network thread """ basename = binascii.hexlify(infohash) + '.pickle' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if DEBUG: print >> sys.stderr, "tlm: network checkpointing: to file", filename f = open(filename, "wb") pickle.dump(pstate, f) f.close() def load_download_pstate(self, filename): """ Called by any thread """ f = open(filename, "rb") pstate = pickle.load(f) f.close() return pstate def run(self): if prctlimported: prctl.set_name("Tribler" + currentThread().getName()) if not self.initComplete: self.init() if PROFILE: fname = "profile-%s" % self.getName() import cProfile cProfile.runctx("self._run()", globals(), locals(), filename=fname) import pstats print >> sys.stderr, "profile: data for %s" % self.getName() pstats.Stats(fname, stream=sys.stderr).sort_stats("cumulative").print_stats(20) else: self._run() def start_upnp(self): if self.ltmgr: self.set_activity(NTFY_ACT_UPNP) for port, protocol in self.upnp_ports: if DEBUG: print >> sys.stderr, "tlm: adding upnp mapping for %d %s" % (port, protocol) self.ltmgr.add_mapping(port, protocol) def stop_upnp(self): if self.ltmgr: self.ltmgr.delete_mappings() # Events from core meant for API user # def dialback_reachable_callback(self): """ Called by overlay+network thread """ self.session.uch.notify(NTFY_REACHABLE, NTFY_INSERT, None, '') def set_activity(self, type, str='', arg2=None): """ Called by overlay + network thread """ # print >>sys.stderr,"tlm: set_activity",type,str,arg2 self.session.uch.notify(NTFY_ACTIVITIES, NTFY_INSERT, type, str, arg2) def network_vod_event_callback(self, videoinfo, event, params): """ Called by network thread """ if 
DEBUG: print >> sys.stderr, "tlm: network_vod_event_callback: event %s, params %s" % (event, params) # Call Session threadpool to call user's callback try: videoinfo['usercallback'](event, params) except: print_exc() def update_torrent_checking_period(self): # dynamically change the interval: update at least once per day if self.rtorrent_handler: ntorrents = self.rtorrent_handler.num_torrents if ntorrents > 0: self.torrent_checking_period = min(max(86400 / ntorrents, 30), 300) # print >> sys.stderr, "torrent_checking_period", self.torrent_checking_period def run_torrent_check(self): """ Called by network thread """ self.update_torrent_checking_period() self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period) try: self.torrent_checking.setInterval(self.torrent_checking_period) except Exception as e: print_exc() self.rawserver_nonfatalerrorfunc(e) # SWIFTPROC def swift_add(self, sdef, dscfg, pstate=None, initialdlstatus=None, hidden=False): """ Called by any thread """ d = None self.sesslock.acquire() try: if self.spm is None: raise OperationNotEnabledByConfigurationException() roothash = sdef.get_roothash() # Check if running or saved on disk if roothash in self.downloads: raise DuplicateDownloadException() from Tribler.Core.Swift.SwiftDownloadImpl import SwiftDownloadImpl d = SwiftDownloadImpl(self.session, sdef) # Store in list of Downloads, always. self.downloads[roothash] = d d.setup(dscfg, pstate, initialdlstatus, None, self.network_vod_event_callback) finally: self.sesslock.release() def do_db(torrent_db, mypref_db, roothash, sdef, d): torrent_id = torrent_db.addOrGetTorrentIDRoot(roothash, sdef.get_name()) # TODO: if user renamed the dest_path for single-file-torrent dest_path = d.get_dest_dir() data = {'destination_path': dest_path} mypref_db.addMyPreference(torrent_id, data) if d and not hidden and self.session.get_megacache(): self.database_thread.register(do_db, args=(self.torrent_db, self.mypref_db, roothash, sdef, d)) return d def swift_remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ self.sesslock.acquire() try: # SWIFTPROC: remove before stop_remove, to ensure that content # removal works (for torrents, stopping is delegate to network # so all this code happens fast before actual removal. For swift not. roothash = d.get_def().get_roothash() if roothash in self.downloads: del self.downloads[roothash] d.stop_remove(True, removestate=removestate, removecontent=removecontent) finally: self.sesslock.release() def do_db(torrent_db, my_prefdb, roothash): torrent_id = self.torrent_db.getTorrentIDRoot(roothash) if torrent_id: self.mypref_db.updateDestDir(torrent_id, "") if not hidden and self.session.get_megacache(): self.database_thread.register(do_db, args=(self.torrent_db, self.mypref_db, roothash), priority=1024)
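# --- Illustrative sketch (not part of the Tribler sources): the checkpoint round-trip implied by
# save_download_pstate()/load_download_pstate() above. The pstate keys shown ('metainfo',
# 'dlconfig', 'dlstate', 'engineresumedata') follow what resume_download() reads back; treat the
# exact layout as an assumption rather than a specification.

import binascii
import os
import pickle
import tempfile


def save_pstate(state_dir, infohash, pstate):
    # One pickle file per download, named after the hex-encoded infohash.
    filename = os.path.join(state_dir, binascii.hexlify(infohash).decode() + '.pickle')
    with open(filename, 'wb') as handle:
        pickle.dump(pstate, handle)
    return filename


def load_pstate(filename):
    with open(filename, 'rb') as handle:
        return pickle.load(handle)


if __name__ == '__main__':
    tmpdir = tempfile.mkdtemp()
    state = {'metainfo': {'infohash': b'a' * 20, 'name': 'example'},
             'dlconfig': {'saveas': '/tmp/example'},
             'dlstate': {'status': 0, 'progress': 0.0},
             'engineresumedata': None}
    path = save_pstate(tmpdir, b'a' * 20, state)
    assert load_pstate(path)['dlstate']['progress'] == 0.0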
class TriblerLaunchMany(TaskManager): def __init__(self): """ Called only once (unless we have multiple Sessions) by MainThread """ super(TriblerLaunchMany, self).__init__() self.initComplete = False self.registered = False self.dispersy = None self.ipv8 = None self.ipv8_start_time = 0 self.state_cb_count = 0 self.previous_active_downloads = [] self.download_states_lc = None self.get_peer_list = [] self._logger = logging.getLogger(self.__class__.__name__) self.downloads = {} self.upnp_ports = [] self.session = None self.session_lock = None self.sessdoneflag = Event() self.shutdownstarttime = None # modules self.torrent_store = None self.metadata_store = None self.rtorrent_handler = None self.tftp_handler = None self.api_manager = None self.watch_folder = None self.version_check_manager = None self.resource_monitor = None self.category = None self.peer_db = None self.torrent_db = None self.mypref_db = None self.votecast_db = None self.channelcast_db = None self.search_manager = None self.channel_manager = None self.video_server = None self.mainline_dht = None self.ltmgr = None self.tracker_manager = None self.torrent_checker = None self.tunnel_community = None self.trustchain_community = None self.wallets = {} self.popularity_community = None self.startup_deferred = Deferred() self.credit_mining_manager = None self.market_community = None self.dht_community = None self.payout_manager = None self.mds = None def register(self, session, session_lock): assert isInIOThread() if not self.registered: self.registered = True self.session = session self.session_lock = session_lock # On Mac, we bundle the root certificate for the SSL validation since Twisted is not using the root # certificates provided by the system trust store. if sys.platform == 'darwin': os.environ['SSL_CERT_FILE'] = os.path.join(get_lib_path(), 'root_certs_mac.pem') if self.session.config.get_torrent_store_enabled(): from Tribler.Core.leveldbstore import LevelDbStore self.torrent_store = LevelDbStore(self.session.config.get_torrent_store_dir()) if not self.torrent_store.get_db(): raise RuntimeError("Torrent store (leveldb) is None which should not normally happen") if self.session.config.get_metadata_enabled(): from Tribler.Core.leveldbstore import LevelDbStore self.metadata_store = LevelDbStore(self.session.config.get_metadata_store_dir()) if not self.metadata_store.get_db(): raise RuntimeError("Metadata store (leveldb) is None which should not normally happen") # torrent collecting: RemoteTorrentHandler if self.session.config.get_torrent_collecting_enabled() and self.session.config.get_dispersy_enabled(): from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler self.rtorrent_handler = RemoteTorrentHandler(self.session) # TODO(emilon): move this to a megacache component or smth if self.session.config.get_megacache_enabled(): from Tribler.Core.CacheDB.SqliteCacheDBHandler import (PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler, VoteCastDBHandler, ChannelCastDBHandler) from Tribler.Core.Category.Category import Category self._logger.debug('tlm: Reading Session state from %s', self.session.config.get_state_dir()) self.category = Category() # create DBHandlers self.peer_db = PeerDBHandler(self.session) self.torrent_db = TorrentDBHandler(self.session) self.mypref_db = MyPreferenceDBHandler(self.session) self.votecast_db = VoteCastDBHandler(self.session) self.channelcast_db = ChannelCastDBHandler(self.session) # initializes DBHandlers self.peer_db.initialize() self.torrent_db.initialize() self.mypref_db.initialize() 
self.votecast_db.initialize() self.channelcast_db.initialize() from Tribler.Core.Modules.tracker_manager import TrackerManager self.tracker_manager = TrackerManager(self.session) if self.session.config.get_video_server_enabled(): self.video_server = VideoServer(self.session.config.get_video_server_port(), self.session) self.video_server.start() # IPv8 if self.session.config.get_ipv8_enabled(): from Tribler.pyipv8.ipv8.configuration import get_default_configuration ipv8_config = get_default_configuration() ipv8_config['port'] = self.session.config.get_dispersy_port() ipv8_config['address'] = self.session.config.get_ipv8_address() ipv8_config['overlays'] = [] ipv8_config['keys'] = [] # We load the keys ourselves if self.session.config.get_ipv8_bootstrap_override(): import Tribler.pyipv8.ipv8.deprecated.community as community_file community_file._DEFAULT_ADDRESSES = [self.session.config.get_ipv8_bootstrap_override()] community_file._DNS_ADDRESSES = [] self.ipv8 = IPv8(ipv8_config, enable_statistics=self.session.config.get_ipv8_statistics()) self.session.config.set_anon_proxy_settings(2, ("127.0.0.1", self.session. config.get_tunnel_community_socks5_listen_ports())) # Dispersy self.tftp_handler = None if self.session.config.get_dispersy_enabled(): from Tribler.dispersy.dispersy import Dispersy from Tribler.dispersy.endpoint import MIMEndpoint from Tribler.dispersy.endpoint import IPv8toDispersyAdapter # set communication endpoint if self.session.config.get_ipv8_enabled(): dispersy_endpoint = IPv8toDispersyAdapter(self.ipv8.endpoint) else: dispersy_endpoint = MIMEndpoint(self.session.config.get_dispersy_port()) working_directory = unicode(self.session.config.get_state_dir()) self.dispersy = Dispersy(dispersy_endpoint, working_directory) self.dispersy.statistics.enable_debug_statistics(False) # register TFTP service from Tribler.Core.TFTP.handler import TftpHandler self.tftp_handler = TftpHandler(self.session, dispersy_endpoint, "fffffffd".decode('hex'), block_size=1024) self.tftp_handler.initialize() # Torrent search if self.session.config.get_torrent_search_enabled() or self.session.config.get_channel_search_enabled(): self.search_manager = SearchManager(self.session) self.search_manager.initialize() if not self.initComplete: self.init() self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER, [NTFY_STARTED]) self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None) return self.startup_deferred def on_tribler_started(self, subject, changetype, objectID, *args): reactor.callFromThread(self.startup_deferred.callback, None) def load_ipv8_overlays(self): # Discovery Community with open(self.session.config.get_permid_keypair_filename(), 'r') as key_file: content = key_file.read() content = content[31:-30].replace('\n', '').decode("BASE64") peer = Peer(M2CryptoSK(keystring=content)) discovery_community = DiscoveryCommunity(peer, self.ipv8.endpoint, self.ipv8.network) discovery_community.resolve_dns_bootstrap_addresses() self.ipv8.overlays.append(discovery_community) self.ipv8.strategies.append((RandomChurn(discovery_community), -1)) if not self.session.config.get_dispersy_enabled(): self.ipv8.strategies.append((RandomWalk(discovery_community), 20)) if self.session.config.get_testnet(): peer = Peer(self.session.trustchain_testnet_keypair) else: peer = Peer(self.session.trustchain_keypair) # TrustChain Community if self.session.config.get_trustchain_enabled(): from Tribler.pyipv8.ipv8.attestation.trustchain.community import TrustChainCommunity, \ TrustChainTestnetCommunity community_cls = 
TrustChainTestnetCommunity if self.session.config.get_testnet() else TrustChainCommunity self.trustchain_community = community_cls(peer, self.ipv8.endpoint, self.ipv8.network, working_directory=self.session.config.get_state_dir()) self.ipv8.overlays.append(self.trustchain_community) self.ipv8.strategies.append((EdgeWalk(self.trustchain_community), 20)) tc_wallet = TrustchainWallet(self.trustchain_community) self.wallets[tc_wallet.get_identifier()] = tc_wallet # DHT Community if self.session.config.get_dht_enabled(): from Tribler.pyipv8.ipv8.dht.discovery import DHTDiscoveryCommunity self.dht_community = DHTDiscoveryCommunity(peer, self.ipv8.endpoint, self.ipv8.network) self.ipv8.overlays.append(self.dht_community) self.ipv8.strategies.append((RandomWalk(self.dht_community), 20)) # Tunnel Community if self.session.config.get_tunnel_community_enabled(): from Tribler.community.triblertunnel.community import TriblerTunnelCommunity, TriblerTunnelTestnetCommunity community_cls = TriblerTunnelTestnetCommunity if self.session.config.get_testnet() else \ TriblerTunnelCommunity if self.mainline_dht: dht_provider = MainlineDHTProvider(self.mainline_dht, self.session.config.get_dispersy_port()) else: dht_provider = DHTCommunityProvider(self.dht_community, self.session.config.get_dispersy_port()) self.tunnel_community = community_cls(peer, self.ipv8.endpoint, self.ipv8.network, tribler_session=self.session, dht_provider=dht_provider, bandwidth_wallet=self.wallets["MB"]) self.ipv8.overlays.append(self.tunnel_community) self.ipv8.strategies.append((RandomWalk(self.tunnel_community), 20)) # Market Community if self.session.config.get_market_community_enabled() and self.session.config.get_dht_enabled(): from Tribler.community.market.community import MarketCommunity, MarketTestnetCommunity community_cls = MarketTestnetCommunity if self.session.config.get_testnet() else MarketCommunity self.market_community = community_cls(peer, self.ipv8.endpoint, self.ipv8.network, tribler_session=self.session, trustchain=self.trustchain_community, dht=self.dht_community, wallets=self.wallets, working_directory=self.session.config.get_state_dir()) self.ipv8.overlays.append(self.market_community) self.ipv8.strategies.append((RandomWalk(self.market_community), 20)) # Popular Community if self.session.config.get_popularity_community_enabled(): from Tribler.community.popularity.community import PopularityCommunity self.popularity_community = PopularityCommunity(peer, self.ipv8.endpoint, self.ipv8.network, torrent_db=self.session.lm.torrent_db, session=self.session) self.ipv8.overlays.append(self.popularity_community) self.ipv8.strategies.append((RandomWalk(self.popularity_community), 20)) self.popularity_community.start() def enable_ipv8_statistics(self): if self.session.config.get_ipv8_statistics(): for overlay in self.ipv8.overlays: self.ipv8.endpoint.enable_community_statistics(overlay.get_prefix(), True) def load_dispersy_communities(self): self._logger.info("tribler: Preparing Dispersy communities...") now_time = timemod.time() default_kwargs = {'tribler_session': self.session} # Search Community if self.session.config.get_torrent_search_enabled() and self.dispersy: from Tribler.community.search.community import SearchCommunity self.dispersy.define_auto_load(SearchCommunity, self.session.dispersy_member, load=True, kargs=default_kwargs) # AllChannel Community if self.session.config.get_channel_search_enabled() and self.dispersy: from Tribler.community.allchannel.community import AllChannelCommunity 
self.dispersy.define_auto_load(AllChannelCommunity, self.session.dispersy_member, load=True, kargs=default_kwargs) # Channel Community if self.session.config.get_channel_community_enabled() and self.dispersy: from Tribler.community.channel.community import ChannelCommunity self.dispersy.define_auto_load(ChannelCommunity, self.session.dispersy_member, load=True, kargs=default_kwargs) # PreviewChannel Community if self.session.config.get_preview_channel_community_enabled() and self.dispersy: from Tribler.community.channel.preview import PreviewChannelCommunity self.dispersy.define_auto_load(PreviewChannelCommunity, self.session.dispersy_member, kargs=default_kwargs) self._logger.info("tribler: communities are ready in %.2f seconds", timemod.time() - now_time) def init(self): if self.dispersy: from Tribler.dispersy.community import HardKilledCommunity self._logger.info("lmc: Starting Dispersy...") self.session.readable_status = STATE_STARTING_DISPERSY now = timemod.time() success = self.dispersy.start(self.session.autoload_discovery) diff = timemod.time() - now if success: self._logger.info("lmc: Dispersy started successfully in %.2f seconds [port: %d]", diff, self.dispersy.wan_address[1]) else: self._logger.info("lmc: Dispersy failed to start in %.2f seconds", diff) self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP')) from Tribler.dispersy.crypto import M2CryptoSK private_key = self.dispersy.crypto.key_to_bin( M2CryptoSK(filename=self.session.config.get_permid_keypair_filename())) self.session.dispersy_member = blockingCallFromThread(reactor, self.dispersy.get_member, private_key=private_key) blockingCallFromThread(reactor, self.dispersy.define_auto_load, HardKilledCommunity, self.session.dispersy_member, load=True) if self.session.config.get_megacache_enabled(): self.dispersy.database.attach_commit_callback(self.session.sqlite_db.commit_now) # notify dispersy finished loading self.session.notifier.notify(NTFY_DISPERSY, NTFY_STARTED, None) self.session.readable_status = STATE_LOADING_COMMUNITIES # We should load the mainline DHT before loading the IPv8 overlays since the DHT is used for the tunnel overlay. 
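        # Remaining start-up order implemented below: mainline DHT, wallets, IPv8 overlays,
        # Dispersy communities, channel manager, libtorrent, torrent checking, the remote
        # torrent handler, REST API endpoints, watch folder, credit mining, resource monitor,
        # version checking and finally the chant metadata store.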
if self.session.config.get_mainline_dht_enabled(): self.session.readable_status = STATE_START_MAINLINE_DHT from Tribler.Core.DecentralizedTracking import mainlineDHT self.mainline_dht = mainlineDHT.init(('127.0.0.1', self.session.config.get_mainline_dht_port()), self.session.config.get_state_dir()) self.upnp_ports.append((self.session.config.get_mainline_dht_port(), 'UDP')) # Wallets if self.session.config.get_bitcoinlib_enabled(): try: from Tribler.Core.Modules.wallet.btc_wallet import BitcoinWallet, BitcoinTestnetWallet wallet_path = os.path.join(self.session.config.get_state_dir(), 'wallet') btc_wallet = BitcoinWallet(wallet_path) btc_testnet_wallet = BitcoinTestnetWallet(wallet_path) self.wallets[btc_wallet.get_identifier()] = btc_wallet self.wallets[btc_testnet_wallet.get_identifier()] = btc_testnet_wallet except ImportError: self._logger.error("bitcoinlib library cannot be found, Bitcoin wallet not available!") if self.session.config.get_dummy_wallets_enabled(): # For debugging purposes, we create dummy wallets dummy_wallet1 = DummyWallet1() self.wallets[dummy_wallet1.get_identifier()] = dummy_wallet1 dummy_wallet2 = DummyWallet2() self.wallets[dummy_wallet2.get_identifier()] = dummy_wallet2 if self.ipv8: self.ipv8_start_time = time.time() self.load_ipv8_overlays() self.enable_ipv8_statistics() if self.dispersy: self.load_dispersy_communities() tunnel_community_ports = self.session.config.get_tunnel_community_socks5_listen_ports() self.session.config.set_anon_proxy_settings(2, ("127.0.0.1", tunnel_community_ports)) if self.session.config.get_channel_search_enabled() and self.session.config.get_dispersy_enabled(): self.session.readable_status = STATE_INITIALIZE_CHANNEL_MGR from Tribler.Core.Modules.channel.channel_manager import ChannelManager self.channel_manager = ChannelManager(self.session) self.channel_manager.initialize() if self.session.config.get_libtorrent_enabled(): self.session.readable_status = STATE_START_LIBTORRENT from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr self.ltmgr = LibtorrentMgr(self.session) self.ltmgr.initialize() for port, protocol in self.upnp_ports: self.ltmgr.add_upnp_mapping(port, protocol) # add task for tracker checking if self.session.config.get_torrent_checking_enabled(): self.session.readable_status = STATE_START_TORRENT_CHECKER self.torrent_checker = TorrentChecker(self.session) self.torrent_checker.initialize() if self.rtorrent_handler and self.session.config.get_dispersy_enabled(): self.session.readable_status = STATE_START_REMOTE_TORRENT_HANDLER self.rtorrent_handler.initialize() if self.api_manager: self.session.readable_status = STATE_START_API_ENDPOINTS self.api_manager.root_endpoint.start_endpoints() if self.session.config.get_watch_folder_enabled(): self.session.readable_status = STATE_START_WATCH_FOLDER self.watch_folder = WatchFolder(self.session) self.watch_folder.start() if self.session.config.get_credit_mining_enabled(): self.session.readable_status = STATE_START_CREDIT_MINING from Tribler.Core.CreditMining.CreditMiningManager import CreditMiningManager self.credit_mining_manager = CreditMiningManager(self.session) if self.session.config.get_resource_monitor_enabled(): self.resource_monitor = ResourceMonitor(self.session) self.resource_monitor.start() if self.session.config.get_version_checker_enabled(): self.version_check_manager = VersionCheckManager(self.session) self.version_check_manager.start() if self.session.config.get_chant_enabled(): channels_dir = os.path.join(self.session.config.get_chant_channels_dir()) 
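            # The chant metadata store keeps its database under <state_dir>/sqlite/metadata.db
            # and is handed the session's trustchain keypair (presumably for signing locally
            # created metadata entries).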
            database_path = os.path.join(self.session.config.get_state_dir(), 'sqlite', 'metadata.db')
            self.mds = MetadataStore(database_path, channels_dir, self.session.trustchain_keypair)

        self.session.set_download_states_callback(self.sesscb_states_callback)

        if self.session.config.get_ipv8_enabled() and self.session.config.get_trustchain_enabled():
            self.payout_manager = PayoutManager(self.trustchain_community, self.dht_community)

        self.initComplete = True

    def on_channel_download_finished(self, download, channel_id, finished_deferred=None):
        if download.get_channel_download():
            channel_dirname = os.path.join(self.session.lm.mds.channels_dir, download.get_def().get_name())
            self.mds.process_channel_dir(channel_dirname, channel_id)
            if finished_deferred:
                finished_deferred.callback(download)

    @db_session
    def update_channel(self, payload):
        """
        We received some channel metadata, possibly over the network.
        Validate the signature, update the local metadata store and start downloading this channel if needed.
        :param payload: The channel metadata, in serialized form.
        """
        if not payload.has_valid_signature():
            raise InvalidSignatureException("The signature of the channel metadata is invalid.")

        channel = self.mds.ChannelMetadata.get_channel_with_id(payload.public_key)
        if channel:
            if float2time(payload.timestamp) > channel.timestamp:
                # Update the channel that is already there.
                self._logger.info("Updating channel metadata %s ts %s->%s",
                                  str(channel.public_key).encode("hex"),
                                  str(channel.timestamp), str(float2time(payload.timestamp)))
                channel.set(**ChannelMetadataPayload.to_dict(payload))
        else:
            # Add new channel object to DB
            channel = self.mds.ChannelMetadata.from_payload(payload)
            channel.subscribed = True

        if channel.version > channel.local_version:
            self._logger.info("Downloading new channel version %s ver %i->%i",
                              str(channel.public_key).encode("hex"),
                              channel.local_version, channel.version)
            #TODO: handle the case where the local version is the same as the new one and is not seeded
            return self.download_channel(channel)

    def download_channel(self, channel):
        """
        Download a channel with a given infohash and title.
        :param channel: The channel metadata ORM object.
        """
        finished_deferred = Deferred()

        dcfg = DownloadStartupConfig()
        dcfg.set_dest_dir(self.mds.channels_dir)
        dcfg.set_channel_download(True)
        tdef = TorrentDefNoMetainfo(infohash=str(channel.infohash), name=channel.title)
        download = self.session.start_download_from_tdef(tdef, dcfg)
        channel_id = channel.public_key
        download.finished_callback = lambda dl: self.on_channel_download_finished(dl, channel_id, finished_deferred)
        return download, finished_deferred

    def updated_my_channel(self, new_torrent_path):
        """
        Notify the core that we updated our channel.
:param new_torrent_path: path to the new torrent file """ # Start the new download tdef = TorrentDef.load(new_torrent_path) dcfg = DownloadStartupConfig() dcfg.set_dest_dir(self.mds.channels_dir) dcfg.set_channel_download(True) self.add(tdef, dcfg) def add(self, tdef, dscfg, pstate=None, setupDelay=0, hidden=False, share_mode=False, checkpoint_disabled=False): """ Called by any thread """ d = None with self.session_lock: if not isinstance(tdef, TorrentDefNoMetainfo) and not tdef.is_finalized(): raise ValueError("TorrentDef not finalized") infohash = tdef.get_infohash() # Create the destination directory if it does not exist yet try: if not os.path.isdir(dscfg.get_dest_dir()): os.makedirs(dscfg.get_dest_dir()) except OSError: self._logger.error("Unable to create the download destination directory.") if dscfg.get_time_added() == 0: dscfg.set_time_added(int(timemod.time())) # Check if running or saved on disk if infohash in self.downloads: self._logger.info("Torrent already exists in the downloads. Infohash:%s", infohash.encode('hex')) from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl d = LibtorrentDownloadImpl(self.session, tdef) if pstate is None: # not already resuming pstate = self.load_download_pstate_noexc(infohash) if pstate is not None: self._logger.debug("tlm: add: pstate is %s %s", pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress')) # Store in list of Downloads, always. self.downloads[infohash] = d setup_deferred = d.setup(dscfg, pstate, wrapperDelay=setupDelay, share_mode=share_mode, checkpoint_disabled=checkpoint_disabled) setup_deferred.addCallback(self.on_download_handle_created) if d and not hidden and self.session.config.get_megacache_enabled(): @forceDBThread def write_my_pref(): torrent_id = self.torrent_db.getTorrentID(infohash) data = {'destination_path': d.get_dest_dir()} self.mypref_db.addMyPreference(torrent_id, data) if isinstance(tdef, TorrentDefNoMetainfo): self.torrent_db.addOrGetTorrentID(tdef.get_infohash()) self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name_as_unicode()) self.torrent_db._db.commit_now() write_my_pref() elif self.rtorrent_handler: self.rtorrent_handler.save_torrent(tdef, write_my_pref) else: self.torrent_db.addExternalTorrent(tdef, extra_info={'status': 'good'}) write_my_pref() return d def on_download_handle_created(self, download): """ This method is called when the download handle has been created. Immediately checkpoint the download and write the resume data. 
""" return download.checkpoint() def remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ out = None with self.session_lock: out = d.stop_remove(removestate=removestate, removecontent=removecontent) infohash = d.get_def().get_infohash() if infohash in self.downloads: del self.downloads[infohash] if not hidden: self.remove_id(infohash) if self.tunnel_community: self.tunnel_community.on_download_removed(d) return out or succeed(None) def remove_id(self, infohash): @forceDBThread def do_db(): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id: self.mypref_db.deletePreference(torrent_id) if self.session.config.get_megacache_enabled(): do_db() def get_downloads(self): """ Called by any thread """ with self.session_lock: return self.downloads.values() # copy, is mutable def get_download(self, infohash): """ Called by any thread """ with self.session_lock: return self.downloads.get(infohash, None) def download_exists(self, infohash): with self.session_lock: return infohash in self.downloads @inlineCallbacks def update_download_hops(self, download, new_hops): """ Update the amount of hops for a specified download. This can be done on runtime. """ infohash = binascii.hexlify(download.tdef.get_infohash()) self._logger.info("Updating the amount of hops of download %s", infohash) pstate = download.get_persistent_download_config() pstate.set('state', 'engineresumedata', (yield download.save_resume_data())) yield self.session.remove_download(download) # copy the old download_config and change the hop count dscfg = download.copy() dscfg.set_hops(new_hops) # If the user wants to change the hop count to 0, don't automatically bump this up to 1 anymore dscfg.set_safe_seeding(False) self.session.start_download_from_tdef(download.tdef, dscfg, pstate=pstate) def update_trackers(self, infohash, trackers): """ Update the trackers for a download. :param infohash: infohash of the torrent that needs to be updated :param trackers: A list of tracker urls. """ dl = self.get_download(infohash) old_def = dl.get_def() if dl else None if old_def: old_trackers = old_def.get_trackers_as_single_tuple() new_trackers = list(set(trackers) - set(old_trackers)) all_trackers = list(old_trackers) + new_trackers if new_trackers: # Add new trackers to the download dl.add_trackers(new_trackers) # Create a new TorrentDef if isinstance(old_def, TorrentDefNoMetainfo): new_def = TorrentDefNoMetainfo(old_def.get_infohash(), old_def.get_name(), dl.get_magnet_link()) else: metainfo = old_def.get_metainfo() if len(all_trackers) > 1: metainfo["announce-list"] = [all_trackers] else: metainfo["announce"] = all_trackers[0] new_def = TorrentDef.load_from_dict(metainfo) # Set TorrentDef + checkpoint dl.set_def(new_def) dl.checkpoint() if isinstance(old_def, TorrentDefNoMetainfo): @forceDBThread def update_trackers_db(infohash, new_trackers): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id is not None: self.torrent_db.addTorrentTrackerMappingInBatch(torrent_id, new_trackers) self.session.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash) if self.session.config.get_megacache_enabled(): update_trackers_db(infohash, new_trackers) elif not isinstance(old_def, TorrentDefNoMetainfo) and self.rtorrent_handler: # Update collected torrents self.rtorrent_handler.save_torrent(new_def) # # State retrieval # def stop_download_states_callback(self): """ Stop any download states callback if present. 
""" if self.is_pending_task_active("download_states_lc"): self.cancel_pending_task("download_states_lc") def set_download_states_callback(self, user_callback, interval=1.0): """ Set the download state callback. Remove any old callback if it's present. """ self.stop_download_states_callback() self._logger.debug("Starting the download state callback with interval %f", interval) self.download_states_lc = self.register_task("download_states_lc", LoopingCall(self._invoke_states_cb, user_callback)) self.download_states_lc.start(interval) def _invoke_states_cb(self, callback): """ Invoke the download states callback with a list of the download states. """ dslist = [] for d in self.downloads.values(): d.set_moreinfo_stats(True in self.get_peer_list or d.get_def().get_infohash() in self.get_peer_list) ds = d.network_get_state(None) dslist.append(ds) def on_cb_done(new_get_peer_list): self.get_peer_list = new_get_peer_list return deferToThread(callback, dslist).addCallback(on_cb_done) def sesscb_states_callback(self, states_list): """ This method is periodically (every second) called with a list of the download states of the active downloads. """ self.state_cb_count += 1 # Check to see if a download has finished new_active_downloads = [] do_checkpoint = False seeding_download_list = [] for ds in states_list: state = ds.get_status() download = ds.get_download() tdef = download.get_def() safename = tdef.get_name_as_unicode() infohash = tdef.get_infohash() if state == DLSTATUS_DOWNLOADING: new_active_downloads.append(infohash) elif state == DLSTATUS_STOPPED_ON_ERROR: self._logger.error("Error during download: %s", repr(ds.get_error())) if self.download_exists(infohash): self.get_download(infohash).stop() self.session.notifier.notify(NTFY_TORRENT, NTFY_ERROR, infohash, repr(ds.get_error())) elif state == DLSTATUS_SEEDING: seeding_download_list.append({u'infohash': infohash, u'download': download}) if infohash in self.previous_active_downloads: self.session.notifier.notify(NTFY_TORRENT, NTFY_FINISHED, infohash, safename) do_checkpoint = True elif download.get_hops() == 0 and download.get_safe_seeding(): # Re-add the download with anonymity enabled hops = self.session.config.get_default_number_hops() self.update_download_hops(download, hops) # Check the peers of this download every five seconds and add them to the payout manager when # this peer runs a Tribler instance if self.state_cb_count % 5 == 0 and download.get_hops() == 0 and self.payout_manager: for peer in download.get_peerlist(): if peer["extended_version"].startswith('Tribler'): self.payout_manager.update_peer(peer["id"].decode('hex'), infohash, peer["dtotal"]) self.previous_active_downloads = new_active_downloads if do_checkpoint: self.session.checkpoint_downloads() if self.state_cb_count % 4 == 0: if self.tunnel_community: self.tunnel_community.monitor_downloads(states_list) if self.credit_mining_manager: self.credit_mining_manager.monitor_downloads(states_list) return [] # # Persistence methods # def load_checkpoint(self): """ Called by any thread """ def do_load_checkpoint(): with self.session_lock: for i, filename in enumerate(iglob(os.path.join(self.session.get_downloads_pstate_dir(), '*.state'))): self.resume_download(filename, setupDelay=i * 0.1) if self.initComplete: do_load_checkpoint() else: self.register_task("load_checkpoint", reactor.callLater(1, do_load_checkpoint)) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume session_lock already held """ try: basename = binascii.hexlify(infohash) + '.state' 
filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if os.path.exists(filename): return self.load_download_pstate(filename) else: self._logger.info("%s not found", basename) except Exception: self._logger.exception("Exception while loading pstate: %s", infohash) def resume_download(self, filename, setupDelay=0): tdef = dscfg = pstate = None try: pstate = self.load_download_pstate(filename) # SWIFTPROC metainfo = pstate.get('state', 'metainfo') if 'infohash' in metainfo: tdef = TorrentDefNoMetainfo(metainfo['infohash'], metainfo['name'], metainfo.get('url', None)) else: tdef = TorrentDef.load_from_dict(metainfo) if pstate.has_option('download_defaults', 'saveas') and \ isinstance(pstate.get('download_defaults', 'saveas'), tuple): pstate.set('download_defaults', 'saveas', pstate.get('download_defaults', 'saveas')[-1]) dscfg = DownloadStartupConfig(pstate) except: # pstate is invalid or non-existing _, file = os.path.split(filename) infohash = binascii.unhexlify(file[:-6]) torrent_data = self.torrent_store.get(infohash) if torrent_data: try: tdef = TorrentDef.load_from_memory(torrent_data) defaultDLConfig = DefaultDownloadStartupConfig.getInstance() dscfg = defaultDLConfig.copy() if self.mypref_db is not None: dest_dir = self.mypref_db.getMyPrefStatsInfohash(infohash) if dest_dir and os.path.isdir(dest_dir): dscfg.set_dest_dir(dest_dir) except ValueError: self._logger.warning("tlm: torrent data invalid") if pstate is not None: has_resume_data = pstate.get('state', 'engineresumedata') is not None self._logger.debug("tlm: load_checkpoint: resumedata %s", 'len %s ' % len(pstate.get('state', 'engineresumedata')) if has_resume_data else 'None') if tdef and dscfg: if dscfg.get_dest_dir() != '': # removed torrent ignoring try: if self.download_exists(tdef.get_infohash()): self._logger.info("tlm: not resuming checkpoint because download has already been added") elif dscfg.get_credit_mining() and not self.session.config.get_credit_mining_enabled(): self._logger.info("tlm: not resuming checkpoint since token mining is disabled") else: self.add(tdef, dscfg, pstate, setupDelay=setupDelay) except Exception as e: self._logger.exception("tlm: load check_point: exception while adding download %s", tdef) else: self._logger.info("tlm: removing checkpoint %s destdir is %s", filename, dscfg.get_dest_dir()) os.remove(filename) else: self._logger.info("tlm: could not resume checkpoint %s %s %s", filename, tdef, dscfg) def checkpoint_downloads(self): """ Checkpoints all running downloads in Tribler. Even if the list of Downloads changes in the mean time this is no problem. For removals, dllist will still hold a pointer to the download, and additions are no problem (just won't be included in list of states returned via callback). """ downloads = self.downloads.values() deferred_list = [] self._logger.debug("tlm: checkpointing %s downloads", len(downloads)) for download in downloads: deferred_list.append(download.checkpoint()) return DeferredList(deferred_list) def shutdown_downloads(self): """ Shutdown all downloads in Tribler. 
""" for download in self.downloads.values(): download.stop() def remove_pstate(self, infohash): def do_remove(): if not self.download_exists(infohash): dlpstatedir = self.session.get_downloads_pstate_dir() # Remove checkpoint hexinfohash = binascii.hexlify(infohash) try: basename = hexinfohash + '.state' filename = os.path.join(dlpstatedir, basename) self._logger.debug("remove pstate: removing dlcheckpoint entry %s", filename) if os.access(filename, os.F_OK): os.remove(filename) except: # Show must go on self._logger.exception("Could not remove state") else: self._logger.warning("remove pstate: download is back, restarted? Canceling removal! %s", repr(infohash)) reactor.callFromThread(do_remove) @inlineCallbacks def early_shutdown(self): """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time and that can run in parallel to checkpointing, etc. :returns a Deferred that will fire once all dependencies acknowledge they have shutdown. """ self._logger.info("tlm: early_shutdown") self.shutdown_task_manager() # Note: session_lock not held self.shutdownstarttime = timemod.time() if self.credit_mining_manager: yield self.credit_mining_manager.shutdown() self.credit_mining_manager = None if self.torrent_checker: yield self.torrent_checker.shutdown() self.torrent_checker = None if self.channel_manager: yield self.channel_manager.shutdown() self.channel_manager = None if self.search_manager: yield self.search_manager.shutdown() self.search_manager = None if self.rtorrent_handler: yield self.rtorrent_handler.shutdown() self.rtorrent_handler = None if self.video_server: yield self.video_server.shutdown_server() self.video_server = None if self.version_check_manager: self.version_check_manager.stop() self.version_check_manager = None if self.resource_monitor: self.resource_monitor.stop() self.resource_monitor = None self.tracker_manager = None if self.tftp_handler is not None: yield self.tftp_handler.shutdown() self.tftp_handler = None if self.tunnel_community and self.trustchain_community: # We unload these overlays manually since the TrustChain has to be unloaded after the tunnel overlay. 
tunnel_community = self.tunnel_community self.tunnel_community = None yield self.ipv8.unload_overlay(tunnel_community) trustchain_community = self.trustchain_community self.trustchain_community = None yield self.ipv8.unload_overlay(trustchain_community) if self.dispersy: self._logger.info("lmc: Shutting down Dispersy...") now = timemod.time() try: success = yield self.dispersy.stop() except: print_exc() success = False diff = timemod.time() - now if success: self._logger.info("lmc: Dispersy successfully shutdown in %.2f seconds", diff) else: self._logger.info("lmc: Dispersy failed to shutdown in %.2f seconds", diff) if self.ipv8: yield self.ipv8.stop(stop_reactor=False) if self.metadata_store is not None: yield self.metadata_store.close() self.metadata_store = None if self.channelcast_db is not None: yield self.channelcast_db.close() self.channelcast_db = None if self.votecast_db is not None: yield self.votecast_db.close() self.votecast_db = None if self.mypref_db is not None: yield self.mypref_db.close() self.mypref_db = None if self.torrent_db is not None: yield self.torrent_db.close() self.torrent_db = None if self.peer_db is not None: yield self.peer_db.close() self.peer_db = None if self.mainline_dht is not None: from Tribler.Core.DecentralizedTracking import mainlineDHT yield mainlineDHT.deinit(self.mainline_dht) self.mainline_dht = None if self.torrent_store is not None: yield self.torrent_store.close() self.torrent_store = None if self.watch_folder is not None: yield self.watch_folder.stop() self.watch_folder = None # We close the API manager as late as possible during shutdown. if self.api_manager is not None: yield self.api_manager.stop() self.api_manager = None def network_shutdown(self): try: self._logger.info("tlm: network_shutdown") ts = enumerate_threads() self._logger.info("tlm: Number of threads still running %d", len(ts)) for t in ts: self._logger.info("tlm: Thread still running=%s, daemon=%s, instance=%s", t.getName(), t.isDaemon(), t) except: print_exc() # Stop network thread self.sessdoneflag.set() # Shutdown libtorrent session after checkpoints have been made if self.ltmgr is not None: self.ltmgr.shutdown() self.ltmgr = None def save_download_pstate(self, infohash, pstate): """ Called by network thread """ self.downloads[infohash].pstate_for_restart = pstate self.register_anonymous_task("save_pstate", self.downloads[infohash].save_resume_data()) def load_download_pstate(self, filename): """ Called by any thread """ pstate = CallbackConfigParser() pstate.read_file(filename) return pstate
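
# A minimal usage sketch (illustrative only, not part of the Tribler code base) of how a Session
# is expected to drive the TaskManager-based TriblerLaunchMany above:
#
#     lm = TriblerLaunchMany()
#     startup_deferred = lm.register(session, session_lock)         # builds stores, Dispersy/IPv8, runs init()
#     startup_deferred.addCallback(lambda _: lm.load_checkpoint())  # resume previously added downloads
#     ...
#     yield lm.early_shutdown()   # stop components and unload overlays
#     lm.network_shutdown()       # finally shut down the libtorrent session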
class TriblerLaunchMany(Thread): def __init__(self): """ Called only once (unless we have multiple Sessions) by MainThread """ Thread.__init__(self) self.setDaemon(True) self.setName("Network" + self.getName()) self.initComplete = False self.registered = False self.dispersy = None self.database_thread = None def register(self, session, sesslock): if not self.registered: self.registered = True self.session = session self.sesslock = sesslock self.downloads = {} config = session.sessconfig # Should be safe at startup self.upnp_ports = [] # Orig self.sessdoneflag = Event() self.rawserver = RawServer( self.sessdoneflag, config['timeout_check_interval'], config['timeout'], ipv6_enable=config['ipv6_enabled'], failfunc=self.rawserver_fatalerrorfunc, errorfunc=self.rawserver_nonfatalerrorfunc) self.rawserver.add_task(self.rawserver_keepalive, 1) self.listen_port = config['minport'] self.shutdownstarttime = None self.multihandler = MultiHandler(self.rawserver, self.sessdoneflag) # SWIFTPROC swift_exists = config['swiftproc'] and ( os.path.exists(config['swiftpath']) or os.path.exists(config['swiftpath'] + '.exe')) if swift_exists: from Tribler.Core.Swift.SwiftProcessMgr import SwiftProcessMgr self.spm = SwiftProcessMgr( config['swiftpath'], config['swiftcmdlistenport'], config['swiftdlsperproc'], self.session.get_swift_tunnel_listen_port(), self.sesslock) try: self.swift_process = self.spm.get_or_create_sp( self.session.get_swift_working_dir(), self.session.get_torrent_collecting_dir(), self.session.get_swift_tunnel_listen_port(), self.session.get_swift_tunnel_httpgw_listen_port(), self.session.get_swift_tunnel_cmdgw_listen_port()) self.upnp_ports.append( (self.session.get_swift_tunnel_listen_port(), 'UDP')) except OSError: # could not find/run swift print >> sys.stderr, "lmc: could not start a swift process" else: self.spm = None self.swift_process = None # Dispersy self.session.dispersy_member = None if config['dispersy']: from Tribler.dispersy.callback import Callback from Tribler.dispersy.dispersy import Dispersy from Tribler.dispersy.endpoint import RawserverEndpoint, TunnelEndpoint from Tribler.dispersy.community import HardKilledCommunity # set communication endpoint if config['dispersy-tunnel-over-swift'] and self.swift_process: endpoint = TunnelEndpoint(self.swift_process) else: endpoint = RawserverEndpoint(self.rawserver, config['dispersy_port']) callback = Callback("Dispersy") # WARNING NAME SIGNIFICANT working_directory = unicode(config['state_dir']) self.dispersy = Dispersy(callback, endpoint, working_directory) # TODO: see if we can postpone dispersy.start to improve GUI responsiveness. # However, for now we must start self.dispersy.callback before running # try_register(nocachedb, self.database_thread)! 
self.dispersy.start() print >> sys.stderr, "lmc: Dispersy is listening on port", self.dispersy.wan_address[ 1], "using", endpoint self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP')) self.dispersy.callback.call(self.dispersy.define_auto_load, args=(HardKilledCommunity, ), kargs={'load': True}) # notify dispersy finished loading self.session.uch.notify(NTFY_DISPERSY, NTFY_STARTED, None) from Tribler.Core.permid import read_keypair from Tribler.dispersy.crypto import ec_to_public_bin, ec_to_private_bin keypair = read_keypair( self.session.get_permid_keypair_filename()) self.session.dispersy_member = callback.call( self.dispersy.get_member, (ec_to_public_bin(keypair), ec_to_private_bin(keypair))) self.database_thread = callback else: class FakeCallback(): def __init__(self): from Tribler.Utilities.TimedTaskQueue import TimedTaskQueue self.queue = TimedTaskQueue("FakeCallback") def register(self, call, args=(), kargs=None, delay=0.0, priority=0, id_=u"", callback=None, callback_args=(), callback_kargs=None, include_id=False): def do_task(): if kargs: call(*args, **kargs) else: call(*args) if callback: if callback_kargs: callback(*callback_args, **callback_kargs) else: callback(*callback_args) self.queue.add_task(do_task, t=delay) def shutdown(self, immediately=False): self.queue.shutdown(immediately) self.database_thread = FakeCallback() if config['megacache']: import Tribler.Core.CacheDB.cachedb as cachedb from Tribler.Core.CacheDB.SqliteCacheDBHandler import PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler, VoteCastDBHandler, ChannelCastDBHandler, NetworkBuzzDBHandler, UserEventLogDBHandler from Tribler.Category.Category import Category from Tribler.Core.Tag.Extraction import TermExtraction from Tribler.Core.CacheDB.sqlitecachedb import try_register if DEBUG: print >> sys.stderr, 'tlm: Reading Session state from', config[ 'state_dir'] nocachedb = cachedb.init(config, self.rawserver_fatalerrorfunc) try_register(nocachedb, self.database_thread) self.cat = Category.getInstance(config['install_dir']) self.term = TermExtraction.getInstance(config['install_dir']) self.peer_db = PeerDBHandler.getInstance() self.peer_db.registerConnectionUpdater(self.session) self.torrent_db = TorrentDBHandler.getInstance() self.torrent_db.register( os.path.abspath(config['torrent_collecting_dir'])) self.mypref_db = MyPreferenceDBHandler.getInstance() self.votecast_db = VoteCastDBHandler.getInstance() self.votecast_db.registerSession(self.session) self.channelcast_db = ChannelCastDBHandler.getInstance() self.channelcast_db.registerSession(self.session) self.nb_db = NetworkBuzzDBHandler.getInstance() self.ue_db = UserEventLogDBHandler.getInstance() if self.dispersy: self.dispersy.database.attach_commit_callback( self.channelcast_db._db.commitNow) else: config['torrent_checking'] = 0 self.rtorrent_handler = None if config['torrent_collecting']: from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler self.rtorrent_handler = RemoteTorrentHandler() def init(self): config = self.session.sessconfig # Should be safe at startup self.mainline_dht = None if config['mainline_dht']: from Tribler.Core.DecentralizedTracking import mainlineDHT try: self.mainline_dht = mainlineDHT.init( ('127.0.0.1', config['mainline_dht_port']), config['state_dir'], config['swiftdhtport']) self.upnp_ports.append((config['mainline_dht_port'], 'UDP')) except: print_exc() self.ltmgr = None if config['libtorrent']: from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr self.ltmgr = LibtorrentMgr( self.session, 
                                     ignore_singleton=self.session.ignore_singleton)

        # add task for tracker checking
        self.torrent_checking = None
        if config['torrent_checking']:
            if config['mainline_dht']:
                # Create torrent-liveliness checker based on DHT
                from Tribler.Core.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker
                c = mainlineDHTChecker.getInstance()
                c.register(self.mainline_dht)

            try:
                from Tribler.TrackerChecking.TorrentChecking import TorrentChecking
                self.torrent_checking_period = config['torrent_checking_period']
                self.torrent_checking = TorrentChecking.getInstance(self.torrent_checking_period)
                self.run_torrent_check()
            except:
                print_exc()

        if self.rtorrent_handler:
            self.rtorrent_handler.register(self.dispersy, self.database_thread, self.session,
                                           int(config['torrent_collecting_max_torrents']))

        self.initComplete = True

    def add(self, tdef, dscfg, pstate=None, initialdlstatus=None, commit=True, setupDelay=0, hidden=False):
        """ Called by any thread """
        d = None
        self.sesslock.acquire()
        try:
            if not isinstance(tdef, TorrentDefNoMetainfo) and not tdef.is_finalized():
                raise ValueError("TorrentDef not finalized")

            infohash = tdef.get_infohash()

            # Check if running or saved on disk
            if infohash in self.downloads:
                raise DuplicateDownloadException()

            from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl
            d = LibtorrentDownloadImpl(self.session, tdef)

            if pstate is None and not tdef.get_live():  # not already resuming
                pstate = self.load_download_pstate_noexc(infohash)
                if pstate is not None:
                    if DEBUG:
                        print >> sys.stderr, "tlm: add: pstate is", dlstatus_strings[pstate['dlstate']['status']], pstate['dlstate']['progress']

            # Store in list of Downloads, always.
            self.downloads[infohash] = d
            d.setup(dscfg, pstate, initialdlstatus, self.network_engine_wrapper_created_callback,
                    self.network_vod_event_callback, wrapperDelay=setupDelay)
        finally:
            self.sesslock.release()

        if d and not hidden and self.session.get_megacache():
            def write_my_pref():
                torrent_id = self.torrent_db.getTorrentID(infohash)
                data = {'destination_path': d.get_dest_dir()}
                self.mypref_db.addMyPreference(torrent_id, data, commit=commit)

            if isinstance(tdef, TorrentDefNoMetainfo):
                self.torrent_db.addInfohash(tdef.get_infohash(), commit=commit)
                self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name().encode('utf_8'), commit=commit)
                write_my_pref()
            elif self.rtorrent_handler:
                self.rtorrent_handler.save_torrent(tdef, write_my_pref)
            else:
                self.torrent_db.addExternalTorrent(tdef, source='', extra_info={'status': 'good'}, commit=commit)
                write_my_pref()

        return d

    def network_engine_wrapper_created_callback(self, d, pstate):
        """ Called by network thread """
        try:
            if pstate is None:
                # Checkpoint at startup
                (infohash, pstate) = d.network_checkpoint()
                self.save_download_pstate(infohash, pstate)
        except:
            print_exc()

    def remove(self, d, removecontent=False, removestate=True, hidden=False):
        """ Called by any thread """
        self.sesslock.acquire()
        try:
            d.stop_remove(removestate=removestate, removecontent=removecontent)
            infohash = d.get_def().get_infohash()
            if infohash in self.downloads:
                del self.downloads[infohash]
        finally:
            self.sesslock.release()

        if not hidden:
            self.remove_id(infohash)

    def remove_id(self, hash):
        # this is a bit tricky, as we do not know if this "id" is a roothash or infohash
        # however a restart will re-add the preference to mypreference if we remove the wrong one
        def do_db(torrent_db, mypref_db, hash):
            torrent_id = self.torrent_db.getTorrentID(hash)
            if torrent_id:
                self.mypref_db.updateDestDir(torrent_id, "")
            torrent_id = 
self.torrent_db.getTorrentIDRoot(hash) if torrent_id: self.mypref_db.updateDestDir(torrent_id, "") if self.session.get_megacache(): self.database_thread.register(do_db, args=(self.torrent_db, self.mypref_db, hash), priority=1024) def get_downloads(self): """ Called by any thread """ self.sesslock.acquire() try: return self.downloads.values() # copy, is mutable finally: self.sesslock.release() def get_download(self, hash): """ Called by any thread """ self.sesslock.acquire() try: return self.downloads.get(hash, None) finally: self.sesslock.release() def download_exists(self, infohash): self.sesslock.acquire() try: return infohash in self.downloads finally: self.sesslock.release() def rawserver_fatalerrorfunc(self, e): """ Called by network thread """ if DEBUG: print >> sys.stderr, "tlm: RawServer fatal error func called", e print_exc() def rawserver_nonfatalerrorfunc(self, e): """ Called by network thread """ if DEBUG: print >> sys.stderr, "tlm: RawServer non fatal error func called", e print_exc() # Could log this somewhere, or phase it out def _run(self): """ Called only once by network thread """ try: try: self.start_upnp() self.multihandler.listen_forever() except: print_exc() finally: self.stop_upnp() self.rawserver.shutdown() def rawserver_keepalive(self): """ Hack to prevent rawserver sleeping in select() for a long time, not processing any tasks on its queue at startup time Called by network thread """ self.rawserver.add_task(self.rawserver_keepalive, 1) # # State retrieval # def set_download_states_callback(self, usercallback, getpeerlist, when=0.0): """ Called by any thread """ self.sesslock.acquire() try: # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() finally: self.sesslock.release() for d in dllist: # Arno, 2012-05-23: At Niels' request to get total transferred # stats. Causes MOREINFO message to be sent from swift proc # for every initiated dl. # 2012-07-31: Turn MOREINFO on/off on demand for efficiency. # 2013-04-17: Libtorrent now uses set_moreinfo_stats as well. d.set_moreinfo_stats(True in getpeerlist or d.get_def().get_id() in getpeerlist) network_set_download_states_callback_lambda = lambda: self.network_set_download_states_callback( usercallback) self.rawserver.add_task(network_set_download_states_callback_lambda, when) def network_set_download_states_callback(self, usercallback): """ Called by network thread """ self.sesslock.acquire() try: # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() finally: self.sesslock.release() dslist = [] for d in dllist: try: ds = d.network_get_state(None, False, sessioncalling=True) dslist.append(ds) except: # Niels, 2012-10-18: If Swift connection is crashing, it will raise an exception # We're catching it here to continue building the downloadstates print_exc() # Invoke the usercallback function via a new thread. # After the callback is invoked, the return values will be passed to # the returncallback for post-callback processing. 
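        # sesscb_set_download_states_returncallback() below receives the user callback's return
        # values and re-schedules this polling cycle whenever a positive 'when' value is given.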
self.session.uch.perform_getstate_usercallback( usercallback, dslist, self.sesscb_set_download_states_returncallback) def sesscb_set_download_states_returncallback(self, usercallback, when, newgetpeerlist): """ Called by SessionCallbackThread """ if when > 0.0: # reschedule self.set_download_states_callback(usercallback, newgetpeerlist, when=when) # # Persistence methods # def load_checkpoint(self, initialdlstatus=None, initialdlstatus_dict={}): """ Called by any thread """ if not self.initComplete: network_load_checkpoint_callback_lambda = lambda: self.load_checkpoint( initialdlstatus, initialdlstatus_dict) self.rawserver.add_task(network_load_checkpoint_callback_lambda, 1.0) else: self.sesslock.acquire() filelist = [] try: dir = self.session.get_downloads_pstate_dir() filelist = os.listdir(dir) filelist = [ os.path.join(dir, filename) for filename in filelist if filename.endswith('.pickle') ] finally: self.sesslock.release() for i, filename in enumerate(filelist): shouldCommit = i + 1 == len(filelist) self.resume_download(filename, initialdlstatus, initialdlstatus_dict, commit=shouldCommit, setupDelay=i * 0.1) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume sesslock already held """ try: dir = self.session.get_downloads_pstate_dir() basename = binascii.hexlify(infohash) + '.pickle' filename = os.path.join(dir, basename) return self.load_download_pstate(filename) except Exception as e: # TODO: remove saved checkpoint? # self.rawserver_nonfatalerrorfunc(e) return None def resume_download(self, filename, initialdlstatus=None, initialdlstatus_dict={}, commit=True, setupDelay=0): tdef = sdef = dscfg = pstate = None try: pstate = self.load_download_pstate(filename) # SWIFTPROC if SwiftDef.is_swift_url(pstate['metainfo']): sdef = SwiftDef.load_from_url(pstate['metainfo']) elif 'infohash' in pstate['metainfo']: tdef = TorrentDefNoMetainfo(pstate['metainfo']['infohash'], pstate['metainfo']['name']) else: tdef = TorrentDef.load_from_dict(pstate['metainfo']) dlconfig = pstate['dlconfig'] if isinstance(dlconfig['saveas'], tuple): dlconfig['saveas'] = dlconfig['saveas'][-1] if sdef and 'name' in dlconfig and isinstance( dlconfig['name'], basestring): sdef.set_name(dlconfig['name']) if sdef and sdef.get_tracker().startswith("127.0.0.1:"): current_port = int(sdef.get_tracker().split(":")[1]) if current_port != self.session.get_swift_dht_listen_port(): print >> sys.stderr, "Modified SwiftDef to new tracker port" sdef.set_tracker("127.0.0.1:%d" % self.session.get_swift_dht_listen_port()) dscfg = DownloadStartupConfig(dlconfig) except: print_exc() # pstate is invalid or non-existing _, file = os.path.split(filename) infohash = binascii.unhexlify(file[:-7]) torrent = self.torrent_db.getTorrent( infohash, keys=['name', 'torrent_file_name', 'swift_torrent_hash'], include_mypref=False) torrentfile = None if torrent: torrent_dir = self.session.get_torrent_collecting_dir() if torrent['swift_torrent_hash']: sdef = SwiftDef(torrent['swift_torrent_hash']) save_name = sdef.get_roothash_as_hex() torrentfile = os.path.join(torrent_dir, save_name) if torrentfile and os.path.isfile(torrentfile): # normal torrentfile is not present, see if readable torrent is there save_name = get_readable_torrent_name( infohash, torrent['name']) torrentfile = os.path.join(torrent_dir, save_name) if torrentfile and os.path.isfile(torrentfile): tdef = TorrentDef.load(torrentfile) defaultDLConfig = DefaultDownloadStartupConfig.getInstance() dscfg = defaultDLConfig.copy() if self.mypref_db != None: 
preferences = self.mypref_db.getMyPrefStatsInfohash( infohash) if preferences: if os.path.isdir( preferences[2]) or preferences[2] == '': dscfg.set_dest_dir(preferences[2]) if DEBUG: print >> sys.stderr, "tlm: load_checkpoint: pstate is", dlstatus_strings[ pstate['dlstate']['status']], pstate['dlstate']['progress'] if pstate['engineresumedata'] is None: print >> sys.stderr, "tlm: load_checkpoint: resumedata None" else: print >> sys.stderr, "tlm: load_checkpoint: resumedata len", len( pstate['engineresumedata']) if (tdef or sdef) and dscfg: if dscfg.get_dest_dir() != '': # removed torrent ignoring try: if not self.download_exists((tdef or sdef).get_id()): if tdef: initialdlstatus = initialdlstatus_dict.get( tdef.get_id(), initialdlstatus) self.add(tdef, dscfg, pstate, initialdlstatus, commit=commit, setupDelay=setupDelay) else: initialdlstatus = initialdlstatus_dict.get( sdef.get_id(), initialdlstatus) self.swift_add(sdef, dscfg, pstate, initialdlstatus) else: print >> sys.stderr, "tlm: not resuming checkpoint because download has already been added" except Exception as e: self.rawserver_nonfatalerrorfunc(e) else: print >> sys.stderr, "tlm: removing checkpoint", filename, "destdir is", dscfg.get_dest_dir( ) os.remove(filename) else: print >> sys.stderr, "tlm: could not resume checkpoint", filename, tdef, dscfg def checkpoint(self, stop=False, checkpoint=True, gracetime=2.0): """ Called by any thread, assume sesslock already held """ # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() if DEBUG or stop: print >> sys.stderr, "tlm: checkpointing", len( dllist), "stopping", stop network_checkpoint_callback_lambda = lambda: self.network_checkpoint_callback( dllist, stop, checkpoint, gracetime) self.rawserver.add_task(network_checkpoint_callback_lambda, 0.0) # TODO: checkpoint overlayapps / friendship msg handler def network_checkpoint_callback(self, dllist, stop, checkpoint, gracetime): """ Called by network thread """ if checkpoint: for d in dllist: try: # Tell all downloads to stop, and save their persistent state # in a infohash -> pstate dict which is then passed to the user # for storage. # if stop: (infohash, pstate) = d.network_stop(False, False) else: (infohash, pstate) = d.network_checkpoint() if DEBUG: print >> sys.stderr, "tlm: network checkpointing:", d.get_def( ).get_name(), pstate self.save_download_pstate(infohash, pstate) except Exception as e: self.rawserver_nonfatalerrorfunc(e) if stop: # Some grace time for early shutdown tasks if self.shutdownstarttime is not None: now = timemod.time() diff = now - self.shutdownstarttime if diff < gracetime: print >> sys.stderr, "tlm: shutdown: delaying for early shutdown tasks", gracetime - diff delay = gracetime - diff network_shutdown_callback_lambda = lambda: self.network_shutdown( ) self.rawserver.add_task(network_shutdown_callback_lambda, delay) return self.network_shutdown() def early_shutdown(self): """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time and that can run in parallel to checkpointing, etc. 
""" print >> sys.stderr, "tlm: early_shutdown" # Note: sesslock not held self.shutdownstarttime = timemod.time() if self.rtorrent_handler: self.rtorrent_handler.shutdown() self.rtorrent_handler.delInstance() if self.torrent_checking: self.torrent_checking.shutdown() self.torrent_checking.delInstance() if self.dispersy: print >> sys.stderr, "lmc: Dispersy shutdown", "[%d]" % id( self.dispersy) self.dispersy.stop(666.666) else: self.database_thread.shutdown(True) if self.session.get_megacache(): self.peer_db.delInstance() self.torrent_db.delInstance() self.mypref_db.delInstance() self.votecast_db.delInstance() self.channelcast_db.delInstance() self.nb_db.delInstance() self.ue_db.delInstance() self.cat.delInstance() self.term.delInstance() from Tribler.Core.CacheDB.sqlitecachedb import unregister unregister() # SWIFTPROC if self.spm is not None: self.spm.early_shutdown() if self.mainline_dht: from Tribler.Core.DecentralizedTracking import mainlineDHT mainlineDHT.deinit(self.mainline_dht) def network_shutdown(self): try: print >> sys.stderr, "tlm: network_shutdown" # Arno, 2012-07-04: Obsolete, each thread must close the DBHandler # it uses in its own shutdown procedure. There is no global close # of all per-thread cursors/connections. # # cachedb.done() # SWIFTPROC if self.spm is not None: self.spm.network_shutdown() ts = enumerate_threads() print >> sys.stderr, "tlm: Number of threads still running", len( ts) for t in ts: print >> sys.stderr, "tlm: Thread still running", t.getName( ), "daemon", t.isDaemon(), "instance:", t except: print_exc() # Stop network thread self.sessdoneflag.set() # Arno, 2010-08-09: Stop Session pool threads only after gracetime self.session.uch.shutdown() # Shutdown libtorrent session after checkpoints have been made if self.ltmgr: self.ltmgr.shutdown() self.ltmgr.delInstance() def save_download_pstate(self, infohash, pstate): """ Called by network thread """ basename = binascii.hexlify(infohash) + '.pickle' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if DEBUG: print >> sys.stderr, "tlm: network checkpointing: to file", filename f = open(filename, "wb") pickle.dump(pstate, f) f.close() def load_download_pstate(self, filename): """ Called by any thread """ f = open(filename, "rb") pstate = pickle.load(f) f.close() return pstate def run(self): if prctlimported: prctl.set_name("Tribler" + currentThread().getName()) if not self.initComplete: self.init() if PROFILE: fname = "profile-%s" % self.getName() import cProfile cProfile.runctx("self._run()", globals(), locals(), filename=fname) import pstats print >> sys.stderr, "profile: data for %s" % self.getName() pstats.Stats( fname, stream=sys.stderr).sort_stats("cumulative").print_stats(20) else: self._run() def start_upnp(self): if self.ltmgr: self.set_activity(NTFY_ACT_UPNP) for port, protocol in self.upnp_ports: if DEBUG: print >> sys.stderr, "tlm: adding upnp mapping for %d %s" % ( port, protocol) self.ltmgr.add_mapping(port, protocol) def stop_upnp(self): if self.ltmgr: self.ltmgr.delete_mappings() # Events from core meant for API user # def dialback_reachable_callback(self): """ Called by overlay+network thread """ self.session.uch.notify(NTFY_REACHABLE, NTFY_INSERT, None, '') def set_activity(self, type, str='', arg2=None): """ Called by overlay + network thread """ # print >>sys.stderr,"tlm: set_activity",type,str,arg2 self.session.uch.notify(NTFY_ACTIVITIES, NTFY_INSERT, type, str, arg2) def network_vod_event_callback(self, videoinfo, event, params): """ Called by network thread """ 
if DEBUG: print >> sys.stderr, "tlm: network_vod_event_callback: event %s, params %s" % ( event, params) # Call Session threadpool to call user's callback try: videoinfo['usercallback'](event, params) except: print_exc() def update_torrent_checking_period(self): # dynamically change the interval: update at least once per day if self.rtorrent_handler: ntorrents = self.rtorrent_handler.num_torrents if ntorrents > 0: self.torrent_checking_period = min(max(86400 / ntorrents, 30), 300) # print >> sys.stderr, "torrent_checking_period", self.torrent_checking_period def run_torrent_check(self): """ Called by network thread """ self.update_torrent_checking_period() self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period) try: self.torrent_checking.setInterval(self.torrent_checking_period) except Exception as e: print_exc() self.rawserver_nonfatalerrorfunc(e) # SWIFTPROC def swift_add(self, sdef, dscfg, pstate=None, initialdlstatus=None, hidden=False): """ Called by any thread """ d = None self.sesslock.acquire() try: if self.spm is None: raise OperationNotEnabledByConfigurationException() roothash = sdef.get_roothash() # Check if running or saved on disk if roothash in self.downloads: raise DuplicateDownloadException() from Tribler.Core.Swift.SwiftDownloadImpl import SwiftDownloadImpl d = SwiftDownloadImpl(self.session, sdef) # Store in list of Downloads, always. self.downloads[roothash] = d d.setup(dscfg, pstate, initialdlstatus, None, self.network_vod_event_callback) finally: self.sesslock.release() def do_db(torrent_db, mypref_db, roothash, sdef, d): torrent_id = torrent_db.addOrGetTorrentIDRoot( roothash, sdef.get_name()) # TODO: if user renamed the dest_path for single-file-torrent dest_path = d.get_dest_dir() data = {'destination_path': dest_path} mypref_db.addMyPreference(torrent_id, data) if d and not hidden and self.session.get_megacache(): self.database_thread.register(do_db, args=(self.torrent_db, self.mypref_db, roothash, sdef, d)) return d def swift_remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ self.sesslock.acquire() try: # SWIFTPROC: remove before stop_remove, to ensure that content # removal works (for torrents, stopping is delegate to network # so all this code happens fast before actual removal. For swift not. roothash = d.get_def().get_roothash() if roothash in self.downloads: del self.downloads[roothash] d.stop_remove(True, removestate=removestate, removecontent=removecontent) finally: self.sesslock.release() def do_db(torrent_db, my_prefdb, roothash): torrent_id = self.torrent_db.getTorrentIDRoot(roothash) if torrent_id: self.mypref_db.updateDestDir(torrent_id, "") if not hidden and self.session.get_megacache(): self.database_thread.register(do_db, args=(self.torrent_db, self.mypref_db, roothash), priority=1024)
class TriblerLaunchMany(TaskManager): def __init__(self): """ Called only once (unless we have multiple Sessions) by MainThread """ super(TriblerLaunchMany, self).__init__() self.initComplete = False self.registered = False self.dispersy = None self.state_cb_count = 0 self.previous_active_downloads = [] self.download_states_lc = None self.get_peer_list = [] self._logger = logging.getLogger(self.__class__.__name__) self.downloads = {} self.upnp_ports = [] self.session = None self.sesslock = None self.sessdoneflag = Event() self.shutdownstarttime = None # modules self.torrent_store = None self.metadata_store = None self.rtorrent_handler = None self.tftp_handler = None self.api_manager = None self.watch_folder = None self.version_check_manager = None self.category = None self.peer_db = None self.torrent_db = None self.mypref_db = None self.votecast_db = None self.channelcast_db = None self.search_manager = None self.channel_manager = None self.video_server = None self.mainline_dht = None self.ltmgr = None self.tracker_manager = None self.torrent_checker = None self.tunnel_community = None self.startup_deferred = Deferred() self.boosting_manager = None def register(self, session, sesslock): assert isInIOThread() if not self.registered: self.registered = True self.session = session self.sesslock = sesslock if self.session.get_torrent_store(): from Tribler.Core.leveldbstore import LevelDbStore self.torrent_store = LevelDbStore( self.session.get_torrent_store_dir()) if self.session.get_enable_metadata(): from Tribler.Core.leveldbstore import LevelDbStore self.metadata_store = LevelDbStore( self.session.get_metadata_store_dir()) # torrent collecting: RemoteTorrentHandler if self.session.get_torrent_collecting(): from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler self.rtorrent_handler = RemoteTorrentHandler(self.session) # TODO(emilon): move this to a megacache component or smth if self.session.get_megacache(): from Tribler.Core.CacheDB.SqliteCacheDBHandler import ( PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler, VoteCastDBHandler, ChannelCastDBHandler) from Tribler.Core.Category.Category import Category self._logger.debug('tlm: Reading Session state from %s', self.session.get_state_dir()) self.category = Category() # create DBHandlers self.peer_db = PeerDBHandler(self.session) self.torrent_db = TorrentDBHandler(self.session) self.mypref_db = MyPreferenceDBHandler(self.session) self.votecast_db = VoteCastDBHandler(self.session) self.channelcast_db = ChannelCastDBHandler(self.session) # initializes DBHandlers self.peer_db.initialize() self.torrent_db.initialize() self.mypref_db.initialize() self.votecast_db.initialize() self.channelcast_db.initialize() from Tribler.Core.Modules.tracker_manager import TrackerManager self.tracker_manager = TrackerManager(self.session) self.tracker_manager.initialize() if self.session.get_videoserver_enabled(): self.video_server = VideoServer( self.session.get_videoserver_port(), self.session) self.video_server.start() # Dispersy self.tftp_handler = None if self.session.get_dispersy(): from Tribler.dispersy.dispersy import Dispersy from Tribler.dispersy.endpoint import StandaloneEndpoint # set communication endpoint endpoint = StandaloneEndpoint(self.session.get_dispersy_port(), ip=self.session.get_ip()) working_directory = unicode(self.session.get_state_dir()) self.dispersy = Dispersy(endpoint, working_directory) # register TFTP service from Tribler.Core.TFTP.handler import TftpHandler self.tftp_handler = TftpHandler(self.session, endpoint, 
"fffffffd".decode('hex'), block_size=1024) self.tftp_handler.initialize() if self.session.get_enable_torrent_search( ) or self.session.get_enable_channel_search(): self.search_manager = SearchManager(self.session) self.search_manager.initialize() if not self.initComplete: self.init() self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER, [NTFY_STARTED]) self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None) return self.startup_deferred def on_tribler_started(self, subject, changetype, objectID, *args): reactor.callFromThread(self.startup_deferred.callback, None) @blocking_call_on_reactor_thread def load_communities(self): self._logger.info("tribler: Preparing communities...") now_time = timemod.time() default_kwargs = {'tribler_session': self.session} # Search Community if self.session.get_enable_torrent_search(): from Tribler.community.search.community import SearchCommunity self.dispersy.define_auto_load(SearchCommunity, self.session.dispersy_member, load=True, kargs=default_kwargs) # AllChannel Community if self.session.get_enable_channel_search(): from Tribler.community.allchannel.community import AllChannelCommunity self.dispersy.define_auto_load(AllChannelCommunity, self.session.dispersy_member, load=True, kargs=default_kwargs) # Channel Community if self.session.get_channel_community_enabled(): from Tribler.community.channel.community import ChannelCommunity self.dispersy.define_auto_load(ChannelCommunity, self.session.dispersy_member, load=True, kargs=default_kwargs) # PreviewChannel Community if self.session.get_preview_channel_community_enabled(): from Tribler.community.channel.preview import PreviewChannelCommunity self.dispersy.define_auto_load(PreviewChannelCommunity, self.session.dispersy_member, kargs=default_kwargs) if self.session.get_tunnel_community_enabled(): tunnel_settings = TunnelSettings(tribler_session=self.session) tunnel_kwargs = { 'tribler_session': self.session, 'settings': tunnel_settings } if self.session.get_enable_multichain(): multichain_kwargs = {'tribler_session': self.session} # If the multichain is enabled, we use the permanent multichain keypair # for both the multichain and the tunnel community keypair = self.session.multichain_keypair dispersy_member = self.dispersy.get_member( private_key=keypair.key_to_bin()) from Tribler.community.multichain.community import MultiChainCommunity self.dispersy.define_auto_load(MultiChainCommunity, dispersy_member, load=True, kargs=multichain_kwargs) else: keypair = self.dispersy.crypto.generate_key(u"curve25519") dispersy_member = self.dispersy.get_member( private_key=self.dispersy.crypto.key_to_bin(keypair)) from Tribler.community.tunnel.hidden_community import HiddenTunnelCommunity self.tunnel_community = self.dispersy.define_auto_load( HiddenTunnelCommunity, dispersy_member, load=True, kargs=tunnel_kwargs)[0] # We don't want to automatically load other instances of this community with other master members. 
self.dispersy.undefine_auto_load(HiddenTunnelCommunity) self._logger.info("tribler: communities are ready in %.2f seconds", timemod.time() - now_time) def init(self): if self.dispersy: from Tribler.dispersy.community import HardKilledCommunity self._logger.info("lmc: Starting Dispersy...") now = timemod.time() success = self.dispersy.start(self.session.autoload_discovery) diff = timemod.time() - now if success: self._logger.info( "lmc: Dispersy started successfully in %.2f seconds [port: %d]", diff, self.dispersy.wan_address[1]) else: self._logger.info( "lmc: Dispersy failed to start in %.2f seconds", diff) self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP')) from Tribler.dispersy.crypto import M2CryptoSK self.session.dispersy_member = blockingCallFromThread( reactor, self.dispersy.get_member, private_key=self.dispersy.crypto.key_to_bin( M2CryptoSK( filename=self.session.get_permid_keypair_filename()))) blockingCallFromThread(reactor, self.dispersy.define_auto_load, HardKilledCommunity, self.session.dispersy_member, load=True) if self.session.get_megacache(): self.dispersy.database.attach_commit_callback( self.session.sqlite_db.commit_now) # notify dispersy finished loading self.session.notifier.notify(NTFY_DISPERSY, NTFY_STARTED, None) self.load_communities() self.session.set_anon_proxy_settings( 2, ("127.0.0.1", self.session.get_tunnel_community_socks5_listen_ports())) if self.session.get_enable_channel_search(): from Tribler.Core.Modules.channel.channel_manager import ChannelManager self.channel_manager = ChannelManager(self.session) self.channel_manager.initialize() if self.session.get_mainline_dht(): from Tribler.Core.DecentralizedTracking import mainlineDHT self.mainline_dht = mainlineDHT.init( ('127.0.0.1', self.session.get_mainline_dht_listen_port()), self.session.get_state_dir()) self.upnp_ports.append( (self.session.get_mainline_dht_listen_port(), 'UDP')) if self.session.get_libtorrent(): from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr self.ltmgr = LibtorrentMgr(self.session) self.ltmgr.initialize() for port, protocol in self.upnp_ports: self.ltmgr.add_upnp_mapping(port, protocol) # add task for tracker checking if self.session.get_torrent_checking(): self.torrent_checker = TorrentChecker(self.session) self.torrent_checker.initialize() if self.rtorrent_handler: self.rtorrent_handler.initialize() if self.api_manager: self.api_manager.root_endpoint.start_endpoints() if self.session.get_watch_folder_enabled(): self.watch_folder = WatchFolder(self.session) self.watch_folder.start() if self.session.get_creditmining_enable(): from Tribler.Core.CreditMining.BoostingManager import BoostingManager self.boosting_manager = BoostingManager(self.session) self.version_check_manager = VersionCheckManager(self.session) self.session.set_download_states_callback(self.sesscb_states_callback) self.initComplete = True def add(self, tdef, dscfg, pstate=None, setupDelay=0, hidden=False, share_mode=False, checkpoint_disabled=False): """ Called by any thread """ d = None with self.sesslock: if not isinstance( tdef, TorrentDefNoMetainfo) and not tdef.is_finalized(): raise ValueError("TorrentDef not finalized") infohash = tdef.get_infohash() # Create the destination directory if it does not exist yet try: if not os.path.isdir(dscfg.get_dest_dir()): os.makedirs(dscfg.get_dest_dir()) except OSError: self._logger.error( "Unable to create the download destination directory.") if dscfg.get_time_added() == 0: dscfg.set_time_added(int(timemod.time())) # Check if running or saved on disk 
if infohash in self.downloads: raise DuplicateDownloadException( "This download already exists.") from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl d = LibtorrentDownloadImpl(self.session, tdef) if pstate is None: # not already resuming pstate = self.load_download_pstate_noexc(infohash) if pstate is not None: self._logger.debug("tlm: add: pstate is %s %s", pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress')) # Store in list of Downloads, always. self.downloads[infohash] = d setup_deferred = d.setup(dscfg, pstate, wrapperDelay=setupDelay, share_mode=share_mode, checkpoint_disabled=checkpoint_disabled) setup_deferred.addCallback(self.on_download_handle_created) if d and not hidden and self.session.get_megacache(): @forceDBThread def write_my_pref(): torrent_id = self.torrent_db.getTorrentID(infohash) data = {'destination_path': d.get_dest_dir()} self.mypref_db.addMyPreference(torrent_id, data) if isinstance(tdef, TorrentDefNoMetainfo): self.torrent_db.addOrGetTorrentID(tdef.get_infohash()) self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name_as_unicode()) write_my_pref() elif self.rtorrent_handler: self.rtorrent_handler.save_torrent(tdef, write_my_pref) else: self.torrent_db.addExternalTorrent( tdef, extra_info={'status': 'good'}) write_my_pref() return d def on_download_handle_created(self, download): """ This method is called when the download handle has been created. Immediately checkpoint the download and write the resume data. """ return download.checkpoint() def remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ with self.sesslock: d.stop_remove(removestate=removestate, removecontent=removecontent) infohash = d.get_def().get_infohash() if infohash in self.downloads: del self.downloads[infohash] if not hidden: self.remove_id(infohash) if self.tunnel_community: self.tunnel_community.on_download_removed(d) def remove_id(self, infohash): @forceDBThread def do_db(): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id: self.mypref_db.deletePreference(torrent_id) if self.session.get_megacache(): do_db() def get_downloads(self): """ Called by any thread """ with self.sesslock: return self.downloads.values() # copy, is mutable def get_download(self, infohash): """ Called by any thread """ with self.sesslock: return self.downloads.get(infohash, None) def download_exists(self, infohash): with self.sesslock: return infohash in self.downloads def update_download_hops(self, download, new_hops): """ Update the amount of hops for a specified download. This can be done on runtime. """ infohash = binascii.hexlify(download.tdef.get_infohash()) self._logger.info("Updating the amount of hops of download %s", infohash) self.session.remove_download(download) # copy the old download_config and change the hop count dscfg = download.copy() dscfg.set_hops(new_hops) self.register_task( "reschedule_download_%s" % infohash, reactor.callLater(3, self.session.start_download_from_tdef, download.tdef, dscfg)) def update_trackers(self, infohash, trackers): """ Update the trackers for a download. :param infohash: infohash of the torrent that needs to be updated :param trackers: A list of tracker urls. 
""" dl = self.get_download(infohash) old_def = dl.get_def() if dl else None if old_def: old_trackers = old_def.get_trackers_as_single_tuple() new_trackers = list(set(trackers) - set(old_trackers)) all_trackers = list(old_trackers) + new_trackers if new_trackers: # Add new trackers to the download dl.add_trackers(new_trackers) # Create a new TorrentDef if isinstance(old_def, TorrentDefNoMetainfo): new_def = TorrentDefNoMetainfo(old_def.get_infohash(), old_def.get_name(), dl.get_magnet_link()) else: metainfo = old_def.get_metainfo() if len(all_trackers) > 1: metainfo["announce-list"] = [all_trackers] else: metainfo["announce"] = all_trackers[0] new_def = TorrentDef.load_from_dict(metainfo) # Set TorrentDef + checkpoint dl.set_def(new_def) dl.checkpoint() if isinstance(old_def, TorrentDefNoMetainfo): @forceDBThread def update_trackers_db(infohash, new_trackers): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id is not None: self.torrent_db.addTorrentTrackerMappingInBatch( torrent_id, new_trackers) self.session.notifier.notify( NTFY_TORRENTS, NTFY_UPDATE, infohash) if self.session.get_megacache(): update_trackers_db(infohash, new_trackers) elif not isinstance( old_def, TorrentDefNoMetainfo) and self.rtorrent_handler: # Update collected torrents self.rtorrent_handler.save_torrent(new_def) # # State retrieval # def stop_download_states_callback(self): """ Stop any download states callback if present. """ if self.is_pending_task_active("download_states_lc"): self.cancel_pending_task("download_states_lc") def set_download_states_callback(self, usercallback, interval=1.0): """ Set the download state callback. Remove any old callback if it's present. """ self.stop_download_states_callback() self._logger.debug( "Starting the download state callback with interval %f", interval) self.download_states_lc = self.register_task( "download_states_lc", LoopingCall(self._invoke_states_cb, usercallback)) self.download_states_lc.start(interval) def _invoke_states_cb(self, callback): """ Invoke the download states callback with a list of the download states. """ dslist = [] for d in self.downloads.values(): d.set_moreinfo_stats( True in self.get_peer_list or d.get_def().get_infohash() in self.get_peer_list) ds = d.network_get_state(None, False) dslist.append(ds) def on_cb_done(new_get_peer_list): self.get_peer_list = new_get_peer_list return deferToThread(callback, dslist).addCallback(on_cb_done) def sesscb_states_callback(self, states_list): """ This method is periodically (every second) called with a list of the download states of the active downloads. 
""" self.state_cb_count += 1 # Check to see if a download has finished new_active_downloads = [] do_checkpoint = False seeding_download_list = [] for ds in states_list: state = ds.get_status() download = ds.get_download() tdef = download.get_def() safename = tdef.get_name_as_unicode() if state == DLSTATUS_DOWNLOADING: new_active_downloads.append(safename) elif state == DLSTATUS_STOPPED_ON_ERROR: self._logger.error("Error during download: %s", repr(ds.get_error())) self.downloads.get(tdef.get_infohash()).stop() self.session.notifier.notify(NTFY_TORRENT, NTFY_ERROR, tdef.get_infohash(), repr(ds.get_error())) elif state == DLSTATUS_SEEDING: seeding_download_list.append({ u'infohash': tdef.get_infohash(), u'download': download }) if safename in self.previous_active_downloads: self.session.notifier.notify(NTFY_TORRENT, NTFY_FINISHED, tdef.get_infohash(), safename) do_checkpoint = True elif download.get_hops() == 0 and download.get_safe_seeding(): hops = tribler_defaults.get('Tribler', {}).get( 'default_number_hops', 1) self._logger.info( "Moving completed torrent to tunneled session %d for hidden seeding %r", hops, download) self.session.remove_download(download) # copy the old download_config and change the hop count dscfg = download.copy() dscfg.set_hops(hops) # TODO(emilon): That's a hack to work around the fact that removing a torrent is racy. self.register_task( "reschedule_download_%s" % tdef.get_infohash(), reactor.callLater( 5, self.session.start_download_from_tdef, tdef, dscfg)) self.previous_active_downloads = new_active_downloads if do_checkpoint: self.session.checkpoint_downloads() if self.state_cb_count % 4 == 0 and self.tunnel_community: self.tunnel_community.monitor_downloads(states_list) return [] # # Persistence methods # def load_checkpoint(self): """ Called by any thread """ def do_load_checkpoint(): with self.sesslock: for i, filename in enumerate( iglob( os.path.join( self.session.get_downloads_pstate_dir(), '*.state'))): self.resume_download(filename, setupDelay=i * 0.1) if self.initComplete: do_load_checkpoint() else: self.register_task("load_checkpoint", reactor.callLater(1, do_load_checkpoint)) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume sesslock already held """ try: basename = binascii.hexlify(infohash) + '.state' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if os.path.exists(filename): return self.load_download_pstate(filename) else: self._logger.info("%s not found", basename) except Exception: self._logger.exception("Exception while loading pstate: %s", infohash) def resume_download(self, filename, setupDelay=0): tdef = dscfg = pstate = None try: pstate = self.load_download_pstate(filename) # SWIFTPROC metainfo = pstate.get('state', 'metainfo') if 'infohash' in metainfo: tdef = TorrentDefNoMetainfo(metainfo['infohash'], metainfo['name'], metainfo.get('url', None)) else: tdef = TorrentDef.load_from_dict(metainfo) if pstate.has_option('downloadconfig', 'saveas') and \ isinstance(pstate.get('downloadconfig', 'saveas'), tuple): pstate.set('downloadconfig', 'saveas', pstate.get('downloadconfig', 'saveas')[-1]) dscfg = DownloadStartupConfig(pstate) except: # pstate is invalid or non-existing _, file = os.path.split(filename) infohash = binascii.unhexlify(file[:-6]) torrent_data = self.torrent_store.get(infohash) if torrent_data: try: tdef = TorrentDef.load_from_memory(torrent_data) defaultDLConfig = DefaultDownloadStartupConfig.getInstance( ) dscfg = defaultDLConfig.copy() if self.mypref_db is not None: 
dest_dir = self.mypref_db.getMyPrefStatsInfohash( infohash) if dest_dir and os.path.isdir(dest_dir): dscfg.set_dest_dir(dest_dir) except ValueError: self._logger.warning("tlm: torrent data invalid") if pstate is not None: has_resume_data = pstate.get('state', 'engineresumedata') is not None self._logger.debug( "tlm: load_checkpoint: resumedata %s", 'len %s ' % len(pstate.get('state', 'engineresumedata')) if has_resume_data else 'None') if tdef and dscfg: if dscfg.get_dest_dir() != '': # removed torrent ignoring try: if not self.download_exists(tdef.get_infohash()): self.add(tdef, dscfg, pstate, setupDelay=setupDelay) else: self._logger.info( "tlm: not resuming checkpoint because download has already been added" ) except Exception as e: self._logger.exception( "tlm: load check_point: exception while adding download %s", tdef) else: self._logger.info("tlm: removing checkpoint %s destdir is %s", filename, dscfg.get_dest_dir()) os.remove(filename) else: self._logger.info("tlm: could not resume checkpoint %s %s %s", filename, tdef, dscfg) def checkpoint_downloads(self): """ Checkpoints all running downloads in Tribler. Even if the list of Downloads changes in the mean time this is no problem. For removals, dllist will still hold a pointer to the download, and additions are no problem (just won't be included in list of states returned via callback). """ downloads = self.downloads.values() deferred_list = [] self._logger.debug("tlm: checkpointing %s downloads", len(downloads)) for download in downloads: deferred_list.append(download.checkpoint()) return DeferredList(deferred_list) def shutdown_downloads(self): """ Shutdown all downloads in Tribler. """ for download in self.downloads.values(): download.stop() def remove_pstate(self, infohash): def do_remove(): if not self.download_exists(infohash): dlpstatedir = self.session.get_downloads_pstate_dir() # Remove checkpoint hexinfohash = binascii.hexlify(infohash) try: basename = hexinfohash + '.state' filename = os.path.join(dlpstatedir, basename) self._logger.debug( "remove pstate: removing dlcheckpoint entry %s", filename) if os.access(filename, os.F_OK): os.remove(filename) except: # Show must go on self._logger.exception("Could not remove state") else: self._logger.warning( "remove pstate: download is back, restarted? Canceling removal! %s", repr(infohash)) reactor.callFromThread(do_remove) @inlineCallbacks def early_shutdown(self): """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time and that can run in parallel to checkpointing, etc. :returns a Deferred that will fire once all dependencies acknowledge they have shutdown. 
""" self._logger.info("tlm: early_shutdown") self.cancel_all_pending_tasks() # Note: sesslock not held self.shutdownstarttime = timemod.time() if self.boosting_manager: yield self.boosting_manager.shutdown() self.boosting_manager = None if self.torrent_checker: yield self.torrent_checker.shutdown() self.torrent_checker = None if self.channel_manager: yield self.channel_manager.shutdown() self.channel_manager = None if self.search_manager: yield self.search_manager.shutdown() self.search_manager = None if self.rtorrent_handler: yield self.rtorrent_handler.shutdown() self.rtorrent_handler = None if self.video_server: yield self.video_server.shutdown_server() self.video_server = None if self.version_check_manager: self.version_check_manager.stop() self.version_check_manager = None if self.tracker_manager: yield self.tracker_manager.shutdown() self.tracker_manager = None if self.dispersy: self._logger.info("lmc: Shutting down Dispersy...") now = timemod.time() try: success = yield self.dispersy.stop() except: print_exc() success = False diff = timemod.time() - now if success: self._logger.info( "lmc: Dispersy successfully shutdown in %.2f seconds", diff) else: self._logger.info( "lmc: Dispersy failed to shutdown in %.2f seconds", diff) if self.metadata_store is not None: yield self.metadata_store.close() self.metadata_store = None if self.tftp_handler is not None: yield self.tftp_handler.shutdown() self.tftp_handler = None if self.channelcast_db is not None: yield self.channelcast_db.close() self.channelcast_db = None if self.votecast_db is not None: yield self.votecast_db.close() self.votecast_db = None if self.mypref_db is not None: yield self.mypref_db.close() self.mypref_db = None if self.torrent_db is not None: yield self.torrent_db.close() self.torrent_db = None if self.peer_db is not None: yield self.peer_db.close() self.peer_db = None if self.mainline_dht is not None: from Tribler.Core.DecentralizedTracking import mainlineDHT yield mainlineDHT.deinit(self.mainline_dht) self.mainline_dht = None if self.torrent_store is not None: yield self.torrent_store.close() self.torrent_store = None if self.api_manager is not None: yield self.api_manager.stop() self.api_manager = None if self.watch_folder is not None: yield self.watch_folder.stop() self.watch_folder = None def network_shutdown(self): try: self._logger.info("tlm: network_shutdown") ts = enumerate_threads() self._logger.info("tlm: Number of threads still running %d", len(ts)) for t in ts: self._logger.info( "tlm: Thread still running=%s, daemon=%s, instance=%s", t.getName(), t.isDaemon(), t) except: print_exc() # Stop network thread self.sessdoneflag.set() # Shutdown libtorrent session after checkpoints have been made if self.ltmgr is not None: self.ltmgr.shutdown() self.ltmgr = None def save_download_pstate(self, infohash, pstate): """ Called by network thread """ self.downloads[infohash].pstate_for_restart = pstate self.register_task("save_pstate %f" % timemod.clock(), self.downloads[infohash].save_resume_data()) def load_download_pstate(self, filename): """ Called by any thread """ pstate = CallbackConfigParser() pstate.read_file(filename) return pstate # Events from core meant for API user # def sessconfig_changed_callback(self, section, name, new_value, old_value): value_changed = new_value != old_value if section == 'libtorrent' and name == 'utp': if self.ltmgr and value_changed: self.ltmgr.set_utp(new_value) elif section == 'libtorrent' and name == 'lt_proxyauth': if self.ltmgr: self.ltmgr.set_proxy_settings( None, 
                    *self.session.get_libtorrent_proxy_settings())
        # Return True/False, depending on whether or not the config value can be changed at runtime.
        elif (section == 'general' and name in ['nickname', 'mugshot', 'videoanalyserpath']) or \
                (section == 'libtorrent' and name in ['lt_proxytype', 'lt_proxyserver', 'anon_proxyserver',
                                                      'anon_proxytype', 'anon_proxyauth', 'anon_listen_port']) or \
                (section == 'torrent_collecting' and name in ['stop_collecting_threshold']) or \
                (section == 'watch_folder') or \
                (section == 'tunnel_community' and name in ['socks5_listen_port']) or \
                (section == 'credit_mining' and name in ['max_torrents_per_source', 'max_torrents_active',
                                                         'source_interval', 'swarm_interval', 'boosting_sources',
                                                         'boosting_enabled', 'boosting_disabled', 'archive_sources']):
            return True
        else:
            return False
        return True
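
# --- Editor's sketch (not Tribler code) --------------------------------------
# The class above drives its periodic download-state reporting with a Twisted
# LoopingCall registered through the TaskManager (set_download_states_callback /
# _invoke_states_cb) and hands each snapshot to the user callback on a worker
# thread via deferToThread. The minimal, self-contained sketch below shows that
# pattern in isolation; it is an illustration under the assumption of a plain
# list of dicts standing in for the real Download objects, and poll_states,
# start_states_loop and on_states are hypothetical names, not Tribler API.
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.internet.threads import deferToThread


def poll_states(user_callback, downloads):
    # Runs on the reactor thread: take a cheap snapshot of the current states...
    snapshot = [dict(d) for d in downloads]
    # ...and run the (possibly slow) user callback on the thread pool so the
    # reactor is never blocked, mirroring _invoke_states_cb above.
    return deferToThread(user_callback, snapshot)


def start_states_loop(user_callback, downloads, interval=1.0):
    lc = LoopingCall(poll_states, user_callback, downloads)
    lc.start(interval)  # fires immediately, then every `interval` seconds
    return lc           # keep the handle so the loop can be stopped on shutdown


if __name__ == '__main__':
    fake_downloads = [{'infohash': 'ab' * 20, 'progress': 0.0}]

    def on_states(states):
        print('got %d download state(s)' % len(states))

    loop = start_states_loop(on_states, fake_downloads)
    reactor.callLater(3.5, loop.stop)
    reactor.callLater(4.0, reactor.stop)
    reactor.run()
# -----------------------------------------------------------------------------
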
class TriblerLaunchMany(TaskManager): def __init__(self): """ Called only once (unless we have multiple Sessions) by MainThread """ super(TriblerLaunchMany, self).__init__() self.initComplete = False self.registered = False self.dispersy = None self._logger = logging.getLogger(self.__class__.__name__) self.downloads = {} self.upnp_ports = [] self.session = None self.sesslock = None self.sessdoneflag = Event() self.shutdownstarttime = None # modules self.threadpool = ThreadPoolManager() self.torrent_store = None self.metadata_store = None self.rtorrent_handler = None self.tftp_handler = None self.cat = None self.peer_db = None self.torrent_db = None self.mypref_db = None self.votecast_db = None self.channelcast_db = None self.search_manager = None self.channel_manager = None self.videoplayer = None self.mainline_dht = None self.ltmgr = None self.tracker_manager = None self.torrent_checker = None self.tunnel_community = None def register(self, session, sesslock, autoload_discovery=True): if not self.registered: self.registered = True self.session = session self.sesslock = sesslock if self.session.get_torrent_store(): from Tribler.Core.leveldbstore import LevelDbStore self.torrent_store = LevelDbStore( self.session.get_torrent_store_dir()) if self.session.get_enable_metadata(): from Tribler.Core.leveldbstore import LevelDbStore self.metadata_store = LevelDbStore( self.session.get_metadata_store_dir()) # torrent collecting: RemoteTorrentHandler if self.session.get_torrent_collecting(): from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler self.rtorrent_handler = RemoteTorrentHandler(self.session) # TODO(emilon): move this to a megacache component or smth if self.session.get_megacache(): from Tribler.Core.CacheDB.SqliteCacheDBHandler import ( PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler, VoteCastDBHandler, ChannelCastDBHandler) from Tribler.Category.Category import Category self._logger.debug('tlm: Reading Session state from %s', self.session.get_state_dir()) self.cat = Category.getInstance(self.session) # create DBHandlers self.peer_db = PeerDBHandler(self.session) self.torrent_db = TorrentDBHandler(self.session) self.mypref_db = MyPreferenceDBHandler(self.session) self.votecast_db = VoteCastDBHandler(self.session) self.channelcast_db = ChannelCastDBHandler(self.session) # initializes DBHandlers self.peer_db.initialize() self.torrent_db.initialize() self.mypref_db.initialize() self.votecast_db.initialize() self.channelcast_db.initialize() from Tribler.Core.Modules.tracker_manager import TrackerManager self.tracker_manager = TrackerManager(self.session) self.tracker_manager.initialize() if self.session.get_videoplayer(): self.videoplayer = VideoPlayer(self.session) # Dispersy self.session.dispersy_member = None self.tftp_handler = None if self.session.get_dispersy(): from Tribler.dispersy.dispersy import Dispersy from Tribler.dispersy.endpoint import StandaloneEndpoint # set communication endpoint endpoint = StandaloneEndpoint(self.session.get_dispersy_port(), ip=self.session.get_ip()) working_directory = unicode(self.session.get_state_dir()) self.dispersy = Dispersy(endpoint, working_directory) # register TFTP service from Tribler.Core.TFTP.handler import TftpHandler self.tftp_handler = TftpHandler(self.session, endpoint, "fffffffd".decode('hex'), block_size=1024) self.tftp_handler.initialize() if self.session.get_enable_torrent_search( ) or self.session.get_enable_channel_search(): self.search_manager = SearchManager(self.session) self.search_manager.initialize() if 
self.session.get_enable_channel_search(): from Tribler.Core.Modules.channel_manager import ChannelManager self.channel_manager = ChannelManager(self.session) self.channel_manager.initialize() if not self.initComplete: self.init(autoload_discovery) def init(self, autoload_discovery): if self.dispersy: from Tribler.dispersy.community import HardKilledCommunity self._logger.info("lmc: Starting Dispersy...") now = timemod.time() success = self.dispersy.start(autoload_discovery) diff = timemod.time() - now if success: self._logger.info( "lmc: Dispersy started successfully in %.2f seconds [port: %d]", diff, self.dispersy.wan_address[1]) else: self._logger.info( "lmc: Dispersy failed to start in %.2f seconds", diff) self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP')) from Tribler.dispersy.crypto import M2CryptoSK self.session.dispersy_member = blockingCallFromThread( reactor, self.dispersy.get_member, private_key=self.dispersy.crypto.key_to_bin( M2CryptoSK( filename=self.session.get_permid_keypair_filename()))) blockingCallFromThread(reactor, self.dispersy.define_auto_load, HardKilledCommunity, self.session.dispersy_member, load=True) if self.session.get_megacache(): self.dispersy.database.attach_commit_callback( self.session.sqlite_db.commit_now) # notify dispersy finished loading self.session.notifier.notify(NTFY_DISPERSY, NTFY_STARTED, None) @blocking_call_on_reactor_thread def load_communities(): # load communities # Search Community if self.session.get_enable_torrent_search(): from Tribler.community.search.community import SearchCommunity self.dispersy.define_auto_load( SearchCommunity, self.session.dispersy_member, load=True, kargs={'tribler_session': self.session}) # AllChannel Community if self.session.get_enable_channel_search(): from Tribler.community.allchannel.community import AllChannelCommunity self.dispersy.define_auto_load( AllChannelCommunity, self.session.dispersy_member, load=True, kargs={'tribler_session': self.session}) load_communities() from Tribler.Core.DecentralizedTracking import mainlineDHT try: self.mainline_dht = mainlineDHT.init( ('127.0.0.1', self.session.get_mainline_dht_listen_port()), self.session.get_state_dir()) self.upnp_ports.append( (self.session.get_mainline_dht_listen_port(), 'UDP')) except: print_exc() if self.session.get_libtorrent(): from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr self.ltmgr = LibtorrentMgr(self.session) self.ltmgr.initialize() # FIXME(lipu): upnp APIs are not exported in libtorrent python-binding. 
#for port, protocol in self.upnp_ports: # self.ltmgr.add_upnp_mapping(port, protocol) # add task for tracker checking if self.session.get_torrent_checking(): try: from Tribler.Core.TorrentChecker.torrent_checker import TorrentChecker self.torrent_checker = TorrentChecker(self.session) self.torrent_checker.initialize() except: print_exc() if self.rtorrent_handler: self.rtorrent_handler.initialize() self.initComplete = True def add(self, tdef, dscfg, pstate=None, initialdlstatus=None, setupDelay=0, hidden=False): """ Called by any thread """ d = None self.sesslock.acquire() try: if not isinstance( tdef, TorrentDefNoMetainfo) and not tdef.is_finalized(): raise ValueError("TorrentDef not finalized") infohash = tdef.get_infohash() # Check if running or saved on disk if infohash in self.downloads: raise DuplicateDownloadException() from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl d = LibtorrentDownloadImpl(self.session, tdef) if pstate is None: # not already resuming pstate = self.load_download_pstate_noexc(infohash) if pstate is not None: self._logger.debug("tlm: add: pstate is %s %s", pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress')) # Store in list of Downloads, always. self.downloads[infohash] = d d.setup(dscfg, pstate, initialdlstatus, self.network_engine_wrapper_created_callback, wrapperDelay=setupDelay) finally: self.sesslock.release() if d and not hidden and self.session.get_megacache(): @forceDBThread def write_my_pref(): torrent_id = self.torrent_db.getTorrentID(infohash) data = {'destination_path': d.get_dest_dir()} self.mypref_db.addMyPreference(torrent_id, data) if isinstance(tdef, TorrentDefNoMetainfo): self.torrent_db.addOrGetTorrentID(tdef.get_infohash()) self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name_as_unicode()) write_my_pref() elif self.rtorrent_handler: self.rtorrent_handler.save_torrent(tdef, write_my_pref) else: self.torrent_db.addExternalTorrent( tdef, extra_info={'status': 'good'}) write_my_pref() return d def network_engine_wrapper_created_callback(self, d, pstate): """ Called by network thread """ try: if pstate is None: # Checkpoint at startup (infohash, pstate) = d.network_checkpoint() self.save_download_pstate(infohash, pstate) except: print_exc() def remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ with self.sesslock: d.stop_remove(removestate=removestate, removecontent=removecontent) infohash = d.get_def().get_infohash() if infohash in self.downloads: del self.downloads[infohash] if not hidden: self.remove_id(infohash) def remove_id(self, infohash): @forceDBThread def do_db(infohash): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id: self.mypref_db.deletePreference(torrent_id) if self.session.get_megacache(): do_db(infohash) def get_downloads(self): """ Called by any thread """ with self.sesslock: return self.downloads.values() # copy, is mutable def get_download(self, infohash): """ Called by any thread """ with self.sesslock: return self.downloads.get(infohash, None) def download_exists(self, infohash): with self.sesslock: return infohash in self.downloads def update_trackers(self, infohash, trackers): """ Update the trackers for a download. :param infohash: infohash of the torrent that needs to be updated :param trackers: A list of tracker urls. 
""" dl = self.get_download(infohash) old_def = dl.get_def() if dl else None if old_def: old_trackers = old_def.get_trackers_as_single_tuple() new_trackers = list(set(trackers) - set(old_trackers)) all_trackers = list(old_trackers) + new_trackers if new_trackers: # Add new trackers to the download dl.add_trackers(new_trackers) # Create a new TorrentDef if isinstance(old_def, TorrentDefNoMetainfo): new_def = TorrentDefNoMetainfo(old_def.get_infohash(), old_def.get_name(), dl.get_magnet_link()) else: metainfo = old_def.get_metainfo() if len(all_trackers) > 1: metainfo["announce-list"] = [all_trackers] else: metainfo["announce"] = all_trackers[0] new_def = TorrentDef.load_from_dict(metainfo) # Set TorrentDef + checkpoint dl.set_def(new_def) dl.checkpoint() if isinstance(old_def, TorrentDefNoMetainfo): @forceDBThread def update_trackers_db(infohash, new_trackers): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id is not None: self.torrent_db.addTorrentTrackerMappingInBatch( torrent_id, new_trackers) self.session.notifier.notify( NTFY_TORRENTS, NTFY_UPDATE, infohash) if self.session.get_megacache(): update_trackers_db(infohash, new_trackers) elif not isinstance( old_def, TorrentDefNoMetainfo) and self.rtorrent_handler: # Update collected torrents self.rtorrent_handler.save_torrent(new_def) # # State retrieval # def set_download_states_callback(self, usercallback, getpeerlist, when=0.0): """ Called by any thread """ for d in self.downloads.values(): # Arno, 2012-05-23: At Niels' request to get total transferred # stats. Causes MOREINFO message to be sent from swift proc # for every initiated dl. # 2012-07-31: Turn MOREINFO on/off on demand for efficiency. # 2013-04-17: Libtorrent now uses set_moreinfo_stats as well. d.set_moreinfo_stats(True in getpeerlist or d.get_def().get_infohash() in getpeerlist) network_set_download_states_callback_lambda = lambda: self.network_set_download_states_callback( usercallback) self.threadpool.add_task(network_set_download_states_callback_lambda, when) def network_set_download_states_callback(self, usercallback): """ Called by network thread """ dslist = [] for d in self.downloads.values(): try: ds = d.network_get_state(None, False) dslist.append(ds) except: # Niels, 2012-10-18: If Swift connection is crashing, it will raise an exception # We're catching it here to continue building the downloadstates print_exc() # Invoke the usercallback function on a separate thread. # After the callback is invoked, the return values will be passed to the # returncallback for post-callback processing. 
def session_getstate_usercallback_target(): when, newgetpeerlist = usercallback(dslist) if when > 0.0: # reschedule self.set_download_states_callback(usercallback, newgetpeerlist, when=when) self.threadpool.add_task(session_getstate_usercallback_target) # # Persistence methods # def load_checkpoint(self, initialdlstatus=None, initialdlstatus_dict={}): """ Called by any thread """ def do_load_checkpoint(initialdlstatus, initialdlstatus_dict): with self.sesslock: for i, filename in enumerate( iglob( os.path.join( self.session.get_downloads_pstate_dir(), '*.state'))): self.resume_download(filename, initialdlstatus, initialdlstatus_dict, setupDelay=i * 0.1) if self.initComplete: do_load_checkpoint(initialdlstatus, initialdlstatus_dict) else: self.register_task("load_checkpoint", reactor.callLater(1, do_load_checkpoint)) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume sesslock already held """ try: basename = binascii.hexlify(infohash) + '.state' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if os.path.exists(filename): return self.load_download_pstate(filename) else: self._logger.info("%s not found", basename) except Exception: self._logger.exception("Exception while loading pstate: %s", infohash) def resume_download(self, filename, initialdlstatus=None, initialdlstatus_dict={}, setupDelay=0): tdef = dscfg = pstate = None try: pstate = self.load_download_pstate(filename) # SWIFTPROC metainfo = pstate.get('state', 'metainfo') if 'infohash' in metainfo: tdef = TorrentDefNoMetainfo(metainfo['infohash'], metainfo['name'], metainfo.get('url', None)) else: tdef = TorrentDef.load_from_dict(metainfo) if pstate.has_option('downloadconfig', 'saveas') and \ isinstance(pstate.get('downloadconfig', 'saveas'), tuple): pstate.set('downloadconfig', 'saveas', pstate.get('downloadconfig', 'saveas')[-1]) dscfg = DownloadStartupConfig(pstate) except: # pstate is invalid or non-existing _, file = os.path.split(filename) infohash = binascii.unhexlify(file[:-6]) torrent_data = self.torrent_store.get(infohash) if torrent_data: tdef = TorrentDef.load_from_memory(torrent_data) defaultDLConfig = DefaultDownloadStartupConfig.getInstance() dscfg = defaultDLConfig.copy() if self.mypref_db is not None: dest_dir = self.mypref_db.getMyPrefStatsInfohash(infohash) if dest_dir: if os.path.isdir(dest_dir) or dest_dir == '': dscfg.set_dest_dir(dest_dir) self._logger.debug("tlm: load_checkpoint: pstate is %s %s", pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress')) if pstate is None or pstate.get('state', 'engineresumedata') is None: self._logger.debug("tlm: load_checkpoint: resumedata None") else: self._logger.debug("tlm: load_checkpoint: resumedata len %d", len(pstate.get('state', 'engineresumedata'))) if tdef and dscfg: if dscfg.get_dest_dir() != '': # removed torrent ignoring try: if not self.download_exists(tdef.get_infohash()): initialdlstatus = initialdlstatus_dict.get( tdef.get_infohash(), initialdlstatus) self.add(tdef, dscfg, pstate, initialdlstatus, setupDelay=setupDelay) else: self._logger.info( "tlm: not resuming checkpoint because download has already been added" ) except Exception as e: self._logger.exception( "tlm: load check_point: exception while adding download %s", tdef) else: self._logger.info("tlm: removing checkpoint %s destdir is %s", filename, dscfg.get_dest_dir()) os.remove(filename) else: self._logger.info("tlm: could not resume checkpoint %s %s %s", filename, tdef, dscfg) def checkpoint(self, stop=False, checkpoint=True, 
gracetime=2.0): """ Called by any thread, assume sesslock already held """ # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() self._logger.debug("tlm: checkpointing %s stopping %s", len(dllist), stop) network_checkpoint_callback_lambda = lambda: self.network_checkpoint_callback( dllist, stop, checkpoint, gracetime) self.threadpool.add_task(network_checkpoint_callback_lambda, 0.0) def network_checkpoint_callback(self, dllist, stop, checkpoint, gracetime): """ Called by network thread """ if checkpoint: for d in dllist: try: # Tell all downloads to stop, and save their persistent state # in a infohash -> pstate dict which is then passed to the user # for storage. # if stop: (infohash, pstate) = d.network_stop(False, False) else: (infohash, pstate) = d.network_checkpoint() self._logger.debug("tlm: network checkpointing: %s %s", d.get_def().get_name(), pstate) self.save_download_pstate(infohash, pstate) except Exception as e: self._logger.exception("Exception while checkpointing: %s", d.get_def().get_name()) if stop: # Some grace time for early shutdown tasks if self.shutdownstarttime is not None: now = timemod.time() diff = now - self.shutdownstarttime if diff < gracetime: self._logger.info( "tlm: shutdown: delaying for early shutdown tasks %s", gracetime - diff) delay = gracetime - diff network_shutdown_callback_lambda = lambda: self.network_shutdown( ) self.threadpool.add_task(network_shutdown_callback_lambda, delay) return self.network_shutdown() def remove_pstate(self, infohash): network_remove_pstate_callback_lambda = lambda: self.network_remove_pstate_callback( infohash) self.threadpool.add_task(network_remove_pstate_callback_lambda, 0.0) def network_remove_pstate_callback(self, infohash): if not self.download_exists(infohash): dlpstatedir = self.session.get_downloads_pstate_dir() # Remove checkpoint hexinfohash = binascii.hexlify(infohash) try: basename = hexinfohash + '.state' filename = os.path.join(dlpstatedir, basename) self._logger.debug( "remove pstate: removing dlcheckpoint entry %s", filename) if os.access(filename, os.F_OK): os.remove(filename) except: # Show must go on self._logger.exception("Could not remove state") else: self._logger.warning( "remove pstate: download is back, restarted? Canceling removal! %s", repr(infohash)) def early_shutdown(self): """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time and that can run in parallel to checkpointing, etc. 
""" self._logger.info("tlm: early_shutdown") self.cancel_all_pending_tasks() # Note: sesslock not held self.shutdownstarttime = timemod.time() if self.torrent_checker: self.torrent_checker.shutdown() self.torrent_checker = None if self.channel_manager: self.channel_manager.shutdown() self.channel_manager = None if self.search_manager: self.search_manager.shutdown() self.search_manager = None if self.rtorrent_handler: self.rtorrent_handler.shutdown() self.rtorrent_handler = None if self.videoplayer: self.videoplayer.shutdown() self.videoplayer = None if self.tracker_manager: self.tracker_manager.shutdown() self.tracker_manager = None if self.dispersy: self._logger.info("lmc: Shutting down Dispersy...") now = timemod.time() try: success = self.dispersy.stop() except: print_exc() success = False diff = timemod.time() - now if success: self._logger.info( "lmc: Dispersy successfully shutdown in %.2f seconds", diff) else: self._logger.info( "lmc: Dispersy failed to shutdown in %.2f seconds", diff) if self.metadata_store is not None: self.metadata_store.close() self.metadata_store = None if self.tftp_handler: self.tftp_handler.shutdown() self.tftp_handler = None if self.session.get_megacache(): self.channelcast_db.close() self.votecast_db.close() self.mypref_db.close() self.torrent_db.close() self.peer_db.close() self.channelcast_db = None self.votecast_db = None self.mypref_db = None self.torrent_db = None self.peer_db = None if self.mainline_dht: from Tribler.Core.DecentralizedTracking import mainlineDHT mainlineDHT.deinit(self.mainline_dht) self.mainline_dht = None if self.torrent_store is not None: self.torrent_store.close() self.torrent_store = None def network_shutdown(self): try: self._logger.info("tlm: network_shutdown") ts = enumerate_threads() self._logger.info("tlm: Number of threads still running %d", len(ts)) for t in ts: self._logger.info( "tlm: Thread still running=%s, daemon=%s, instance=%s", t.getName(), t.isDaemon(), t) except: print_exc() # Stop network thread self.sessdoneflag.set() # Shutdown libtorrent session after checkpoints have been made if self.ltmgr: self.ltmgr.shutdown() self.ltmgr = None if self.threadpool: self.threadpool.cancel_all_pending_tasks() self.threadpool = None def save_download_pstate(self, infohash, pstate): """ Called by network thread """ basename = binascii.hexlify(infohash) + '.state' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) self._logger.debug("tlm: network checkpointing: to file %s", filename) pstate.write_file(filename) def load_download_pstate(self, filename): """ Called by any thread """ pstate = CallbackConfigParser() pstate.read_file(filename) return pstate # Events from core meant for API user # def sessconfig_changed_callback(self, section, name, new_value, old_value): value_changed = new_value != old_value if section == 'libtorrent' and name == 'utp': if self.ltmgr and value_changed: self.ltmgr.set_utp(new_value) elif section == 'libtorrent' and name == 'lt_proxyauth': if self.ltmgr: self.ltmgr.set_proxy_settings( None, *self.session.get_libtorrent_proxy_settings()) # Return True/False, depending on whether or not the config value can be changed at runtime. 
        elif (section == 'general' and name in ['nickname', 'mugshot', 'videoanalyserpath']) or \
                (section == 'libtorrent' and name in ['lt_proxytype', 'lt_proxyserver', 'anon_proxyserver',
                                                      'anon_proxytype', 'anon_proxyauth', 'anon_listen_port']) or \
                (section == 'torrent_collecting' and name in ['stop_collecting_threshold']) or \
                (section == 'tunnel_community' and name in ['socks5_listen_port']):
            return True
        else:
            return False
        return True
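
# --- Editor's sketch (not Tribler code) --------------------------------------
# Both versions of the class above persist per-download checkpoints as
# '<hexinfohash>.state' files in the downloads pstate directory
# (save_download_pstate) and read them back with a config-parser style object
# (load_download_pstate / resume_download). The round-trip below illustrates
# that idea with the stdlib RawConfigParser standing in for Tribler's
# CallbackConfigParser; the 'dlstate'/'status' and 'dlstate'/'progress' names
# mirror the options read above, but the exact file layout in this example is
# a simplification made only for illustration.
import binascii
import os
import tempfile

try:
    from ConfigParser import RawConfigParser   # Python 2, matching the code above
except ImportError:
    from configparser import RawConfigParser   # Python 3 fallback


def save_pstate(pstate_dir, infohash, status, progress):
    parser = RawConfigParser()
    parser.add_section('dlstate')
    parser.set('dlstate', 'status', str(status))
    parser.set('dlstate', 'progress', repr(progress))
    basename = binascii.hexlify(infohash).decode('ascii') + '.state'
    filename = os.path.join(pstate_dir, basename)
    with open(filename, 'w') as state_file:
        parser.write(state_file)
    return filename


def load_pstate(filename):
    parser = RawConfigParser()
    parser.read(filename)
    return {
        'status': parser.get('dlstate', 'status'),
        'progress': float(parser.get('dlstate', 'progress')),
    }


if __name__ == '__main__':
    state_dir = tempfile.mkdtemp()
    path = save_pstate(state_dir, b'\x01' * 20, 'DLSTATUS_SEEDING', 1.0)
    print(load_pstate(path))  # round-trips the status/progress written above
# -----------------------------------------------------------------------------
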
class TriblerLaunchMany(TaskManager): def __init__(self): """ Called only once (unless we have multiple Sessions) by MainThread """ super(TriblerLaunchMany, self).__init__() self.initComplete = False self.registered = False self.ipv8 = None self.ipv8_start_time = 0 self.state_cb_count = 0 self.previous_active_downloads = [] self.download_states_lc = None self.get_peer_list = [] self._logger = logging.getLogger(self.__class__.__name__) self.downloads = {} self.upnp_ports = [] self.session = None self.session_lock = None self.sessdoneflag = Event() self.shutdownstarttime = None # modules self.api_manager = None self.watch_folder = None self.version_check_manager = None self.resource_monitor = None self.category = None self.peer_db = None self.torrent_db = None self.mypref_db = None self.votecast_db = None self.channelcast_db = None self.gigachannel_manager = None self.video_server = None self.ltmgr = None self.tracker_manager = None self.torrent_checker = None self.tunnel_community = None self.trustchain_community = None self.wallets = {} self.popularity_community = None self.gigachannel_community = None self.startup_deferred = Deferred() self.credit_mining_manager = None self.market_community = None self.dht_community = None self.payout_manager = None self.mds = None def register(self, session, session_lock): assert isInIOThread() if not self.registered: self.registered = True self.session = session self.session_lock = session_lock self.tracker_manager = TrackerManager(self.session) # On Mac, we bundle the root certificate for the SSL validation since Twisted is not using the root # certificates provided by the system trust store. if sys.platform == 'darwin': os.environ['SSL_CERT_FILE'] = os.path.join( get_lib_path(), 'root_certs_mac.pem') if self.session.config.get_video_server_enabled(): self.video_server = VideoServer( self.session.config.get_video_server_port(), self.session) self.video_server.start() # IPv8 if self.session.config.get_ipv8_enabled(): from Tribler.pyipv8.ipv8.configuration import get_default_configuration ipv8_config = get_default_configuration() ipv8_config['port'] = self.session.config.get_ipv8_port() ipv8_config['address'] = self.session.config.get_ipv8_address() ipv8_config['overlays'] = [] ipv8_config['keys'] = [] # We load the keys ourselves if self.session.config.get_ipv8_bootstrap_override(): import Tribler.pyipv8.ipv8.community as community_file community_file._DEFAULT_ADDRESSES = [ self.session.config.get_ipv8_bootstrap_override() ] community_file._DNS_ADDRESSES = [] self.ipv8 = IPv8(ipv8_config, enable_statistics=self.session.config. get_ipv8_statistics()) self.session.config.set_anon_proxy_settings( 2, ("127.0.0.1", self.session.config. 
get_tunnel_community_socks5_listen_ports())) if not self.initComplete: self.init() self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER, [NTFY_STARTED]) self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None) return self.startup_deferred def on_tribler_started(self, subject, changetype, objectID, *args): reactor.callFromThread(self.startup_deferred.callback, None) def load_ipv8_overlays(self): if self.session.config.get_testnet(): peer = Peer(self.session.trustchain_testnet_keypair) else: peer = Peer(self.session.trustchain_keypair) discovery_community = DiscoveryCommunity(peer, self.ipv8.endpoint, self.ipv8.network) discovery_community.resolve_dns_bootstrap_addresses() self.ipv8.overlays.append(discovery_community) self.ipv8.strategies.append((RandomChurn(discovery_community), -1)) self.ipv8.strategies.append( (PeriodicSimilarity(discovery_community), -1)) self.ipv8.strategies.append((RandomWalk(discovery_community), 20)) # TrustChain Community if self.session.config.get_trustchain_enabled(): from Tribler.pyipv8.ipv8.attestation.trustchain.community import TrustChainCommunity, \ TrustChainTestnetCommunity community_cls = TrustChainTestnetCommunity if self.session.config.get_testnet( ) else TrustChainCommunity self.trustchain_community = community_cls( peer, self.ipv8.endpoint, self.ipv8.network, working_directory=self.session.config.get_state_dir()) self.ipv8.overlays.append(self.trustchain_community) self.ipv8.strategies.append( (EdgeWalk(self.trustchain_community), 20)) tc_wallet = TrustchainWallet(self.trustchain_community) self.wallets[tc_wallet.get_identifier()] = tc_wallet # DHT Community if self.session.config.get_dht_enabled(): from Tribler.pyipv8.ipv8.dht.discovery import DHTDiscoveryCommunity self.dht_community = DHTDiscoveryCommunity(peer, self.ipv8.endpoint, self.ipv8.network) self.ipv8.overlays.append(self.dht_community) self.ipv8.strategies.append((RandomWalk(self.dht_community), 20)) # Tunnel Community if self.session.config.get_tunnel_community_enabled(): from Tribler.community.triblertunnel.community import TriblerTunnelCommunity, TriblerTunnelTestnetCommunity from Tribler.community.triblertunnel.discovery import GoldenRatioStrategy community_cls = TriblerTunnelTestnetCommunity if self.session.config.get_testnet() else \ TriblerTunnelCommunity random_slots = self.session.config.get_tunnel_community_random_slots( ) competing_slots = self.session.config.get_tunnel_community_competing_slots( ) dht_provider = DHTCommunityProvider( self.dht_community, self.session.config.get_ipv8_port()) settings = TunnelSettings() settings.min_circuits = 3 settings.max_circuits = 10 self.tunnel_community = community_cls( peer, self.ipv8.endpoint, self.ipv8.network, tribler_session=self.session, dht_provider=dht_provider, bandwidth_wallet=self.wallets["MB"], random_slots=random_slots, competing_slots=competing_slots, settings=settings) self.ipv8.overlays.append(self.tunnel_community) self.ipv8.strategies.append( (RandomWalk(self.tunnel_community), 20)) self.ipv8.strategies.append( (GoldenRatioStrategy(self.tunnel_community), -1)) # Market Community if self.session.config.get_market_community_enabled( ) and self.session.config.get_dht_enabled(): from Tribler.community.market.community import MarketCommunity, MarketTestnetCommunity community_cls = MarketTestnetCommunity if self.session.config.get_testnet( ) else MarketCommunity self.market_community = community_cls( peer, self.ipv8.endpoint, self.ipv8.network, tribler_session=self.session, trustchain=self.trustchain_community, 
dht=self.dht_community, wallets=self.wallets, working_directory=self.session.config.get_state_dir(), record_transactions=self.session.config. get_record_transactions()) self.ipv8.overlays.append(self.market_community) self.ipv8.strategies.append( (RandomWalk(self.market_community), 20)) # Popular Community if self.session.config.get_popularity_community_enabled(): from Tribler.community.popularity.community import PopularityCommunity self.popularity_community = PopularityCommunity( peer, self.ipv8.endpoint, self.ipv8.network, metadata_store=self.session.lm.mds, session=self.session) self.ipv8.overlays.append(self.popularity_community) self.ipv8.strategies.append( (RandomWalk(self.popularity_community), 20)) self.popularity_community.start() # Gigachannel Community if self.session.config.get_chant_enabled(): from Tribler.community.gigachannel.community import GigaChannelCommunity, GigaChannelTestnetCommunity from Tribler.community.gigachannel.sync_strategy import SyncChannels community_cls = GigaChannelTestnetCommunity if self.session.config.get_testnet( ) else GigaChannelCommunity self.gigachannel_community = community_cls( peer, self.ipv8.endpoint, self.ipv8.network, self.mds, notifier=self.session.notifier) self.ipv8.overlays.append(self.gigachannel_community) self.ipv8.strategies.append( (RandomWalk(self.gigachannel_community), 20)) self.ipv8.strategies.append( (SyncChannels(self.gigachannel_community), 20)) def enable_ipv8_statistics(self): if self.session.config.get_ipv8_statistics(): for overlay in self.ipv8.overlays: self.ipv8.endpoint.enable_community_statistics( overlay.get_prefix(), True) def init(self): # Wallets if self.session.config.get_bitcoinlib_enabled(): try: from Tribler.Core.Modules.wallet.btc_wallet import BitcoinWallet, BitcoinTestnetWallet wallet_path = os.path.join(self.session.config.get_state_dir(), 'wallet') btc_wallet = BitcoinWallet(wallet_path) btc_testnet_wallet = BitcoinTestnetWallet(wallet_path) self.wallets[btc_wallet.get_identifier()] = btc_wallet self.wallets[ btc_testnet_wallet.get_identifier()] = btc_testnet_wallet except Exception as exc: self._logger.error("bitcoinlib library cannot be loaded: %s", exc) if self.session.config.get_chant_enabled(): channels_dir = os.path.join( self.session.config.get_chant_channels_dir()) database_path = os.path.join(self.session.config.get_state_dir(), 'sqlite', 'metadata.db') self.mds = MetadataStore(database_path, channels_dir, self.session.trustchain_keypair) if self.session.config.get_dummy_wallets_enabled(): # For debugging purposes, we create dummy wallets dummy_wallet1 = DummyWallet1() self.wallets[dummy_wallet1.get_identifier()] = dummy_wallet1 dummy_wallet2 = DummyWallet2() self.wallets[dummy_wallet2.get_identifier()] = dummy_wallet2 if self.ipv8: self.ipv8_start_time = time.time() self.load_ipv8_overlays() self.enable_ipv8_statistics() tunnel_community_ports = self.session.config.get_tunnel_community_socks5_listen_ports( ) self.session.config.set_anon_proxy_settings( 2, ("127.0.0.1", tunnel_community_ports)) if self.session.config.get_libtorrent_enabled(): self.session.readable_status = STATE_START_LIBTORRENT from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr self.ltmgr = LibtorrentMgr(self.session) self.ltmgr.initialize() for port, protocol in self.upnp_ports: self.ltmgr.add_upnp_mapping(port, protocol) if self.session.config.get_chant_enabled(): self.gigachannel_manager = GigaChannelManager(self.session) self.gigachannel_manager.start() # add task for tracker checking if 
self.session.config.get_torrent_checking_enabled(): self.session.readable_status = STATE_START_TORRENT_CHECKER self.torrent_checker = TorrentChecker(self.session) self.torrent_checker.initialize() if self.api_manager: self.session.readable_status = STATE_START_API_ENDPOINTS self.api_manager.root_endpoint.start_endpoints() if self.session.config.get_watch_folder_enabled(): self.session.readable_status = STATE_START_WATCH_FOLDER self.watch_folder = WatchFolder(self.session) self.watch_folder.start() if self.session.config.get_credit_mining_enabled(): self.session.readable_status = STATE_START_CREDIT_MINING from Tribler.Core.CreditMining.CreditMiningManager import CreditMiningManager self.credit_mining_manager = CreditMiningManager(self.session) if self.session.config.get_resource_monitor_enabled(): self.resource_monitor = ResourceMonitor(self.session) self.resource_monitor.start() if self.session.config.get_version_checker_enabled(): self.version_check_manager = VersionCheckManager(self.session) self.version_check_manager.start() self.session.set_download_states_callback(self.sesscb_states_callback) if self.session.config.get_ipv8_enabled( ) and self.session.config.get_trustchain_enabled(): self.payout_manager = PayoutManager(self.trustchain_community, self.dht_community) self.initComplete = True def add(self, tdef, dscfg, pstate=None, setupDelay=0, hidden=False, share_mode=False, checkpoint_disabled=False): """ Called by any thread """ d = None with self.session_lock: infohash = tdef.get_infohash() # Create the destination directory if it does not exist yet try: if not os.path.isdir(dscfg.get_dest_dir()): os.makedirs(dscfg.get_dest_dir()) except OSError: self._logger.error( "Unable to create the download destination directory.") if dscfg.get_time_added() == 0: dscfg.set_time_added(int(timemod.time())) # Check if running or saved on disk if infohash in self.downloads: self._logger.info( "Torrent already exists in the downloads. Infohash:%s", hexlify(infohash)) from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl d = LibtorrentDownloadImpl(self.session, tdef) if pstate is None: # not already resuming pstate = self.load_download_pstate_noexc(infohash) if pstate is not None: self._logger.debug("tlm: add: pstate is %s %s", pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress')) # Store in list of Downloads, always. self.downloads[infohash] = d setup_deferred = d.setup(dscfg, pstate, wrapperDelay=setupDelay, share_mode=share_mode, checkpoint_disabled=checkpoint_disabled) setup_deferred.addCallback(self.on_download_handle_created) return d def on_download_handle_created(self, download): """ This method is called when the download handle has been created. Immediately checkpoint the download and write the resume data. 
""" return download.checkpoint() def remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ out = None with self.session_lock: out = d.stop_remove(removestate=removestate, removecontent=removecontent) infohash = d.get_def().get_infohash() if infohash in self.downloads: del self.downloads[infohash] return out or succeed(None) def get_downloads(self): """ Called by any thread """ with self.session_lock: return self.downloads.values() # copy, is mutable def get_channel_downloads(self): with self.session_lock: return [ download for download in self.downloads.values() if download.get_channel_download() ] def get_download(self, infohash): """ Called by any thread """ with self.session_lock: return self.downloads.get(infohash, None) def download_exists(self, infohash): with self.session_lock: return infohash in self.downloads @inlineCallbacks def update_download_hops(self, download, new_hops): """ Update the amount of hops for a specified download. This can be done on runtime. """ infohash = hexlify(download.tdef.get_infohash()) self._logger.info("Updating the amount of hops of download %s", infohash) pstate = download.get_persistent_download_config() pstate.set('state', 'engineresumedata', (yield download.save_resume_data())) yield self.session.remove_download(download) # copy the old download_config and change the hop count dscfg = download.copy() dscfg.set_hops(new_hops) # If the user wants to change the hop count to 0, don't automatically bump this up to 1 anymore dscfg.set_safe_seeding(False) self.session.start_download_from_tdef(download.tdef, dscfg, pstate=pstate) def update_trackers(self, infohash, trackers): """ Update the trackers for a download. :param infohash: infohash of the torrent that needs to be updated :param trackers: A list of tracker urls. """ dl = self.get_download(infohash) old_def = dl.get_def() if dl else None if old_def: old_trackers = old_def.get_trackers_as_single_tuple() new_trackers = list(set(trackers) - set(old_trackers)) all_trackers = list(old_trackers) + new_trackers if new_trackers: # Add new trackers to the download dl.add_trackers(new_trackers) # Create a new TorrentDef if isinstance(old_def, TorrentDefNoMetainfo): new_def = TorrentDefNoMetainfo(old_def.get_infohash(), old_def.get_name(), dl.get_magnet_link()) else: metainfo = old_def.get_metainfo() if len(all_trackers) > 1: metainfo["announce-list"] = [all_trackers] else: metainfo["announce"] = all_trackers[0] new_def = TorrentDef.load_from_dict(metainfo) # Set TorrentDef + checkpoint dl.set_def(new_def) dl.checkpoint() # # State retrieval # def stop_download_states_callback(self): """ Stop any download states callback if present. """ if self.is_pending_task_active("download_states_lc"): self.cancel_pending_task("download_states_lc") def set_download_states_callback(self, user_callback, interval=1.0): """ Set the download state callback. Remove any old callback if it's present. """ self.stop_download_states_callback() self._logger.debug( "Starting the download state callback with interval %f", interval) self.download_states_lc = self.register_task( "download_states_lc", LoopingCall(self._invoke_states_cb, user_callback)) self.download_states_lc.start(interval) def _invoke_states_cb(self, callback): """ Invoke the download states callback with a list of the download states. 
""" dslist = [] for d in self.downloads.values(): d.set_moreinfo_stats( True in self.get_peer_list or d.get_def().get_infohash() in self.get_peer_list) ds = d.network_get_state(None) dslist.append(ds) def on_cb_done(new_get_peer_list): self.get_peer_list = new_get_peer_list return deferToThread(callback, dslist).addCallback(on_cb_done) def sesscb_states_callback(self, states_list): """ This method is periodically (every second) called with a list of the download states of the active downloads. """ self.state_cb_count += 1 # Check to see if a download has finished new_active_downloads = [] do_checkpoint = False seeding_download_list = [] for ds in states_list: state = ds.get_status() download = ds.get_download() tdef = download.get_def() safename = tdef.get_name_as_unicode() infohash = tdef.get_infohash() if state == DLSTATUS_DOWNLOADING: new_active_downloads.append(infohash) elif state == DLSTATUS_STOPPED_ON_ERROR: self._logger.error("Error during download: %s", repr(ds.get_error())) if self.download_exists(infohash): self.get_download(infohash).stop() self.session.notifier.notify(NTFY_TORRENT, NTFY_ERROR, infohash, repr(ds.get_error())) elif state == DLSTATUS_SEEDING: seeding_download_list.append({ u'infohash': infohash, u'download': download }) if infohash in self.previous_active_downloads: self.session.notifier.notify(NTFY_TORRENT, NTFY_FINISHED, infohash, safename) do_checkpoint = True elif download.get_hops() == 0 and download.get_safe_seeding(): # Re-add the download with anonymity enabled hops = self.session.config.get_default_number_hops() self.update_download_hops(download, hops) # Check the peers of this download every five seconds and add them to the payout manager when # this peer runs a Tribler instance if self.state_cb_count % 5 == 0 and download.get_hops( ) == 0 and self.payout_manager: for peer in download.get_peerlist(): if peer["extended_version"].startswith('Tribler'): self.payout_manager.update_peer( unhexlify(peer["id"]), infohash, peer["dtotal"]) self.previous_active_downloads = new_active_downloads if do_checkpoint: self.session.checkpoint_downloads() if self.state_cb_count % 4 == 0: if self.tunnel_community: self.tunnel_community.monitor_downloads(states_list) if self.credit_mining_manager: self.credit_mining_manager.monitor_downloads(states_list) return [] # # Persistence methods # def load_checkpoint(self): """ Called by any thread """ def do_load_checkpoint(): with self.session_lock: for i, filename in enumerate( iglob( os.path.join( self.session.get_downloads_pstate_dir(), '*.state'))): self.resume_download(filename, setupDelay=i * 0.1) if self.initComplete: do_load_checkpoint() else: self.register_task("load_checkpoint", reactor.callLater(1, do_load_checkpoint)) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume session_lock already held """ try: basename = hexlify(infohash) + '.state' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if os.path.exists(filename): return self.load_download_pstate(filename) else: self._logger.info("%s not found", basename) except Exception: self._logger.exception("Exception while loading pstate: %s", infohash) def resume_download(self, filename, setupDelay=0): tdef = dscfg = pstate = None pstate = self.load_download_pstate(filename) metainfo = pstate.get('state', 'metainfo') if 'infohash' in metainfo: tdef = TorrentDefNoMetainfo(metainfo['infohash'], metainfo['name'], metainfo.get('url', None)) else: tdef = TorrentDef.load_from_dict(metainfo) if 
pstate.has_option('download_defaults', 'saveas') and \ isinstance(pstate.get('download_defaults', 'saveas'), tuple): pstate.set('download_defaults', 'saveas', pstate.get('download_defaults', 'saveas')[-1]) dscfg = DownloadStartupConfig(pstate) if pstate is not None: has_resume_data = pstate.get('state', 'engineresumedata') is not None self._logger.debug( "tlm: load_checkpoint: resumedata %s", 'len %s ' % len(pstate.get('state', 'engineresumedata')) if has_resume_data else 'None') if tdef and dscfg: if dscfg.get_dest_dir() != '': # removed torrent ignoring try: if self.download_exists(tdef.get_infohash()): self._logger.info( "tlm: not resuming checkpoint because download has already been added" ) elif dscfg.get_credit_mining( ) and not self.session.config.get_credit_mining_enabled(): self._logger.info( "tlm: not resuming checkpoint since token mining is disabled" ) else: self.add(tdef, dscfg, pstate, setupDelay=setupDelay) except Exception as e: self._logger.exception( "tlm: load check_point: exception while adding download %s", tdef) else: self._logger.info("tlm: removing checkpoint %s destdir is %s", filename, dscfg.get_dest_dir()) os.remove(filename) else: self._logger.info("tlm: could not resume checkpoint %s %s %s", filename, tdef, dscfg) def checkpoint_downloads(self): """ Checkpoints all running downloads in Tribler. Even if the list of Downloads changes in the mean time this is no problem. For removals, dllist will still hold a pointer to the download, and additions are no problem (just won't be included in list of states returned via callback). """ downloads = self.downloads.values() deferred_list = [] self._logger.debug("tlm: checkpointing %s downloads", len(downloads)) for download in downloads: deferred_list.append(download.checkpoint()) return DeferredList(deferred_list) def shutdown_downloads(self): """ Shutdown all downloads in Tribler. """ for download in self.downloads.values(): download.stop() def remove_pstate(self, infohash): def do_remove(): if not self.download_exists(infohash): dlpstatedir = self.session.get_downloads_pstate_dir() # Remove checkpoint hexinfohash = hexlify(infohash) try: basename = hexinfohash + '.state' filename = os.path.join(dlpstatedir, basename) self._logger.debug( "remove pstate: removing dlcheckpoint entry %s", filename) if os.access(filename, os.F_OK): os.remove(filename) except: # Show must go on self._logger.exception("Could not remove state") else: self._logger.warning( "remove pstate: download is back, restarted? Canceling removal! %s", repr(infohash)) reactor.callFromThread(do_remove) @inlineCallbacks def early_shutdown(self): """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time and that can run in parallel to checkpointing, etc. :returns a Deferred that will fire once all dependencies acknowledge they have shutdown. 
""" self._logger.info("tlm: early_shutdown") self.shutdown_task_manager() # Note: session_lock not held self.shutdownstarttime = timemod.time() if self.credit_mining_manager: self.session.notify_shutdown_state( "Shutting down Credit Mining...") yield self.credit_mining_manager.shutdown() self.credit_mining_manager = None if self.torrent_checker: self.session.notify_shutdown_state( "Shutting down Torrent Checker...") yield self.torrent_checker.shutdown() self.torrent_checker = None if self.gigachannel_manager: self.session.notify_shutdown_state( "Shutting down Gigachannel Manager...") yield self.gigachannel_manager.shutdown() self.gigachannel_manager = None if self.video_server: self.session.notify_shutdown_state("Shutting down Video Server...") yield self.video_server.shutdown_server() self.video_server = None if self.version_check_manager: self.session.notify_shutdown_state( "Shutting down Version Checker...") self.version_check_manager.stop() self.version_check_manager = None if self.resource_monitor: self.session.notify_shutdown_state( "Shutting down Resource Monitor...") self.resource_monitor.stop() self.resource_monitor = None self.tracker_manager = None if self.tunnel_community and self.trustchain_community: # We unload these overlays manually since the TrustChain has to be unloaded after the tunnel overlay. tunnel_community = self.tunnel_community self.tunnel_community = None self.session.notify_shutdown_state("Unloading Tunnel Community...") yield self.ipv8.unload_overlay(tunnel_community) trustchain_community = self.trustchain_community self.trustchain_community = None self.session.notify_shutdown_state( "Shutting down TrustChain Community...") yield self.ipv8.unload_overlay(trustchain_community) if self.ipv8: self.session.notify_shutdown_state("Shutting down IPv8...") yield self.ipv8.stop(stop_reactor=False) if self.channelcast_db is not None: self.session.notify_shutdown_state( "Shutting down ChannelCast DB...") yield self.channelcast_db.close() self.channelcast_db = None if self.votecast_db is not None: self.session.notify_shutdown_state("Shutting down VoteCast DB...") yield self.votecast_db.close() self.votecast_db = None if self.mypref_db is not None: self.session.notify_shutdown_state( "Shutting down Preference DB...") yield self.mypref_db.close() self.mypref_db = None if self.torrent_db is not None: self.session.notify_shutdown_state("Shutting down Torrent DB...") yield self.torrent_db.close() self.torrent_db = None if self.peer_db is not None: self.session.notify_shutdown_state("Shutting down Peer DB...") yield self.peer_db.close() self.peer_db = None if self.watch_folder is not None: self.session.notify_shutdown_state("Shutting down Watch Folder...") yield self.watch_folder.stop() self.watch_folder = None def network_shutdown(self): try: self._logger.info("tlm: network_shutdown") ts = enumerate_threads() self._logger.info("tlm: Number of threads still running %d", len(ts)) for t in ts: self._logger.info( "tlm: Thread still running=%s, daemon=%s, instance=%s", t.getName(), t.isDaemon(), t) except: print_exc() # Stop network thread self.sessdoneflag.set() # Shutdown libtorrent session after checkpoints have been made if self.ltmgr is not None: self.ltmgr.shutdown() self.ltmgr = None def save_download_pstate(self, infohash, pstate): """ Called by network thread """ self.downloads[infohash].pstate_for_restart = pstate self.register_anonymous_task( "save_pstate", self.downloads[infohash].save_resume_data()) def load_download_pstate(self, filename): """ Called by any thread """ 
        pstate = CallbackConfigParser()
        pstate.read_file(filename)
        return pstate
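
# --- Illustrative sketch (not part of TriblerLaunchMany) --------------------
# The persistence methods above round-trip per-download state through
# "<hex infohash>.state" files that load_checkpoint()/resume_download() read
# back. The sketch below mimics that round-trip with the stdlib
# RawConfigParser instead of Tribler's CallbackConfigParser; the section and
# option names ('state'/'metainfo'/'engineresumedata', 'download_defaults'/
# 'saveas') mirror the ones referenced above, but the real on-disk
# serialization may differ, so treat this as an assumption-laden example only.
import os
from binascii import hexlify
from ConfigParser import RawConfigParser  # Python 2, as in the code above


def write_example_state_file(state_dir, infohash, name, dest_dir):
    """Write a minimal .state file shaped like the checkpoints used above."""
    parser = RawConfigParser()
    parser.add_section('state')
    parser.set('state', 'metainfo', repr({'infohash': infohash, 'name': name}))
    parser.set('state', 'engineresumedata', '')  # normally filled by libtorrent
    parser.add_section('download_defaults')
    parser.set('download_defaults', 'saveas', dest_dir)
    filename = os.path.join(state_dir, hexlify(infohash) + '.state')
    with open(filename, 'w') as state_file:
        parser.write(state_file)
    return filename


def read_example_state_file(filename):
    """Read the file back, as load_download_pstate() does with its own parser."""
    parser = RawConfigParser()
    parser.read(filename)
    return dict((section, dict(parser.items(section)))
                for section in parser.sections())
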
class TriblerLaunchMany(TaskManager): def __init__(self): """ Called only once (unless we have multiple Sessions) by MainThread """ super(TriblerLaunchMany, self).__init__() self.initComplete = False self.registered = False self.dispersy = None self._logger = logging.getLogger(self.__class__.__name__) self.downloads = {} self.upnp_ports = [] self.session = None self.sesslock = None self.sessdoneflag = Event() self.shutdownstarttime = None # modules self.threadpool = ThreadPoolManager() self.torrent_store = None self.metadata_store = None self.rtorrent_handler = None self.tftp_handler = None self.cat = None self.peer_db = None self.torrent_db = None self.mypref_db = None self.votecast_db = None self.channelcast_db = None self.search_manager = None self.channel_manager = None self.videoplayer = None self.mainline_dht = None self.ltmgr = None self.tracker_manager = None self.torrent_checker = None self.tunnel_community = None def register(self, session, sesslock, autoload_discovery=True): if not self.registered: self.registered = True self.session = session self.sesslock = sesslock if self.session.get_torrent_store(): from Tribler.Core.leveldbstore import LevelDbStore self.torrent_store = LevelDbStore(self.session.get_torrent_store_dir()) if self.session.get_enable_metadata(): from Tribler.Core.leveldbstore import LevelDbStore self.metadata_store = LevelDbStore(self.session.get_metadata_store_dir()) # torrent collecting: RemoteTorrentHandler if self.session.get_torrent_collecting(): from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler self.rtorrent_handler = RemoteTorrentHandler(self.session) # TODO(emilon): move this to a megacache component or smth if self.session.get_megacache(): from Tribler.Core.CacheDB.SqliteCacheDBHandler import (PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler, VoteCastDBHandler, ChannelCastDBHandler) from Tribler.Category.Category import Category self._logger.debug('tlm: Reading Session state from %s', self.session.get_state_dir()) self.cat = Category.getInstance(self.session) # create DBHandlers self.peer_db = PeerDBHandler(self.session) self.torrent_db = TorrentDBHandler(self.session) self.mypref_db = MyPreferenceDBHandler(self.session) self.votecast_db = VoteCastDBHandler(self.session) self.channelcast_db = ChannelCastDBHandler(self.session) # initializes DBHandlers self.peer_db.initialize() self.torrent_db.initialize() self.mypref_db.initialize() self.votecast_db.initialize() self.channelcast_db.initialize() from Tribler.Core.Modules.tracker_manager import TrackerManager self.tracker_manager = TrackerManager(self.session) self.tracker_manager.initialize() if self.session.get_videoplayer(): self.videoplayer = VideoPlayer(self.session) # Dispersy self.session.dispersy_member = None self.tftp_handler = None if self.session.get_dispersy(): from Tribler.dispersy.dispersy import Dispersy from Tribler.dispersy.endpoint import StandaloneEndpoint # set communication endpoint endpoint = StandaloneEndpoint(self.session.get_dispersy_port(), ip=self.session.get_ip()) working_directory = unicode(self.session.get_state_dir()) self.dispersy = Dispersy(endpoint, working_directory) # register TFTP service from Tribler.Core.TFTP.handler import TftpHandler self.tftp_handler = TftpHandler(self.session, endpoint, "fffffffd".decode('hex'), block_size=1024) self.tftp_handler.initialize() if self.session.get_enable_torrent_search() or self.session.get_enable_channel_search(): self.search_manager = SearchManager(self.session) self.search_manager.initialize() if 
self.session.get_enable_channel_search(): from Tribler.Core.Modules.channel_manager import ChannelManager self.channel_manager = ChannelManager(self.session) self.channel_manager.initialize() if not self.initComplete: self.init(autoload_discovery) def init(self, autoload_discovery): if self.dispersy: from Tribler.dispersy.community import HardKilledCommunity self._logger.info("lmc: Starting Dispersy...") now = timemod.time() success = self.dispersy.start(autoload_discovery) diff = timemod.time() - now if success: self._logger.info("lmc: Dispersy started successfully in %.2f seconds [port: %d]", diff, self.dispersy.wan_address[1]) else: self._logger.info("lmc: Dispersy failed to start in %.2f seconds", diff) self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP')) from Tribler.dispersy.crypto import M2CryptoSK self.session.dispersy_member = blockingCallFromThread(reactor, self.dispersy.get_member, private_key=self.dispersy.crypto.key_to_bin(M2CryptoSK(filename=self.session.get_permid_keypair_filename()))) blockingCallFromThread(reactor, self.dispersy.define_auto_load, HardKilledCommunity, self.session.dispersy_member, load=True) if self.session.get_megacache(): self.dispersy.database.attach_commit_callback(self.session.sqlite_db.commit_now) # notify dispersy finished loading self.session.notifier.notify(NTFY_DISPERSY, NTFY_STARTED, None) @blocking_call_on_reactor_thread def load_communities(): # load communities # Search Community if self.session.get_enable_torrent_search(): from Tribler.community.search.community import SearchCommunity self.dispersy.define_auto_load(SearchCommunity, self.session.dispersy_member, load=True, kargs={'tribler_session': self.session}) # AllChannel Community if self.session.get_enable_channel_search(): from Tribler.community.allchannel.community import AllChannelCommunity self.dispersy.define_auto_load(AllChannelCommunity, self.session.dispersy_member, load=True, kargs={'tribler_session': self.session}) load_communities() from Tribler.Core.DecentralizedTracking import mainlineDHT try: self.mainline_dht = mainlineDHT.init(('127.0.0.1', self.session.get_mainline_dht_listen_port()), self.session.get_state_dir()) self.upnp_ports.append((self.session.get_mainline_dht_listen_port(), 'UDP')) except: print_exc() if self.session.get_libtorrent(): from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr self.ltmgr = LibtorrentMgr(self.session) self.ltmgr.initialize() # FIXME(lipu): upnp APIs are not exported in libtorrent python-binding. 
#for port, protocol in self.upnp_ports: # self.ltmgr.add_upnp_mapping(port, protocol) # add task for tracker checking if self.session.get_torrent_checking(): try: from Tribler.Core.TorrentChecker.torrent_checker import TorrentChecker self.torrent_checker = TorrentChecker(self.session) self.torrent_checker.initialize() except: print_exc() if self.rtorrent_handler: self.rtorrent_handler.initialize() self.initComplete = True def add(self, tdef, dscfg, pstate=None, initialdlstatus=None, setupDelay=0, hidden=False): """ Called by any thread """ d = None self.sesslock.acquire() try: if not isinstance(tdef, TorrentDefNoMetainfo) and not tdef.is_finalized(): raise ValueError("TorrentDef not finalized") infohash = tdef.get_infohash() # Check if running or saved on disk if infohash in self.downloads: raise DuplicateDownloadException() from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl d = LibtorrentDownloadImpl(self.session, tdef) if pstate is None: # not already resuming pstate = self.load_download_pstate_noexc(infohash) if pstate is not None: self._logger.debug("tlm: add: pstate is %s %s", pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress')) # Store in list of Downloads, always. self.downloads[infohash] = d d.setup(dscfg, pstate, initialdlstatus, self.network_engine_wrapper_created_callback, wrapperDelay=setupDelay) finally: self.sesslock.release() if d and not hidden and self.session.get_megacache(): @forceDBThread def write_my_pref(): torrent_id = self.torrent_db.getTorrentID(infohash) data = {'destination_path': d.get_dest_dir()} self.mypref_db.addMyPreference(torrent_id, data) if isinstance(tdef, TorrentDefNoMetainfo): self.torrent_db.addOrGetTorrentID(tdef.get_infohash()) self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name_as_unicode()) write_my_pref() elif self.rtorrent_handler: self.rtorrent_handler.save_torrent(tdef, write_my_pref) else: self.torrent_db.addExternalTorrent(tdef, extra_info={'status': 'good'}) write_my_pref() return d def network_engine_wrapper_created_callback(self, d, pstate): """ Called by network thread """ try: if pstate is None: # Checkpoint at startup (infohash, pstate) = d.network_checkpoint() self.save_download_pstate(infohash, pstate) except: print_exc() def remove(self, d, removecontent=False, removestate=True, hidden=False): """ Called by any thread """ with self.sesslock: d.stop_remove(removestate=removestate, removecontent=removecontent) infohash = d.get_def().get_infohash() if infohash in self.downloads: del self.downloads[infohash] if not hidden: self.remove_id(infohash) def remove_id(self, infohash): @forceDBThread def do_db(infohash): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id: self.mypref_db.deletePreference(torrent_id) if self.session.get_megacache(): do_db(infohash) def get_downloads(self): """ Called by any thread """ with self.sesslock: return self.downloads.values() # copy, is mutable def get_download(self, infohash): """ Called by any thread """ with self.sesslock: return self.downloads.get(infohash, None) def download_exists(self, infohash): with self.sesslock: return infohash in self.downloads def update_trackers(self, infohash, trackers): """ Update the trackers for a download. :param infohash: infohash of the torrent that needs to be updated :param trackers: A list of tracker urls. 
""" dl = self.get_download(infohash) old_def = dl.get_def() if dl else None if old_def: old_trackers = old_def.get_trackers_as_single_tuple() new_trackers = list(set(trackers) - set(old_trackers)) all_trackers = list(old_trackers) + new_trackers if new_trackers: # Add new trackers to the download dl.add_trackers(new_trackers) # Create a new TorrentDef if isinstance(old_def, TorrentDefNoMetainfo): new_def = TorrentDefNoMetainfo(old_def.get_infohash(), old_def.get_name(), dl.get_magnet_link()) else: metainfo = old_def.get_metainfo() if len(all_trackers) > 1: metainfo["announce-list"] = [all_trackers] else: metainfo["announce"] = all_trackers[0] new_def = TorrentDef.load_from_dict(metainfo) # Set TorrentDef + checkpoint dl.set_def(new_def) dl.checkpoint() if isinstance(old_def, TorrentDefNoMetainfo): @forceDBThread def update_trackers_db(infohash, new_trackers): torrent_id = self.torrent_db.getTorrentID(infohash) if torrent_id is not None: self.torrent_db.addTorrentTrackerMappingInBatch(torrent_id, new_trackers) self.session.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash) if self.session.get_megacache(): update_trackers_db(infohash, new_trackers) elif not isinstance(old_def, TorrentDefNoMetainfo) and self.rtorrent_handler: # Update collected torrents self.rtorrent_handler.save_torrent(new_def) # # State retrieval # def set_download_states_callback(self, usercallback, getpeerlist, when=0.0): """ Called by any thread """ for d in self.downloads.values(): # Arno, 2012-05-23: At Niels' request to get total transferred # stats. Causes MOREINFO message to be sent from swift proc # for every initiated dl. # 2012-07-31: Turn MOREINFO on/off on demand for efficiency. # 2013-04-17: Libtorrent now uses set_moreinfo_stats as well. d.set_moreinfo_stats(True in getpeerlist or d.get_def().get_infohash() in getpeerlist) network_set_download_states_callback_lambda = lambda: self.network_set_download_states_callback(usercallback) self.threadpool.add_task(network_set_download_states_callback_lambda, when) def network_set_download_states_callback(self, usercallback): """ Called by network thread """ dslist = [] for d in self.downloads.values(): try: ds = d.network_get_state(None, False) dslist.append(ds) except: # Niels, 2012-10-18: If Swift connection is crashing, it will raise an exception # We're catching it here to continue building the downloadstates print_exc() # Invoke the usercallback function on a separate thread. # After the callback is invoked, the return values will be passed to the # returncallback for post-callback processing. 
def session_getstate_usercallback_target(): when, newgetpeerlist = usercallback(dslist) if when > 0.0: # reschedule self.set_download_states_callback(usercallback, newgetpeerlist, when=when) self.threadpool.add_task(session_getstate_usercallback_target) # # Persistence methods # def load_checkpoint(self, initialdlstatus=None, initialdlstatus_dict={}): """ Called by any thread """ def do_load_checkpoint(initialdlstatus, initialdlstatus_dict): with self.sesslock: for i, filename in enumerate(iglob(os.path.join(self.session.get_downloads_pstate_dir(), '*.state'))): self.resume_download(filename, initialdlstatus, initialdlstatus_dict, setupDelay=i * 0.1) if self.initComplete: do_load_checkpoint(initialdlstatus, initialdlstatus_dict) else: self.register_task("load_checkpoint", reactor.callLater(1, do_load_checkpoint)) def load_download_pstate_noexc(self, infohash): """ Called by any thread, assume sesslock already held """ try: basename = binascii.hexlify(infohash) + '.state' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) if os.path.exists(filename): return self.load_download_pstate(filename) else: self._logger.info("%s not found", basename) except Exception: self._logger.exception("Exception while loading pstate: %s", infohash) def resume_download(self, filename, initialdlstatus=None, initialdlstatus_dict={}, setupDelay=0): tdef = dscfg = pstate = None try: pstate = self.load_download_pstate(filename) # SWIFTPROC metainfo = pstate.get('state', 'metainfo') if 'infohash' in metainfo: tdef = TorrentDefNoMetainfo(metainfo['infohash'], metainfo['name'], metainfo.get('url', None)) else: tdef = TorrentDef.load_from_dict(metainfo) if pstate.has_option('downloadconfig', 'saveas') and \ isinstance(pstate.get('downloadconfig', 'saveas'), tuple): pstate.set('downloadconfig', 'saveas', pstate.get('downloadconfig', 'saveas')[-1]) dscfg = DownloadStartupConfig(pstate) except: # pstate is invalid or non-existing _, file = os.path.split(filename) infohash = binascii.unhexlify(file[:-6]) torrent_data = self.torrent_store.get(infohash) if torrent_data: tdef = TorrentDef.load_from_memory(torrent_data) defaultDLConfig = DefaultDownloadStartupConfig.getInstance() dscfg = defaultDLConfig.copy() if self.mypref_db is not None: dest_dir = self.mypref_db.getMyPrefStatsInfohash(infohash) if dest_dir: if os.path.isdir(dest_dir) or dest_dir == '': dscfg.set_dest_dir(dest_dir) self._logger.debug("tlm: load_checkpoint: pstate is %s %s", pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress')) if pstate is None or pstate.get('state', 'engineresumedata') is None: self._logger.debug("tlm: load_checkpoint: resumedata None") else: self._logger.debug("tlm: load_checkpoint: resumedata len %d", len(pstate.get('state', 'engineresumedata'))) if tdef and dscfg: if dscfg.get_dest_dir() != '': # removed torrent ignoring try: if not self.download_exists(tdef.get_infohash()): initialdlstatus = initialdlstatus_dict.get(tdef.get_infohash(), initialdlstatus) self.add(tdef, dscfg, pstate, initialdlstatus, setupDelay=setupDelay) else: self._logger.info("tlm: not resuming checkpoint because download has already been added") except Exception as e: self._logger.exception("tlm: load check_point: exception while adding download %s", tdef) else: self._logger.info("tlm: removing checkpoint %s destdir is %s", filename, dscfg.get_dest_dir()) os.remove(filename) else: self._logger.info("tlm: could not resume checkpoint %s %s %s", filename, tdef, dscfg) def checkpoint(self, stop=False, checkpoint=True, 
gracetime=2.0): """ Called by any thread, assume sesslock already held """ # Even if the list of Downloads changes in the mean time this is # no problem. For removals, dllist will still hold a pointer to the # Download, and additions are no problem (just won't be included # in list of states returned via callback. # dllist = self.downloads.values() self._logger.debug("tlm: checkpointing %s stopping %s", len(dllist), stop) network_checkpoint_callback_lambda = lambda: self.network_checkpoint_callback(dllist, stop, checkpoint, gracetime) self.threadpool.add_task(network_checkpoint_callback_lambda, 0.0) def network_checkpoint_callback(self, dllist, stop, checkpoint, gracetime): """ Called by network thread """ if checkpoint: for d in dllist: try: # Tell all downloads to stop, and save their persistent state # in a infohash -> pstate dict which is then passed to the user # for storage. # if stop: (infohash, pstate) = d.network_stop(False, False) else: (infohash, pstate) = d.network_checkpoint() self._logger.debug("tlm: network checkpointing: %s %s", d.get_def().get_name(), pstate) self.save_download_pstate(infohash, pstate) except Exception as e: self._logger.exception("Exception while checkpointing: %s", d.get_def().get_name()) if stop: # Some grace time for early shutdown tasks if self.shutdownstarttime is not None: now = timemod.time() diff = now - self.shutdownstarttime if diff < gracetime: self._logger.info("tlm: shutdown: delaying for early shutdown tasks %s", gracetime - diff) delay = gracetime - diff network_shutdown_callback_lambda = lambda: self.network_shutdown() self.threadpool.add_task(network_shutdown_callback_lambda, delay) return self.network_shutdown() def remove_pstate(self, infohash): network_remove_pstate_callback_lambda = lambda: self.network_remove_pstate_callback(infohash) self.threadpool.add_task(network_remove_pstate_callback_lambda, 0.0) def network_remove_pstate_callback(self, infohash): if not self.download_exists(infohash): dlpstatedir = self.session.get_downloads_pstate_dir() # Remove checkpoint hexinfohash = binascii.hexlify(infohash) try: basename = hexinfohash + '.state' filename = os.path.join(dlpstatedir, basename) self._logger.debug("remove pstate: removing dlcheckpoint entry %s", filename) if os.access(filename, os.F_OK): os.remove(filename) except: # Show must go on self._logger.exception("Could not remove state") else: self._logger.warning("remove pstate: download is back, restarted? Canceling removal! %s", repr(infohash)) def early_shutdown(self): """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time and that can run in parallel to checkpointing, etc. 
""" self._logger.info("tlm: early_shutdown") self.cancel_all_pending_tasks() # Note: sesslock not held self.shutdownstarttime = timemod.time() if self.torrent_checker: self.torrent_checker.shutdown() self.torrent_checker = None if self.channel_manager: self.channel_manager.shutdown() self.channel_manager = None if self.search_manager: self.search_manager.shutdown() self.search_manager = None if self.rtorrent_handler: self.rtorrent_handler.shutdown() self.rtorrent_handler = None if self.videoplayer: self.videoplayer.shutdown() self.videoplayer = None if self.tracker_manager: self.tracker_manager.shutdown() self.tracker_manager = None if self.dispersy: self._logger.info("lmc: Shutting down Dispersy...") now = timemod.time() try: success = self.dispersy.stop() except: print_exc() success = False diff = timemod.time() - now if success: self._logger.info("lmc: Dispersy successfully shutdown in %.2f seconds", diff) else: self._logger.info("lmc: Dispersy failed to shutdown in %.2f seconds", diff) if self.metadata_store is not None: self.metadata_store.close() self.metadata_store = None if self.tftp_handler: self.tftp_handler.shutdown() self.tftp_handler = None if self.session.get_megacache(): self.channelcast_db.close() self.votecast_db.close() self.mypref_db.close() self.torrent_db.close() self.peer_db.close() self.channelcast_db = None self.votecast_db = None self.mypref_db = None self.torrent_db = None self.peer_db = None if self.mainline_dht: from Tribler.Core.DecentralizedTracking import mainlineDHT mainlineDHT.deinit(self.mainline_dht) self.mainline_dht = None if self.torrent_store is not None: self.torrent_store.close() self.torrent_store = None def network_shutdown(self): try: self._logger.info("tlm: network_shutdown") ts = enumerate_threads() self._logger.info("tlm: Number of threads still running %d", len(ts)) for t in ts: self._logger.info("tlm: Thread still running=%s, daemon=%s, instance=%s", t.getName(), t.isDaemon(), t) except: print_exc() # Stop network thread self.sessdoneflag.set() # Shutdown libtorrent session after checkpoints have been made if self.ltmgr: self.ltmgr.shutdown() self.ltmgr = None if self.threadpool: self.threadpool.cancel_all_pending_tasks() self.threadpool = None def save_download_pstate(self, infohash, pstate): """ Called by network thread """ basename = binascii.hexlify(infohash) + '.state' filename = os.path.join(self.session.get_downloads_pstate_dir(), basename) self._logger.debug("tlm: network checkpointing: to file %s", filename) pstate.write_file(filename) def load_download_pstate(self, filename): """ Called by any thread """ pstate = CallbackConfigParser() pstate.read_file(filename) return pstate # Events from core meant for API user # def sessconfig_changed_callback(self, section, name, new_value, old_value): value_changed = new_value != old_value if section == 'libtorrent' and name == 'utp': if self.ltmgr and value_changed: self.ltmgr.set_utp(new_value) elif section == 'libtorrent' and name == 'lt_proxyauth': if self.ltmgr: self.ltmgr.set_proxy_settings(None, *self.session.get_libtorrent_proxy_settings()) # Return True/False, depending on whether or not the config value can be changed at runtime. 
        elif (section == 'general' and name in ['nickname', 'mugshot', 'videoanalyserpath']) or \
             (section == 'libtorrent' and name in ['lt_proxytype', 'lt_proxyserver', 'anon_proxyserver',
                                                   'anon_proxytype', 'anon_proxyauth', 'anon_listen_port']) or \
             (section == 'torrent_collecting' and name in ['stop_collecting_threshold']) or \
             (section == 'tunnel_community' and name in ['socks5_listen_port']):
            return True
        else:
            return False

        return True
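
# --- Illustrative sketch (not part of the classes above) --------------------
# The newer set_download_states_callback()/_invoke_states_cb() combine a
# Twisted LoopingCall with deferToThread so the potentially slow user callback
# runs off the reactor thread while state gathering stays on it. The sketch
# below reproduces that pattern in isolation; DummyDownload, start_states_loop
# and report are made-up illustrative names (not Tribler APIs), and the
# TaskManager bookkeeping done via register_task above is omitted.
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.internet.threads import deferToThread


class DummyDownload(object):
    """Stand-in for a download object; only exposes a state getter."""

    def __init__(self, name, progress):
        self.name = name
        self.progress = progress

    def network_get_state(self):
        return {'name': self.name, 'progress': self.progress}


def start_states_loop(downloads, user_callback, interval=1.0):
    """Poll download states on the reactor thread, hand them to a worker thread."""

    def invoke_states_cb():
        dslist = [d.network_get_state() for d in downloads]
        # Run the user callback off the reactor thread, as _invoke_states_cb does.
        return deferToThread(user_callback, dslist)

    lc = LoopingCall(invoke_states_cb)
    lc.start(interval)
    return lc


if __name__ == '__main__':
    def report(states):
        print 'got %d download states' % len(states)

    loop = start_states_loop([DummyDownload('example', 0.5)], report)
    reactor.callLater(3, loop.stop)
    reactor.callLater(3.1, reactor.stop)
    reactor.run()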