def run():
    """
    Bootstrap a standalone Dispersy node running the TriblerChain crawler.

    Starts Dispersy on the endpoint/state dir taken from the module-level
    ``options`` mapping, joins the crawler community with a freshly
    generated curve25519 member, and installs SIGINT/SIGTERM handlers that
    stop Dispersy and then the Twisted reactor exactly once.

    Raises:
        RuntimeError: if Dispersy fails to start.
    """
    crypto = ECCrypto()
    # Endpoint/state dir come from the surrounding script's parsed options.
    dispersy = Dispersy(StandaloneEndpoint(options["port"], options["ip"]),
                        options["statedir"], u'dispersy.db', crypto)
    if not dispersy.start():
        raise RuntimeError("Unable to start Dispersy")
    # The crawler community is joined under its hard-coded master member,
    # with a throwaway (per-run) member identity for this node.
    master_member = TriblerChainCommunityCrawler.get_master_members(dispersy)[0]
    my_member = dispersy.get_member(
        private_key=crypto.key_to_bin(crypto.generate_key(u"curve25519")))
    TriblerChainCommunityCrawler.init_community(dispersy, master_member, my_member)
    # NOTE(review): `run` takes no `self`; this relies on a `self` captured
    # from an enclosing scope (e.g. `run` being defined inside a method) --
    # confirm against the full file, otherwise this raises NameError.
    self._stopping = False

    def signal_handler(sig, frame):
        # Guard with _stopping so a second signal doesn't stop Dispersy twice.
        msg("Received signal '%s' in %s (shutting down)" % (sig, frame))
        if not self._stopping:
            self._stopping = True
            # Stop the reactor only after Dispersy has fully shut down.
            dispersy.stop().addCallback(lambda _: reactor.stop())

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
class TriblerLaunchMany(TaskManager):
    """
    Central lifecycle manager for a Tribler Session.

    Owns and wires together the optional subsystems (megacache databases,
    Dispersy and its communities, libtorrent, the torrent checker, video
    server, watch folder, credit mining, etc.), tracks all active downloads,
    and drives startup (``register``/``init``) and shutdown
    (``early_shutdown``/``network_shutdown``).
    """

    def __init__(self):
        """ Called only once (unless we have multiple Sessions) by MainThread """
        super(TriblerLaunchMany, self).__init__()
        self.initComplete = False
        self.registered = False
        self.dispersy = None
        self.state_cb_count = 0
        self.previous_active_downloads = []
        self.download_states_lc = None
        self.get_peer_list = []
        self._logger = logging.getLogger(self.__class__.__name__)
        # infohash -> LibtorrentDownloadImpl; guarded by session_lock.
        self.downloads = {}
        self.upnp_ports = []
        self.session = None
        self.session_lock = None
        self.sessdoneflag = Event()
        self.shutdownstarttime = None
        # modules -- all optional subsystems start as None and are only
        # created in register()/init() when enabled in the session config.
        self.torrent_store = None
        self.metadata_store = None
        self.rtorrent_handler = None
        self.tftp_handler = None
        self.api_manager = None
        self.watch_folder = None
        self.version_check_manager = None
        self.resource_monitor = None
        self.category = None
        self.peer_db = None
        self.torrent_db = None
        self.mypref_db = None
        self.votecast_db = None
        self.channelcast_db = None
        self.search_manager = None
        self.channel_manager = None
        self.video_server = None
        self.mainline_dht = None
        self.ltmgr = None
        self.tracker_manager = None
        self.torrent_checker = None
        self.tunnel_community = None
        # Fires (from on_tribler_started) once the session reports started.
        self.startup_deferred = Deferred()
        self.boosting_manager = None
        self.market_community = None

    def register(self, session, session_lock):
        """
        Bind this launch manager to *session* and construct every enabled
        subsystem (stores, caches, Dispersy endpoint, search manager).
        Idempotent: subsystem construction runs only on the first call.

        :param session: the Tribler Session being started.
        :param session_lock: lock protecting the downloads dict.
        :return: a Deferred that fires when Tribler has fully started.
        """
        assert isInIOThread()
        if not self.registered:
            self.registered = True
            self.session = session
            self.session_lock = session_lock

            # On Mac, we bundle the root certificate for the SSL validation since Twisted is not using the root
            # certificates provided by the system trust store.
            if sys.platform == 'darwin':
                os.environ['SSL_CERT_FILE'] = os.path.join(get_lib_path(), 'root_certs_mac.pem')

            if self.session.config.get_torrent_store_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.torrent_store = LevelDbStore(self.session.config.get_torrent_store_dir())

            if self.session.config.get_metadata_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.metadata_store = LevelDbStore(self.session.config.get_metadata_store_dir())

            # torrent collecting: RemoteTorrentHandler
            if self.session.config.get_torrent_collecting_enabled():
                from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler
                self.rtorrent_handler = RemoteTorrentHandler(self.session)

            # TODO(emilon): move this to a megacache component or smth
            if self.session.config.get_megacache_enabled():
                from Tribler.Core.CacheDB.SqliteCacheDBHandler import (PeerDBHandler, TorrentDBHandler,
                                                                       MyPreferenceDBHandler, VoteCastDBHandler,
                                                                       ChannelCastDBHandler)
                from Tribler.Core.Category.Category import Category

                self._logger.debug('tlm: Reading Session state from %s', self.session.config.get_state_dir())

                self.category = Category()

                # create DBHandlers
                self.peer_db = PeerDBHandler(self.session)
                self.torrent_db = TorrentDBHandler(self.session)
                self.mypref_db = MyPreferenceDBHandler(self.session)
                self.votecast_db = VoteCastDBHandler(self.session)
                self.channelcast_db = ChannelCastDBHandler(self.session)

                # initializes DBHandlers
                self.peer_db.initialize()
                self.torrent_db.initialize()
                self.mypref_db.initialize()
                self.votecast_db.initialize()
                self.channelcast_db.initialize()

                from Tribler.Core.Modules.tracker_manager import TrackerManager
                self.tracker_manager = TrackerManager(self.session)

            if self.session.config.get_video_server_enabled():
                self.video_server = VideoServer(self.session.config.get_video_server_port(), self.session)
                self.video_server.start()

            # Dispersy
            self.tftp_handler = None
            if self.session.config.get_dispersy_enabled():
                from Tribler.dispersy.dispersy import Dispersy
                from Tribler.dispersy.endpoint import StandaloneEndpoint

                # set communication endpoint
                endpoint = StandaloneEndpoint(self.session.config.get_dispersy_port())

                working_directory = unicode(self.session.config.get_state_dir())
                self.dispersy = Dispersy(endpoint, working_directory)

                # register TFTP service; session id "fffffffd" marks the
                # Tribler TFTP channel on the shared Dispersy endpoint.
                from Tribler.Core.TFTP.handler import TftpHandler
                self.tftp_handler = TftpHandler(self.session, endpoint, "fffffffd".decode('hex'), block_size=1024)
                self.tftp_handler.initialize()

            if self.session.config.get_torrent_search_enabled() or self.session.config.get_channel_search_enabled():
                self.search_manager = SearchManager(self.session)
                self.search_manager.initialize()

        # NOTE(review): reconstruction of mangled source -- the init() call is
        # assumed to sit outside the `registered` guard (it is idempotent via
        # initComplete either way); confirm against upstream history.
        if not self.initComplete:
            self.init()

        self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER, [NTFY_STARTED])
        self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None)
        return self.startup_deferred

    def on_tribler_started(self, subject, changetype, objectID, *args):
        # Notifier callbacks may arrive on any thread; hand the startup
        # Deferred's callback over to the reactor thread.
        reactor.callFromThread(self.startup_deferred.callback, None)

    @blocking_call_on_reactor_thread
    def load_communities(self):
        """
        Define/load every enabled Dispersy community (search, allchannel,
        channel, preview, tunnel, trustchain, market) and configure the
        anonymous proxy to point at the tunnel community's SOCKS5 ports.
        """
        self._logger.info("tribler: Preparing communities...")
        now_time = timemod.time()
        default_kwargs = {'tribler_session': self.session}

        # Search Community
        if self.session.config.get_torrent_search_enabled():
            from Tribler.community.search.community import SearchCommunity
            self.dispersy.define_auto_load(SearchCommunity, self.session.dispersy_member, load=True,
                                           kargs=default_kwargs)

        # AllChannel Community
        if self.session.config.get_channel_search_enabled():
            from Tribler.community.allchannel.community import AllChannelCommunity
            self.dispersy.define_auto_load(AllChannelCommunity, self.session.dispersy_member, load=True,
                                           kargs=default_kwargs)

        # Channel Community
        if self.session.config.get_channel_community_enabled():
            from Tribler.community.channel.community import ChannelCommunity
            self.dispersy.define_auto_load(ChannelCommunity, self.session.dispersy_member, load=True,
                                           kargs=default_kwargs)

        # PreviewChannel Community (auto-load only; note: no load=True here,
        # unlike the communities above)
        if self.session.config.get_preview_channel_community_enabled():
            from Tribler.community.channel.preview import PreviewChannelCommunity
            self.dispersy.define_auto_load(PreviewChannelCommunity, self.session.dispersy_member,
                                           kargs=default_kwargs)

        # Tunnel Community
        mc_community = None
        if self.session.config.get_tunnel_community_enabled():
            tunnel_settings = TunnelSettings(tribler_session=self.session)
            tunnel_kwargs = {'tribler_session': self.session, 'settings': tunnel_settings}

            if self.session.config.get_trustchain_enabled():
                trustchain_kwargs = {'tribler_session': self.session}

                # If the trustchain is enabled, we use the permanent trustchain keypair
                # for both the trustchain and the tunnel community
                keypair = self.session.trustchain_keypair
                dispersy_member = self.dispersy.get_member(private_key=keypair.key_to_bin())

                from Tribler.community.triblerchain.community import TriblerChainCommunity
                mc_community = self.dispersy.define_auto_load(TriblerChainCommunity, dispersy_member,
                                                              load=True, kargs=trustchain_kwargs)[0]
            else:
                # No trustchain: throwaway keypair for the tunnel community only.
                keypair = self.dispersy.crypto.generate_key(u"curve25519")
                dispersy_member = self.dispersy.get_member(
                    private_key=self.dispersy.crypto.key_to_bin(keypair))

            from Tribler.community.tunnel.hidden_community import HiddenTunnelCommunity
            self.tunnel_community = self.dispersy.define_auto_load(HiddenTunnelCommunity, dispersy_member,
                                                                   load=True, kargs=tunnel_kwargs)[0]
            # We don't want to automatically load other instances of this community with other master members.
            self.dispersy.undefine_auto_load(HiddenTunnelCommunity)

        # Use the permanent TrustChain ID for Market community/TradeChain if it's available
        if self.session.config.get_market_community_enabled():
            wallets = {}

            btc_wallet = BitcoinWallet(os.path.join(self.session.config.get_state_dir(), 'wallet'),
                                       testnet=self.session.config.get_btc_testnet())
            wallets[btc_wallet.get_identifier()] = btc_wallet

            # NOTE(review): mc_community may still be None here if the tunnel
            # community (or trustchain) is disabled -- TrustchainWallet(None)
            # is passed as-is; confirm the wallet tolerates that.
            mc_wallet = TrustchainWallet(mc_community)
            wallets[mc_wallet.get_identifier()] = mc_wallet

            if self.session.config.get_dummy_wallets_enabled():
                # For debugging purposes, we create dummy wallets
                dummy_wallet1 = DummyWallet1()
                wallets[dummy_wallet1.get_identifier()] = dummy_wallet1

                dummy_wallet2 = DummyWallet2()
                wallets[dummy_wallet2.get_identifier()] = dummy_wallet2

            from Tribler.community.market.community import MarketCommunity
            keypair = self.session.tradechain_keypair
            dispersy_member = self.dispersy.get_member(private_key=keypair.key_to_bin())
            market_kwargs = {'tribler_session': self.session, 'wallets': wallets}
            self.market_community = self.dispersy.define_auto_load(MarketCommunity, dispersy_member,
                                                                   load=True, kargs=market_kwargs)[0]

        # Route anonymous downloads through the tunnel community's SOCKS5 ports.
        self.session.config.set_anon_proxy_settings(
            2, ("127.0.0.1", self.session.config.get_tunnel_community_socks5_listen_ports()))

        self._logger.info("tribler: communities are ready in %.2f seconds", timemod.time() - now_time)

    def init(self):
        """
        Second startup phase: start Dispersy (if enabled) and the remaining
        subsystems (channel manager, mainline DHT, libtorrent, torrent
        checker, API endpoints, watch folder, credit mining, resource
        monitor), then install the periodic download-states callback.
        """
        if self.dispersy:
            from Tribler.dispersy.community import HardKilledCommunity

            self._logger.info("lmc: Starting Dispersy...")

            now = timemod.time()
            success = self.dispersy.start(self.session.autoload_discovery)

            diff = timemod.time() - now
            if success:
                self._logger.info("lmc: Dispersy started successfully in %.2f seconds [port: %d]",
                                  diff, self.dispersy.wan_address[1])
            else:
                self._logger.info("lmc: Dispersy failed to start in %.2f seconds", diff)

            self.upnp_ports.append((self.dispersy.wan_address[1], 'UDP'))

            # Load the persistent member identity from the permid keypair file.
            from Tribler.dispersy.crypto import M2CryptoSK
            private_key = self.dispersy.crypto.key_to_bin(
                M2CryptoSK(filename=self.session.config.get_permid_keypair_filename()))
            self.session.dispersy_member = blockingCallFromThread(reactor, self.dispersy.get_member,
                                                                  private_key=private_key)

            blockingCallFromThread(reactor, self.dispersy.define_auto_load, HardKilledCommunity,
                                   self.session.dispersy_member, load=True)

            if self.session.config.get_megacache_enabled():
                self.dispersy.database.attach_commit_callback(self.session.sqlite_db.commit_now)

            # notify dispersy finished loading
            self.session.notifier.notify(NTFY_DISPERSY, NTFY_STARTED, None)

            self.load_communities()

            tunnel_community_ports = self.session.config.get_tunnel_community_socks5_listen_ports()
            self.session.config.set_anon_proxy_settings(2, ("127.0.0.1", tunnel_community_ports))

        if self.session.config.get_channel_search_enabled():
            from Tribler.Core.Modules.channel.channel_manager import ChannelManager
            self.channel_manager = ChannelManager(self.session)
            self.channel_manager.initialize()

        if self.session.config.get_mainline_dht_enabled():
            from Tribler.Core.DecentralizedTracking import mainlineDHT
            self.mainline_dht = mainlineDHT.init(('127.0.0.1', self.session.config.get_mainline_dht_port()),
                                                 self.session.config.get_state_dir())
            self.upnp_ports.append((self.session.config.get_mainline_dht_port(), 'UDP'))

        if self.session.config.get_libtorrent_enabled():
            from Tribler.Core.Libtorrent.LibtorrentMgr import LibtorrentMgr
            self.ltmgr = LibtorrentMgr(self.session)
            self.ltmgr.initialize()
            # Forward all collected listen ports (Dispersy, DHT) through UPnP.
            for port, protocol in self.upnp_ports:
                self.ltmgr.add_upnp_mapping(port, protocol)

        # add task for tracker checking
        if self.session.config.get_torrent_checking_enabled():
            self.torrent_checker = TorrentChecker(self.session)
            self.torrent_checker.initialize()

        if self.rtorrent_handler:
            self.rtorrent_handler.initialize()

        if self.api_manager:
            self.api_manager.root_endpoint.start_endpoints()

        if self.session.config.get_watch_folder_enabled():
            self.watch_folder = WatchFolder(self.session)
            self.watch_folder.start()

        if self.session.config.get_credit_mining_enabled():
            from Tribler.Core.CreditMining.BoostingManager import BoostingManager
            self.boosting_manager = BoostingManager(self.session)

        if self.session.config.get_resource_monitor_enabled():
            self.resource_monitor = ResourceMonitor(self.session)
            self.resource_monitor.start()

        self.version_check_manager = VersionCheckManager(self.session)

        self.session.set_download_states_callback(self.sesscb_states_callback)

        self.initComplete = True

    def add(self, tdef, dscfg, pstate=None, setupDelay=0, hidden=False,
            share_mode=False, checkpoint_disabled=False):
        """ Called by any thread

        Create and register a new download for *tdef* with config *dscfg*.

        :param tdef: TorrentDef (or TorrentDefNoMetainfo) to download.
        :param dscfg: DownloadStartupConfig for this download.
        :param pstate: previously saved persistent state; loaded from disk
            when None.
        :param setupDelay: delay (seconds) passed through to download setup.
        :param hidden: when True, the download is not written to megacache.
        :param share_mode: libtorrent share mode flag.
        :param checkpoint_disabled: disable checkpointing for this download.
        :return: the created LibtorrentDownloadImpl.
        :raises ValueError: if tdef is not finalized.
        :raises DuplicateDownloadException: if the infohash is already known.
        """
        d = None
        with self.session_lock:
            if not isinstance(tdef, TorrentDefNoMetainfo) and not tdef.is_finalized():
                raise ValueError("TorrentDef not finalized")

            infohash = tdef.get_infohash()

            # Create the destination directory if it does not exist yet
            # (errors are logged only; download setup proceeds regardless).
            try:
                if not os.path.isdir(dscfg.get_dest_dir()):
                    os.makedirs(dscfg.get_dest_dir())
            except OSError:
                self._logger.error("Unable to create the download destination directory.")

            if dscfg.get_time_added() == 0:
                dscfg.set_time_added(int(timemod.time()))

            # Check if running or saved on disk
            if infohash in self.downloads:
                raise DuplicateDownloadException("This download already exists.")

            from Tribler.Core.Libtorrent.LibtorrentDownloadImpl import LibtorrentDownloadImpl
            d = LibtorrentDownloadImpl(self.session, tdef)

            if pstate is None:  # not already resuming
                pstate = self.load_download_pstate_noexc(infohash)
                if pstate is not None:
                    self._logger.debug("tlm: add: pstate is %s %s",
                                       pstate.get('dlstate', 'status'), pstate.get('dlstate', 'progress'))

            # Store in list of Downloads, always.
            self.downloads[infohash] = d
            setup_deferred = d.setup(dscfg, pstate, wrapperDelay=setupDelay,
                                     share_mode=share_mode, checkpoint_disabled=checkpoint_disabled)
            setup_deferred.addCallback(self.on_download_handle_created)

        if d and not hidden and self.session.config.get_megacache_enabled():
            @forceDBThread
            def write_my_pref():
                # Record the preference (destination path) on the DB thread.
                torrent_id = self.torrent_db.getTorrentID(infohash)
                data = {'destination_path': d.get_dest_dir()}
                self.mypref_db.addMyPreference(torrent_id, data)

            if isinstance(tdef, TorrentDefNoMetainfo):
                self.torrent_db.addOrGetTorrentID(tdef.get_infohash())
                self.torrent_db.updateTorrent(tdef.get_infohash(), name=tdef.get_name_as_unicode())
                write_my_pref()
            elif self.rtorrent_handler:
                # save_torrent calls write_my_pref once the torrent is stored.
                self.rtorrent_handler.save_torrent(tdef, write_my_pref)
            else:
                self.torrent_db.addExternalTorrent(tdef, extra_info={'status': 'good'})
                write_my_pref()

        return d

    def on_download_handle_created(self, download):
        """
        This method is called when the download handle has been created.
        Immediately checkpoint the download and write the resume data.
        """
        return download.checkpoint()

    def remove(self, d, removecontent=False, removestate=True, hidden=False):
        """ Called by any thread

        Stop and deregister download *d*, optionally removing its content
        and saved state, and notify the tunnel community.

        :return: a Deferred (the stop_remove result, or an already-fired one).
        """
        out = None
        with self.session_lock:
            out = d.stop_remove(removestate=removestate, removecontent=removecontent)
            infohash = d.get_def().get_infohash()
            if infohash in self.downloads:
                del self.downloads[infohash]

            if not hidden:
                self.remove_id(infohash)

        if self.tunnel_community:
            self.tunnel_community.on_download_removed(d)

        return out or succeed(None)

    def remove_id(self, infohash):
        # Delete the my-preference record for this infohash on the DB thread
        # (no-op when megacache is disabled).
        @forceDBThread
        def do_db():
            torrent_id = self.torrent_db.getTorrentID(infohash)
            if torrent_id:
                self.mypref_db.deletePreference(torrent_id)

        if self.session.config.get_megacache_enabled():
            do_db()

    def get_downloads(self):
        """ Called by any thread """
        with self.session_lock:
            return self.downloads.values()  # copy, is mutable

    def get_download(self, infohash):
        """ Called by any thread """
        with self.session_lock:
            return self.downloads.get(infohash, None)

    def download_exists(self, infohash):
        # Thread-safe membership test on the downloads dict.
        with self.session_lock:
            return infohash in self.downloads

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def update_download_hops(self, download, new_hops):
        """
        Update the amount of hops for a specified download. This can be done on runtime.
        """
        infohash = binascii.hexlify(download.tdef.get_infohash())
        self._logger.info("Updating the amount of hops of download %s", infohash)
        yield self.session.remove_download(download)

        # copy the old download_config and change the hop count
        dscfg = download.copy()
        dscfg.set_hops(new_hops)

        self.session.start_download_from_tdef(download.tdef, dscfg)

    def update_trackers(self, infohash, trackers):
        """
        Update the trackers for a download.
        :param infohash: infohash of the torrent that needs to be updated
        :param trackers: A list of tracker urls.
        """
        dl = self.get_download(infohash)
        old_def = dl.get_def() if dl else None

        if old_def:
            old_trackers = old_def.get_trackers_as_single_tuple()
            new_trackers = list(set(trackers) - set(old_trackers))
            all_trackers = list(old_trackers) + new_trackers

            if new_trackers:
                # Add new trackers to the download
                dl.add_trackers(new_trackers)

                # Create a new TorrentDef
                if isinstance(old_def, TorrentDefNoMetainfo):
                    new_def = TorrentDefNoMetainfo(old_def.get_infohash(), old_def.get_name(),
                                                   dl.get_magnet_link())
                else:
                    metainfo = old_def.get_metainfo()
                    if len(all_trackers) > 1:
                        metainfo["announce-list"] = [all_trackers]
                    else:
                        metainfo["announce"] = all_trackers[0]
                    new_def = TorrentDef.load_from_dict(metainfo)

                # Set TorrentDef + checkpoint
                dl.set_def(new_def)
                dl.checkpoint()

                if isinstance(old_def, TorrentDefNoMetainfo):
                    @forceDBThread
                    def update_trackers_db(infohash, new_trackers):
                        torrent_id = self.torrent_db.getTorrentID(infohash)
                        if torrent_id is not None:
                            self.torrent_db.addTorrentTrackerMappingInBatch(torrent_id, new_trackers)
                            self.session.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash)

                    if self.session.config.get_megacache_enabled():
                        update_trackers_db(infohash, new_trackers)
                elif not isinstance(old_def, TorrentDefNoMetainfo) and self.rtorrent_handler:
                    # Update collected torrents
                    self.rtorrent_handler.save_torrent(new_def)

    #
    # State retrieval
    #
    def stop_download_states_callback(self):
        """
        Stop any download states callback if present.
        """
        if self.is_pending_task_active("download_states_lc"):
            self.cancel_pending_task("download_states_lc")

    def set_download_states_callback(self, user_callback, interval=1.0):
        """
        Set the download state callback. Remove any old callback if it's present.
        """
        self.stop_download_states_callback()
        self._logger.debug("Starting the download state callback with interval %f", interval)
        self.download_states_lc = self.register_task("download_states_lc",
                                                     LoopingCall(self._invoke_states_cb, user_callback))
        self.download_states_lc.start(interval)

    def _invoke_states_cb(self, callback):
        """
        Invoke the download states callback with a list of the download states.
        """
        dslist = []
        for d in self.downloads.values():
            # moreinfo is enabled for everything when get_peer_list contains
            # True, or per-download when its infohash is listed.
            d.set_moreinfo_stats(True in self.get_peer_list or
                                 d.get_def().get_infohash() in self.get_peer_list)
            ds = d.network_get_state(None, False)
            dslist.append(ds)

        def on_cb_done(new_get_peer_list):
            self.get_peer_list = new_get_peer_list

        # Run the (potentially slow) user callback off the reactor thread.
        return deferToThread(callback, dslist).addCallback(on_cb_done)

    def sesscb_states_callback(self, states_list):
        """
        This method is periodically (every second) called with a list of the download states of the active downloads.
        """
        self.state_cb_count += 1

        # Check to see if a download has finished
        new_active_downloads = []
        do_checkpoint = False
        seeding_download_list = []

        for ds in states_list:
            state = ds.get_status()
            download = ds.get_download()
            tdef = download.get_def()
            safename = tdef.get_name_as_unicode()

            if state == DLSTATUS_DOWNLOADING:
                new_active_downloads.append(safename)
            elif state == DLSTATUS_STOPPED_ON_ERROR:
                self._logger.error("Error during download: %s", repr(ds.get_error()))
                self.downloads.get(tdef.get_infohash()).stop()
                self.session.notifier.notify(NTFY_TORRENT, NTFY_ERROR,
                                             tdef.get_infohash(), repr(ds.get_error()))
            elif state == DLSTATUS_SEEDING:
                seeding_download_list.append({u'infohash': tdef.get_infohash(),
                                              u'download': download})

                # A download that was active last round and now seeds has just
                # finished -> notify and checkpoint.
                if safename in self.previous_active_downloads:
                    self.session.notifier.notify(NTFY_TORRENT, NTFY_FINISHED,
                                                 tdef.get_infohash(), safename)
                    do_checkpoint = True

        self.previous_active_downloads = new_active_downloads
        if do_checkpoint:
            self.session.checkpoint_downloads()

        # Hand the states to the tunnel community every 4th tick (~4s).
        if self.state_cb_count % 4 == 0 and self.tunnel_community:
            self.tunnel_community.monitor_downloads(states_list)

        return []

    #
    # Persistence methods
    #
    def load_checkpoint(self):
        """ Called by any thread

        Resume every download whose .state file exists in the pstate dir,
        staggering the setups 0.1 s apart; deferred until init completes.
        """
        def do_load_checkpoint():
            with self.session_lock:
                for i, filename in enumerate(iglob(os.path.join(
                        self.session.get_downloads_pstate_dir(), '*.state'))):
                    self.resume_download(filename, setupDelay=i * 0.1)

        if self.initComplete:
            do_load_checkpoint()
        else:
            self.register_task("load_checkpoint", reactor.callLater(1, do_load_checkpoint))

    def load_download_pstate_noexc(self, infohash):
        """ Called by any thread, assume session_lock already held

        :return: the parsed pstate, or None when missing or unreadable.
        """
        try:
            basename = binascii.hexlify(infohash) + '.state'
            filename = os.path.join(self.session.get_downloads_pstate_dir(), basename)
            if os.path.exists(filename):
                return self.load_download_pstate(filename)
            else:
                self._logger.info("%s not found", basename)
        except Exception:
            self._logger.exception("Exception while loading pstate: %s", infohash)

    def resume_download(self, filename, setupDelay=0):
        """
        Resume a single download from its .state checkpoint file, falling
        back to the collected torrent in torrent_store when the pstate is
        unreadable; removes the checkpoint when its dest dir is empty.
        """
        tdef = dscfg = pstate = None
        try:
            pstate = self.load_download_pstate(filename)

            # SWIFTPROC
            metainfo = pstate.get('state', 'metainfo')
            if 'infohash' in metainfo:
                tdef = TorrentDefNoMetainfo(metainfo['infohash'], metainfo['name'],
                                            metainfo.get('url', None))
            else:
                tdef = TorrentDef.load_from_dict(metainfo)

            # Unpack a legacy tuple-valued saveas into its last element.
            if pstate.has_option('download_defaults', 'saveas') and \
                    isinstance(pstate.get('download_defaults', 'saveas'), tuple):
                pstate.set('download_defaults', 'saveas',
                           pstate.get('download_defaults', 'saveas')[-1])

            dscfg = DownloadStartupConfig(pstate)
        # NOTE(review): bare except deliberately catches any parse failure;
        # narrowing it could change resume behavior -- left as-is.
        except:
            # pstate is invalid or non-existing
            _, file = os.path.split(filename)
            # strip the 6-char '.state' suffix to recover the hex infohash
            infohash = binascii.unhexlify(file[:-6])
            torrent_data = self.torrent_store.get(infohash)
            if torrent_data:
                try:
                    tdef = TorrentDef.load_from_memory(torrent_data)
                    defaultDLConfig = DefaultDownloadStartupConfig.getInstance()
                    dscfg = defaultDLConfig.copy()

                    if self.mypref_db is not None:
                        dest_dir = self.mypref_db.getMyPrefStatsInfohash(infohash)
                        if dest_dir and os.path.isdir(dest_dir):
                            dscfg.set_dest_dir(dest_dir)
                except ValueError:
                    self._logger.warning("tlm: torrent data invalid")

        if pstate is not None:
            has_resume_data = pstate.get('state', 'engineresumedata') is not None
            self._logger.debug("tlm: load_checkpoint: resumedata %s",
                               'len %s ' % len(pstate.get('state', 'engineresumedata'))
                               if has_resume_data else 'None')

        if tdef and dscfg:
            if dscfg.get_dest_dir() != '':  # removed torrent ignoring
                try:
                    if not self.download_exists(tdef.get_infohash()):
                        self.add(tdef, dscfg, pstate, setupDelay=setupDelay)
                    else:
                        self._logger.info("tlm: not resuming checkpoint because download has already been added")
                except Exception as e:
                    self._logger.exception("tlm: load check_point: exception while adding download %s", tdef)
            else:
                self._logger.info("tlm: removing checkpoint %s destdir is %s",
                                  filename, dscfg.get_dest_dir())
                os.remove(filename)
        else:
            self._logger.info("tlm: could not resume checkpoint %s %s %s", filename, tdef, dscfg)

    def checkpoint_downloads(self):
        """
        Checkpoints all running downloads in Tribler.
        Even if the list of Downloads changes in the mean time this is no problem.
        For removals, dllist will still hold a pointer to the download, and additions are no problem
        (just won't be included in list of states returned via callback).
        """
        downloads = self.downloads.values()
        deferred_list = []
        self._logger.debug("tlm: checkpointing %s downloads", len(downloads))
        for download in downloads:
            deferred_list.append(download.checkpoint())

        return DeferredList(deferred_list)

    def shutdown_downloads(self):
        """
        Shutdown all downloads in Tribler.
        """
        for download in self.downloads.values():
            download.stop()

    def remove_pstate(self, infohash):
        # Remove the .state checkpoint for *infohash* on the reactor thread,
        # unless the download has (re)appeared in the meantime.
        def do_remove():
            if not self.download_exists(infohash):
                dlpstatedir = self.session.get_downloads_pstate_dir()

                # Remove checkpoint
                hexinfohash = binascii.hexlify(infohash)
                try:
                    basename = hexinfohash + '.state'
                    filename = os.path.join(dlpstatedir, basename)
                    self._logger.debug("remove pstate: removing dlcheckpoint entry %s", filename)
                    if os.access(filename, os.F_OK):
                        os.remove(filename)
                except:
                    # Show must go on
                    self._logger.exception("Could not remove state")
            else:
                self._logger.warning("remove pstate: download is back, restarted? Canceling removal! %s",
                                     repr(infohash))
        reactor.callFromThread(do_remove)

    @inlineCallbacks
    def early_shutdown(self):
        """ Called as soon as Session shutdown is initiated. Used to start shutdown tasks that takes some time
        and that can run in parallel to checkpointing, etc.
        :returns a Deferred that will fire once all dependencies acknowledge they have shutdown.
        """
        self._logger.info("tlm: early_shutdown")

        self.cancel_all_pending_tasks()

        # Note: session_lock not held
        self.shutdownstarttime = timemod.time()
        # Each subsystem is shut down (yielding on its Deferred where it has
        # one) and its reference dropped unconditionally afterwards.
        if self.boosting_manager:
            yield self.boosting_manager.shutdown()
        self.boosting_manager = None
        if self.torrent_checker:
            yield self.torrent_checker.shutdown()
        self.torrent_checker = None
        if self.channel_manager:
            yield self.channel_manager.shutdown()
        self.channel_manager = None
        if self.search_manager:
            yield self.search_manager.shutdown()
        self.search_manager = None
        if self.rtorrent_handler:
            yield self.rtorrent_handler.shutdown()
        self.rtorrent_handler = None
        if self.video_server:
            yield self.video_server.shutdown_server()
        self.video_server = None
        if self.version_check_manager:
            self.version_check_manager.stop()
        self.version_check_manager = None
        if self.resource_monitor:
            self.resource_monitor.stop()
        self.resource_monitor = None

        self.tracker_manager = None

        if self.dispersy:
            self._logger.info("lmc: Shutting down Dispersy...")
            now = timemod.time()
            try:
                success = yield self.dispersy.stop()
            except:
                print_exc()
                success = False

            diff = timemod.time() - now
            if success:
                self._logger.info("lmc: Dispersy successfully shutdown in %.2f seconds", diff)
            else:
                self._logger.info("lmc: Dispersy failed to shutdown in %.2f seconds", diff)

        if self.metadata_store is not None:
            yield self.metadata_store.close()
        self.metadata_store = None

        if self.tftp_handler is not None:
            yield self.tftp_handler.shutdown()
        self.tftp_handler = None

        if self.channelcast_db is not None:
            yield self.channelcast_db.close()
        self.channelcast_db = None

        if self.votecast_db is not None:
            yield self.votecast_db.close()
        self.votecast_db = None

        if self.mypref_db is not None:
            yield self.mypref_db.close()
        self.mypref_db = None

        if self.torrent_db is not None:
            yield self.torrent_db.close()
        self.torrent_db = None

        if self.peer_db is not None:
            yield self.peer_db.close()
        self.peer_db = None

        if self.mainline_dht is not None:
            from Tribler.Core.DecentralizedTracking import mainlineDHT
            yield mainlineDHT.deinit(self.mainline_dht)
        self.mainline_dht = None

        if self.torrent_store is not None:
            yield self.torrent_store.close()
        self.torrent_store = None

        if self.api_manager is not None:
            yield self.api_manager.stop()
        self.api_manager = None

        if self.watch_folder is not None:
            yield self.watch_folder.stop()
        self.watch_folder = None

    def network_shutdown(self):
        """
        Final shutdown step: log any still-running threads, release the
        session-done flag, and tear down the libtorrent session (after all
        checkpoints have been written).
        """
        try:
            self._logger.info("tlm: network_shutdown")

            ts = enumerate_threads()
            self._logger.info("tlm: Number of threads still running %d", len(ts))
            for t in ts:
                self._logger.info("tlm: Thread still running=%s, daemon=%s, instance=%s",
                                  t.getName(), t.isDaemon(), t)
        except:
            print_exc()

        # Stop network thread
        self.sessdoneflag.set()

        # Shutdown libtorrent session after checkpoints have been made
        if self.ltmgr is not None:
            self.ltmgr.shutdown()
            self.ltmgr = None

    def save_download_pstate(self, infohash, pstate):
        """ Called by network thread """
        self.downloads[infohash].pstate_for_restart = pstate
        # NOTE(review): time.clock() is deprecated (removed in Python 3.8);
        # it is only used here to make the task name unique.
        self.register_task("save_pstate %f" % timemod.clock(),
                           self.downloads[infohash].save_resume_data())

    def load_download_pstate(self, filename):
        """ Called by any thread """
        pstate = CallbackConfigParser()
        pstate.read_file(filename)
        return pstate
class DispersyExperimentScriptClient(ExperimentClient):
    """
    Gumby experiment client that drives a Dispersy instance through a
    scenario file: going online/offline (churn), joining a community, and
    periodically logging Dispersy statistics to ``statistics.log``.

    NOTE(review): a second, logger-based definition of this same class
    appears later in this file; when both live in one module the later
    definition shadows this one — confirm whether this msg()-based copy is
    dead code.
    """

    # Path (relative to EXPERIMENT_DIR) of the scenario file; set by subclasses.
    scenario_file = None

    def __init__(self, vars):
        ExperimentClient.__init__(self, vars)
        self._dispersy = None
        self._community = None
        self._database_file = u"dispersy.db"
        self._dispersy_exit_status = None
        self._is_joined = False
        self._strict = True
        self.community_args = []
        self.community_kwargs = {}
        self._stats_file = None
        self._online_buffer = []
        self._crypto = self.initializeCrypto()
        self.generateMyMember()
        # Publish our private key so other peers can find it through all_vars.
        self.vars['private_keypair'] = base64.encodestring(self.my_member_private_key)

    def onVarsSend(self):
        # Read the scenario before the experiment variables are sent out.
        scenario_file_path = path.join(environ['EXPERIMENT_DIR'], self.scenario_file)
        self.scenario_runner = ScenarioRunner(scenario_file_path)
        t1 = time()
        self.scenario_runner._read_scenario(scenario_file_path)
        msg('Took %.2f to read scenario file' % (time() - t1))

    def onIdReceived(self):
        self.scenario_runner.set_peernumber(int(self.my_id))
        # TODO(emilon): Auto-register this stuff
        self.scenario_runner.register(self.echo)
        self.scenario_runner.register(self.online)
        self.scenario_runner.register(self.offline)
        self.scenario_runner.register(self.churn)
        self.scenario_runner.register(self.churn, 'churn_pattern')
        self.scenario_runner.register(self.set_community_kwarg)
        self.scenario_runner.register(self.set_database_file)
        self.scenario_runner.register(self.use_memory_database)
        self.scenario_runner.register(self.set_ignore_exceptions)
        self.scenario_runner.register(self.start_dispersy)
        self.scenario_runner.register(self.stop_dispersy)
        self.scenario_runner.register(self.stop)
        self.scenario_runner.register(self.set_master_member)
        self.scenario_runner.register(self.reset_dispersy_statistics, 'reset_dispersy_statistics')
        self.scenario_runner.register(self.annotate)
        self.scenario_runner.register(self.peertype)
        # Hook for subclasses to register their own scenario callbacks.
        self.registerCallbacks()
        t1 = time()
        self.scenario_runner.parse_file()
        msg('Took %.2f to parse scenario file' % (time() - t1))

    def startExperiment(self):
        msg("Starting dispersy scenario experiment")
        # TODO(emilon): Move this to the right place
        # TODO(emilon): Do we want to have the .dbs in the output dirs or should they be dumped to /tmp?
        my_dir = path.join(environ['OUTPUT_DIR'], self.my_id)
        makedirs(my_dir)
        chdir(my_dir)
        self._stats_file = open("statistics.log", 'w')

        # TODO(emilon): Fix me or kill me
        try:
            bootstrap_fn = path.join(environ['PROJECT_DIR'], 'tribler', 'bootstraptribler.txt')
            if not path.exists(bootstrap_fn):
                bootstrap_fn = path.join(environ['PROJECT_DIR'], 'bootstraptribler.txt')
            symlink(bootstrap_fn, 'bootstraptribler.txt')
        except OSError:
            pass

        self.scenario_runner.run()

    def registerCallbacks(self):
        # Intentionally empty: subclass extension point.
        pass

    def initializeCrypto(self):
        # Import path differs depending on whether Dispersy ships inside Tribler.
        try:
            from Tribler.dispersy.crypto import ECCrypto, NoCrypto
        except:
            from dispersy.crypto import ECCrypto, NoCrypto

        if environ.get('TRACKER_CRYPTO', 'ECCrypto') == 'ECCrypto':
            msg('Turning on ECCrypto')
            return ECCrypto()
        msg('Turning off Crypto')
        return NoCrypto()

    @property
    def my_member_key_curve(self):
        # low (NID_sect233k1) isn't actually that low, switching to 160bits as this is comparable to rsa 1024
        # http://www.nsa.gov/business/programs/elliptic_curve.shtml
        # speed difference when signing/verifying 100 items
        # NID_sect233k1 signing took 0.171 verify took 0.35 totals 0.521
        # NID_secp160k1 signing took 0.04 verify took 0.04 totals 0.08
        return u"NID_secp160k1"

    def generateMyMember(self):
        # Generate this peer's keypair; the binary blobs are used later by
        # start_dispersy() to create the Dispersy member.
        ec = self._crypto.generate_key(self.my_member_key_curve)
        self.my_member_key = self._crypto.key_to_bin(ec.pub())
        self.my_member_private_key = self._crypto.key_to_bin(ec)

    #
    # Actions
    #

    def echo(self, *argv):
        msg("%s ECHO" % self.my_id, ' '.join(argv))

    def set_community_args(self, args):
        """
        Example: '1292333014,12923340000'
        """
        self.community_args = args.split(',')

    def set_community_kwargs(self, kwargs):
        """
        Example: 'startingtimestamp=1292333014,endingtimestamp=12923340000'
        """
        for karg in kwargs.split(","):
            if "=" in karg:
                key, value = karg.split("=", 1)
                self.community_kwargs[key.strip()] = value.strip()

    def set_community_kwarg(self, key, value):
        self.community_kwargs[key] = value

    def set_database_file(self, filename):
        self._database_file = unicode(filename)

    def use_memory_database(self):
        self._database_file = u':memory:'

    def set_ignore_exceptions(self, boolean):
        self._strict = not self.str2bool(boolean)

    def start_dispersy(self, autoload_discovery=True):
        msg("Starting dispersy")

        # We need to import the stuff _AFTER_ configuring the logging stuff.
        try:
            from Tribler.dispersy.dispersy import Dispersy
            from Tribler.dispersy.endpoint import StandaloneEndpoint
        except:
            from dispersy.dispersy import Dispersy
            from dispersy.endpoint import StandaloneEndpoint

        # Each peer listens on 12000 + its experiment id.
        self._dispersy = Dispersy(StandaloneEndpoint(int(self.my_id) + 12000, '0.0.0.0'), u'.', self._database_file, self._crypto)
        self._dispersy.statistics.enable_debug_statistics(True)

        # Saved so online()/offline() can toggle packet processing.
        self.original_on_incoming_packets = self._dispersy.on_incoming_packets

        if self._strict:
            def exception_handler(exception, fatal):
                msg("An exception occurred. Quitting because we are running with --strict enabled.")
                print >> stderr, "Exception was:"
                try:
                    raise exception
                except:
                    from traceback import print_exc
                    print_exc()
                # Set Dispersy's exit status to error
                self._dispersy_exit_status = 1
                # Stop the experiment
                reactor.callLater(1, self.stop)
                return True
            # NOTE(review): the handler above is never attached — the attach
            # call below is commented out, so --strict has no effect here.
            #self._dispersy.callback.attach_exception_handler(exception_handler)

        self._dispersy.start(autoload_discovery=autoload_discovery)

        if self.master_private_key:
            self._master_member = self._dispersy.get_member(private_key=self.master_private_key)
        else:
            self._master_member = self._dispersy.get_member(public_key=self.master_key)
        self._my_member = self._dispersy.get_member(private_key=self.my_member_private_key)
        assert self._master_member
        assert self._my_member

        # Kick off the periodic statistics logger (inlineCallbacks loop).
        self._do_log()
        self.print_on_change('community-kwargs', {}, self.community_kwargs)
        self.print_on_change('community-env', {}, {'pid':getpid()})
        msg("Finished starting dispersy")

    def stop_dispersy(self):
        self._dispersy_exit_status = self._dispersy.stop()

    def stop(self, retry=3):
        retry = int(retry)
        # Give stop_dispersy() up to `retry` seconds to record an exit status
        # before tearing down the reactor.
        if self._dispersy_exit_status is None and retry:
            reactor.callLater(1, self.stop, retry - 1)
        else:
            msg("Dispersy exit status was:", self._dispersy_exit_status)
            reactor.callLater(0, reactor.stop)

    def set_master_member(self, pub_key, priv_key=''):
        self.master_key = pub_key.decode("HEX")
        self.master_private_key = priv_key.decode("HEX")

    def online(self, dont_empty=False):
        msg("Trying to go online")
        if self._community is None:
            msg("online")
            msg("join community %s as %s" % (self._master_member.mid.encode("HEX"), self._my_member.mid.encode("HEX")))
            # Restore packet processing and (re)join the community.
            self._dispersy.on_incoming_packets = self.original_on_incoming_packets
            self._community = self.community_class.init_community(self._dispersy, self._master_member, self._my_member, *self.community_args, **self.community_kwargs)
            self._community.auto_load = False
            assert self.is_online()
            if not dont_empty:
                self.empty_buffer()
        else:
            msg("online (we are already online)")

    def offline(self):
        msg("Trying to go offline")
        if self._community is None and self._is_joined:
            msg("offline (we are already offline)")
        else:
            msg("offline")
            for community in self._dispersy.get_communities():
                community.unload_community()
            self._community = None
            # Discard incoming packets while offline.
            self._dispersy.on_incoming_packets = lambda *params: None

        if self._database_file == u':memory:':
            msg("Be careful with memory databases and nodes going offline, you could be losing database because we're closing databases.")

    def is_online(self):
        # NOTE(review): "!= None" — PEP 8 prefers "is not None".
        return self._community != None

    def churn(self, *args):
        self.print_on_change('community-churn', {}, {'args':args})

    def buffer_call(self, func, args, kargs):
        # Run immediately when online with nothing queued; otherwise queue
        # until empty_buffer() runs after the next online().
        if len(self._online_buffer) == 0 and self.is_online():
            func(*args, **kargs)
        else:
            self._online_buffer.append((func, args, kargs))

    def empty_buffer(self):
        assert self.is_online()

        # perform all tasks which were scheduled while we were offline
        for func, args, kargs in self._online_buffer:
            try:
                func(*args, **kargs)
            except:
                print_exc()
        self._online_buffer = []

    def reset_dispersy_statistics(self):
        self._dispersy._statistics.reset()

    def annotate(self, message):
        self._stats_file.write('%.1f %s %s %s\n' % (time(), self.my_id, "annotate", message))

    def peertype(self, peertype):
        self._stats_file.write('%.1f %s %s %s\n' % (time(), self.my_id, "peertype", peertype))

    #
    # Aux. functions
    #

    def get_private_keypair_by_id(self, peer_id):
        # Lazily decode and cache the base64 keypair stored in all_vars.
        # Returns None implicitly when the peer id is unknown.
        if str(peer_id) in self.all_vars:
            key = self.all_vars[str(peer_id)]['private_keypair']
            if isinstance(key, basestring):
                key = self.all_vars[str(peer_id)]['private_keypair'] = self._crypto.key_from_private_bin(base64.decodestring(key))
            return key

    def get_private_keypair(self, ip, port):
        port = int(port)
        for peer_dict in self.all_vars.itervalues():
            if peer_dict['host'] == ip and int(peer_dict['port']) == port:
                key = peer_dict['private_keypair']
                if isinstance(key, basestring):
                    key = peer_dict['private_keypair'] = self._crypto.key_from_private_bin(base64.decodestring(key))
                return key
        err("Could not get_private_keypair for", ip, port)

    def str2bool(self, v):
        return v.lower() in ("yes", "true", "t", "1")

    def str2tuple(self, v):
        # "AtB" -> (A, B); "x.y" -> float; otherwise int.
        if len(v) > 1 and v[1] == "t":
            return (int(v[0]), int(v[2:]))
        if len(v) > 1 and v[1] == ".":
            return float(v)
        return int(v)

    def print_on_change(self, name, prev_dict, cur_dict):
        # Write `name` plus the values that changed since prev_dict to the
        # stats log. Returns the new snapshot on change, prev_dict otherwise.
        def get_changed_values(prev_dict, cur_dict):
            new_values = {}
            changed_values = {}
            if cur_dict:
                for key, value in cur_dict.iteritems():
                    # convert key to make it printable
                    if not isinstance(key, (basestring, int, long, float)):
                        key = str(key)

                    # if this is a dict, recursively check for changed values
                    if isinstance(value, dict):
                        converted_dict, changed_in_dict = get_changed_values(prev_dict.get(key, {}), value)
                        new_values[key] = converted_dict
                        if changed_in_dict:
                            changed_values[key] = changed_in_dict

                    # else convert and compare single value
                    else:
                        if not isinstance(value, (basestring, int, long, float, Iterable)):
                            value = str(value)

                        new_values[key] = value
                        if prev_dict.get(key, None) != value:
                            changed_values[key] = value
            return new_values, changed_values

        new_values, changed_values = get_changed_values(prev_dict, cur_dict)
        if changed_values:
            self._stats_file.write('%.1f %s %s %s\n' % (time(), self.my_id, name, json.dumps(changed_values)))
            self._stats_file.flush()
            return new_values
        return prev_dict

    @inlineCallbacks
    def _do_log(self):
        # Every 5 seconds: refresh Dispersy statistics and log whatever changed.
        try:
            from Tribler.dispersy.candidate import CANDIDATE_STUMBLE_LIFETIME, CANDIDATE_WALK_LIFETIME, CANDIDATE_INTRO_LIFETIME
        except:
            from dispersy.candidate import CANDIDATE_STUMBLE_LIFETIME, CANDIDATE_WALK_LIFETIME, CANDIDATE_INTRO_LIFETIME

        # cid -> last_stumble time -> set of member ids seen at that time.
        total_stumbled_candidates = defaultdict(lambda:defaultdict(set))

        prev_statistics = {}
        prev_total_received = {}
        prev_total_dropped = {}
        prev_total_delayed = {}
        prev_total_outgoing = {}
        prev_total_fail = {}
        prev_endpoint_recv = {}
        prev_endpoint_send = {}
        prev_created_messages = {}
        # NOTE(review): prev_bootstrap_candidates is never used below.
        prev_bootstrap_candidates = {}

        while True:
            self._dispersy.statistics.update()

            communities_dict = {}
            for c in self._dispersy.statistics.communities:

                if c._community.dispersy_enable_candidate_walker:
                    # determine current size of candidates categories
                    nr_walked = nr_intro = nr_stumbled = 0

                    # we add all candidates which have a last_stumble > now - CANDIDATE_STUMBLE_LIFETIME
                    now = time()
                    for candidate in c._community.candidates.itervalues():
                        if candidate.last_stumble > now - CANDIDATE_STUMBLE_LIFETIME:
                            nr_stumbled += 1

                            mid = candidate.get_member().mid
                            total_stumbled_candidates[c.hex_cid][candidate.last_stumble].add(mid)

                        if candidate.last_walk > now - CANDIDATE_WALK_LIFETIME:
                            nr_walked += 1

                        if candidate.last_intro > now - CANDIDATE_INTRO_LIFETIME:
                            nr_intro += 1
                else:
                    # Walker disabled: category sizes are unknown.
                    nr_walked = nr_intro = nr_stumbled = "?"

                total_nr_stumbled_candidates = sum(len(members) for members in total_stumbled_candidates[c.hex_cid].values())

                communities_dict[c.hex_cid] = {'classification': c.classification,
                                               'global_time': c.global_time,
                                               'sync_bloom_new': c.sync_bloom_new,
                                               'sync_bloom_reuse': c.sync_bloom_reuse,
                                               'sync_bloom_send': c.sync_bloom_send,
                                               'sync_bloom_skip': c.sync_bloom_skip,
                                               'nr_candidates': len(c.candidates) if c.candidates else 0,
                                               'nr_walked': nr_walked,
                                               'nr_stumbled': nr_stumbled,
                                               'nr_intro' : nr_intro,
                                               'total_stumbled_candidates': total_nr_stumbled_candidates}

            # check for missing communities, reset candidates to 0
            cur_cids = communities_dict.keys()
            for cid, c in prev_statistics.get('communities', {}).iteritems():
                if cid not in cur_cids:
                    _c = c.copy()
                    _c['nr_candidates'] = "?"
                    _c['nr_walked'] = "?"
                    _c['nr_stumbled'] = "?"
                    _c['nr_intro'] = "?"
                    communities_dict[cid] = _c

            statistics_dict = {'conn_type': self._dispersy.statistics.connection_type,
                               'received_count': self._dispersy.statistics.total_received,
                               'success_count': self._dispersy.statistics.msg_statistics.success_count,
                               'drop_count': self._dispersy.statistics.msg_statistics.drop_count,
                               'delay_count': self._dispersy.statistics.msg_statistics.delay_received_count,
                               'delay_success': self._dispersy.statistics.msg_statistics.delay_success_count,
                               'delay_timeout': self._dispersy.statistics.msg_statistics.delay_timeout_count,
                               'delay_send': self._dispersy.statistics.msg_statistics.delay_send_count,
                               'created_count': self._dispersy.statistics.msg_statistics.created_count,
                               'total_up': self._dispersy.statistics.total_up,
                               'total_down': self._dispersy.statistics.total_down,
                               'total_send': self._dispersy.statistics.total_send,
                               'cur_sendqueue': self._dispersy.statistics.cur_sendqueue,
                               'total_candidates_discovered': self._dispersy.statistics.total_candidates_discovered,
                               'walk_attempt': self._dispersy.statistics.walk_attempt_count,
                               'walk_success': self._dispersy.statistics.walk_success_count,
                               'walk_invalid_response_identifier': self._dispersy.statistics.invalid_response_identifier_count,
                               'is_online': self.is_online(),
                               'communities': communities_dict}

            prev_statistics = self.print_on_change("statistics", prev_statistics, statistics_dict)
            prev_total_dropped = self.print_on_change("statistics-dropped-messages", prev_total_dropped, self._dispersy.statistics.msg_statistics.drop_dict)
            prev_total_delayed = self.print_on_change("statistics-delayed-messages", prev_total_delayed, self._dispersy.statistics.msg_statistics.delay_dict)
            prev_total_received = self.print_on_change("statistics-successful-messages", prev_total_received, self._dispersy.statistics.msg_statistics.success_dict)
            prev_total_outgoing = self.print_on_change("statistics-outgoing-messages", prev_total_outgoing, self._dispersy.statistics.msg_statistics.outgoing_dict)
            prev_created_messages = self.print_on_change("statistics-created-messages", prev_created_messages, self._dispersy.statistics.msg_statistics.created_dict)
            prev_total_fail = self.print_on_change("statistics-walk-fail", prev_total_fail, self._dispersy.statistics.walk_failure_dict)
            prev_endpoint_recv = self.print_on_change("statistics-endpoint-recv", prev_endpoint_recv, self._dispersy.statistics.endpoint_recv)
            prev_endpoint_send = self.print_on_change("statistics-endpoint-send", prev_endpoint_send, self._dispersy.statistics.endpoint_send)

            yield deferLater(reactor, 5.0, lambda : None)
class DispersyExperimentScriptClient(ExperimentClient):
    """
    Gumby experiment client that drives a Dispersy instance through a
    scenario file (online/offline churn, community join, periodic statistics
    logging to ``statistics.log``).

    This is the refactored variant: it logs through self._logger (presumably
    provided by ExperimentClient — confirm), calls super().__init__, installs
    Dispersy's unhandled_error_observer for --strict mode, and exposes a
    get_my_member() hook for subclasses. NOTE(review): an earlier msg()-based
    definition of the same class appears above in this file; this later
    definition shadows it when both live in one module.
    """

    # Path (relative to EXPERIMENT_DIR) of the scenario file; set by subclasses.
    scenario_file = None

    def __init__(self, vars):
        super(DispersyExperimentScriptClient, self).__init__(vars)
        self._dispersy = None
        self._community = None
        self._database_file = u"dispersy.db"
        self._dispersy_exit_status = None
        self._is_joined = False
        self._strict = True
        self.community_args = []
        self.community_kwargs = {}
        self._stats_file = None
        self._online_buffer = []
        self._crypto = self.initializeCrypto()
        self.generateMyMember()
        # Publish our private key so other peers can find it through all_vars.
        self.vars['private_keypair'] = base64.encodestring(
            self.my_member_private_key)

    def onVarsSend(self):
        # Read the scenario before the experiment variables are sent out.
        scenario_file_path = path.join(environ['EXPERIMENT_DIR'],
                                       self.scenario_file)
        self.scenario_runner = ScenarioRunner(scenario_file_path)
        t1 = time()
        self.scenario_runner._read_scenario(scenario_file_path)
        self._logger.debug('Took %.2f to read scenario file', time() - t1)

    def onIdReceived(self):
        self._logger.debug('Got ID %s assigned', self.my_id)
        self.scenario_runner.set_peernumber(int(self.my_id))
        # TODO(emilon): Auto-register this stuff
        self.scenario_runner.register(self.echo)
        self.scenario_runner.register(self.online)
        self.scenario_runner.register(self.offline)
        self.scenario_runner.register(self.churn)
        self.scenario_runner.register(self.churn, 'churn_pattern')
        self.scenario_runner.register(self.set_community_kwarg)
        self.scenario_runner.register(self.set_database_file)
        self.scenario_runner.register(self.use_memory_database)
        self.scenario_runner.register(self.set_ignore_exceptions)
        self.scenario_runner.register(self.start_dispersy)
        self.scenario_runner.register(self.stop_dispersy)
        self.scenario_runner.register(self.stop)
        self.scenario_runner.register(self.set_master_member)
        self.scenario_runner.register(self.reset_dispersy_statistics,
                                      'reset_dispersy_statistics')
        self.scenario_runner.register(self.annotate)
        self.scenario_runner.register(self.peertype)
        # Hook for subclasses to register their own scenario callbacks.
        self.registerCallbacks()
        t1 = time()
        self.scenario_runner.parse_file()
        self._logger.debug('Took %.2f to parse scenario file', time() - t1)

    def startExperiment(self):
        self._logger.debug("Starting dispersy scenario experiment")
        # TODO(emilon): Move this to the right place
        # TODO(emilon): Do we want to have the .dbs in the output dirs or should they be dumped to /tmp?
        my_dir = path.join(environ['OUTPUT_DIR'], self.my_id)
        makedirs(my_dir)
        chdir(my_dir)
        self._stats_file = open("statistics.log", 'w')

        # TODO(emilon): Fix me or kill me
        try:
            bootstrap_fn = path.join(environ['PROJECT_DIR'], 'tribler', 'bootstraptribler.txt')
            if not path.exists(bootstrap_fn):
                bootstrap_fn = path.join(environ['PROJECT_DIR'], '..', 'bootstraptribler.txt')
            symlink(bootstrap_fn, 'bootstraptribler.txt')
        except OSError:
            pass

        self.scenario_runner.run()

    def registerCallbacks(self):
        # Intentionally empty: subclass extension point.
        pass

    def initializeCrypto(self):
        # Import path differs depending on whether Dispersy ships inside Tribler.
        try:
            from Tribler.dispersy.crypto import ECCrypto, NoCrypto
        except:
            from dispersy.crypto import ECCrypto, NoCrypto

        if environ.get('TRACKER_CRYPTO', 'ECCrypto') == 'ECCrypto':
            self._logger.debug('Turning on ECCrypto')
            return ECCrypto()
        self._logger.debug('Turning off Crypto')
        return NoCrypto()

    @property
    def my_member_key_curve(self):
        # low (NID_sect233k1) isn't actually that low, switching to 160bits as this is comparable to rsa 1024
        # http://www.nsa.gov/business/programs/elliptic_curve.shtml
        # speed difference when signing/verifying 100 items
        # NID_sect233k1 signing took 0.171 verify took 0.35 totals 0.521
        # NID_secp160k1 signing took 0.04 verify took 0.04 totals 0.08
        return u"NID_secp160k1"

    def generateMyMember(self):
        # Generate this peer's keypair; the binary blobs are used later by
        # start_dispersy()/get_my_member() to create the Dispersy member.
        ec = self._crypto.generate_key(self.my_member_key_curve)
        self.my_member_key = self._crypto.key_to_bin(ec.pub())
        self.my_member_private_key = self._crypto.key_to_bin(ec)

    #
    # Actions
    #

    def echo(self, *argv):
        self._logger.debug("%s ECHO %s", self.my_id, ' '.join(argv))

    def set_community_args(self, args):
        """
        Example: '1292333014,12923340000'
        """
        self.community_args = args.split(',')

    def set_community_kwargs(self, kwargs):
        """
        Example: 'startingtimestamp=1292333014,endingtimestamp=12923340000'
        """
        for karg in kwargs.split(","):
            if "=" in karg:
                key, value = karg.split("=", 1)
                self.community_kwargs[key.strip()] = value.strip()

    def set_community_kwarg(self, key, value):
        self.community_kwargs[key] = value

    def set_database_file(self, filename):
        self._database_file = unicode(filename)

    def use_memory_database(self):
        self._database_file = u':memory:'

    def set_ignore_exceptions(self, boolean):
        self._strict = not self.str2bool(boolean)

    def start_dispersy(self, autoload_discovery=True):
        self._logger.debug("Starting dispersy")

        # We need to import the stuff _AFTER_ configuring the logging stuff.
        try:
            from Tribler.dispersy.dispersy import Dispersy
            from Tribler.dispersy.endpoint import StandaloneEndpoint
            from Tribler.dispersy.util import unhandled_error_observer
        except:
            from dispersy.dispersy import Dispersy
            from dispersy.endpoint import StandaloneEndpoint
            from dispersy.util import unhandled_error_observer

        # Each peer listens on 12000 + its experiment id.
        self._dispersy = Dispersy(
            StandaloneEndpoint(int(self.my_id) + 12000, '0.0.0.0'), u'.',
            self._database_file, self._crypto)
        self._dispersy.statistics.enable_debug_statistics(True)

        # Saved so online()/offline() can toggle packet processing.
        self.original_on_incoming_packets = self._dispersy.on_incoming_packets

        if self._strict:
            # In strict mode, abort the experiment on any unhandled Twisted error.
            from twisted.python.log import addObserver
            addObserver(unhandled_error_observer)

        self._dispersy.start(autoload_discovery=autoload_discovery)

        if self.master_private_key:
            self._master_member = self._dispersy.get_member(
                private_key=self.master_private_key)
        else:
            self._master_member = self._dispersy.get_member(
                public_key=self.master_key)
        self._my_member = self.get_my_member()
        assert self._master_member
        assert self._my_member

        # Kick off the periodic statistics logger (inlineCallbacks loop).
        self._do_log()
        self.print_on_change('community-kwargs', {}, self.community_kwargs)
        self.print_on_change('community-env', {}, {'pid': getpid()})
        self._logger.debug("Finished starting dispersy")

    def get_my_member(self):
        # Overridable hook: which member identity this peer uses.
        return self._dispersy.get_member(
            private_key=self.my_member_private_key)

    def stop_dispersy(self):
        self._dispersy_exit_status = self._dispersy.stop()

    def stop(self, retry=3):
        retry = int(retry)
        # Give stop_dispersy() up to `retry` seconds to record an exit status
        # before tearing down the reactor.
        if self._dispersy_exit_status is None and retry:
            reactor.callLater(1, self.stop, retry - 1)
        else:
            self._logger.debug("Dispersy exit status was: %s",
                               self._dispersy_exit_status)
            reactor.callLater(0, reactor.stop)

    def set_master_member(self, pub_key, priv_key=''):
        self.master_key = pub_key.decode("HEX")
        self.master_private_key = priv_key.decode("HEX")

    def online(self, dont_empty=False):
        self._logger.debug("Trying to go online")
        if self._community is None:
            self._logger.debug("online")
            self._logger.debug("join community %s as %s",
                               self._master_member.mid.encode("HEX"),
                               self._my_member.mid.encode("HEX"))
            # Restore packet processing and (re)join the community.
            self._dispersy.on_incoming_packets = self.original_on_incoming_packets
            self._community = self.community_class.init_community(
                self._dispersy, self._master_member, self._my_member,
                *self.community_args, **self.community_kwargs)
            self._community.auto_load = False
            assert self.is_online()
            if not dont_empty:
                self.empty_buffer()
            self._logger.debug("Dispersy is using port %s",
                               repr(self._dispersy._endpoint.get_address()))
        else:
            self._logger.debug("online (we are already online)")

    def offline(self):
        self._logger.debug("Trying to go offline")
        if self._community is None and self._is_joined:
            self._logger.debug("offline (we are already offline)")
        else:
            self._logger.debug("offline")
            for community in self._dispersy.get_communities():
                community.unload_community()
            self._community = None
            # Discard incoming packets while offline.
            self._dispersy.on_incoming_packets = lambda *params: None

        if self._database_file == u':memory:':
            self._logger.debug(
                "Be careful with memory databases and nodes going offline, "
                "you could be losing database because we're closing databases."
            )

    def is_online(self):
        # NOTE(review): "!= None" — PEP 8 prefers "is not None".
        return self._community != None

    def churn(self, *args):
        self.print_on_change('community-churn', {}, {'args': args})

    def buffer_call(self, func, args, kargs):
        # Run immediately when online with nothing queued; otherwise queue
        # until empty_buffer() runs after the next online().
        if len(self._online_buffer) == 0 and self.is_online():
            func(*args, **kargs)
        else:
            self._online_buffer.append((func, args, kargs))

    def empty_buffer(self):
        assert self.is_online()

        # perform all tasks which were scheduled while we were offline
        for func, args, kargs in self._online_buffer:
            try:
                func(*args, **kargs)
            except:
                print_exc()
        self._online_buffer = []

    def reset_dispersy_statistics(self):
        self._dispersy._statistics.reset()

    def annotate(self, message):
        self._stats_file.write('%.1f %s %s %s\n' %
                               (time(), self.my_id, "annotate", message))

    def peertype(self, peertype):
        self._stats_file.write('%.1f %s %s %s\n' %
                               (time(), self.my_id, "peertype", peertype))

    #
    # Aux. functions
    #

    def get_private_keypair_by_id(self, peer_id):
        # Lazily decode and cache the base64 keypair stored in all_vars.
        # Returns None implicitly when the peer id is unknown.
        if str(peer_id) in self.all_vars:
            key = self.all_vars[str(peer_id)]['private_keypair']
            if isinstance(key, basestring):
                key = self.all_vars[str(peer_id)][
                    'private_keypair'] = self._crypto.key_from_private_bin(
                        base64.decodestring(key))
            return key

    def get_private_keypair(self, ip, port):
        port = int(port)
        for peer_dict in self.all_vars.itervalues():
            if peer_dict['host'] == ip and int(peer_dict['port']) == port:
                key = peer_dict['private_keypair']
                if isinstance(key, basestring):
                    key = peer_dict[
                        'private_keypair'] = self._crypto.key_from_private_bin(
                            base64.decodestring(key))
                return key
        err("Could not get_private_keypair for", ip, port)

    def str2bool(self, v):
        return v.lower() in ("yes", "true", "t", "1")

    def str2tuple(self, v):
        # "AtB" -> (A, B); "x.y" -> float; otherwise int.
        if len(v) > 1 and v[1] == "t":
            return (int(v[0]), int(v[2:]))
        if len(v) > 1 and v[1] == ".":
            return float(v)
        return int(v)

    def print_on_change(self, name, prev_dict, cur_dict):
        # Write `name` plus the values that changed since prev_dict to the
        # stats log. Returns the new snapshot on change, prev_dict otherwise.
        def get_changed_values(prev_dict, cur_dict):
            new_values = {}
            changed_values = {}
            if cur_dict:
                for key, value in cur_dict.iteritems():
                    # convert key to make it printable
                    if not isinstance(key, (basestring, int, long, float)):
                        key = str(key)

                    # if this is a dict, recursively check for changed values
                    if isinstance(value, dict):
                        converted_dict, changed_in_dict = get_changed_values(
                            prev_dict.get(key, {}), value)
                        new_values[key] = converted_dict
                        if changed_in_dict:
                            changed_values[key] = changed_in_dict

                    # else convert and compare single value
                    else:
                        if not isinstance(
                                value, (basestring, int, long, float, Iterable)):
                            value = str(value)

                        new_values[key] = value
                        if prev_dict.get(key, None) != value:
                            changed_values[key] = value
            return new_values, changed_values

        new_values, changed_values = get_changed_values(prev_dict, cur_dict)
        if changed_values:
            self._stats_file.write(
                '%.1f %s %s %s\n' %
                (time(), self.my_id, name, json.dumps(changed_values)))
            self._stats_file.flush()
            return new_values
        return prev_dict

    @inlineCallbacks
    def _do_log(self):
        # Every 5 seconds: refresh Dispersy statistics and log whatever changed.
        try:
            from Tribler.dispersy.candidate import CANDIDATE_STUMBLE_LIFETIME, CANDIDATE_WALK_LIFETIME, CANDIDATE_INTRO_LIFETIME
        except:
            from dispersy.candidate import CANDIDATE_STUMBLE_LIFETIME, CANDIDATE_WALK_LIFETIME, CANDIDATE_INTRO_LIFETIME

        # cid -> last_stumble time -> set of member ids seen at that time.
        total_stumbled_candidates = defaultdict(lambda: defaultdict(set))

        prev_statistics = {}
        prev_total_received = {}
        prev_total_dropped = {}
        prev_total_delayed = {}
        prev_total_outgoing = {}
        prev_total_fail = {}
        prev_endpoint_recv = {}
        prev_endpoint_send = {}
        prev_created_messages = {}
        # NOTE(review): prev_bootstrap_candidates is never used below.
        prev_bootstrap_candidates = {}

        while True:
            self._dispersy.statistics.update()

            communities_dict = {}
            for c in self._dispersy.statistics.communities:

                if c._community.dispersy_enable_candidate_walker:
                    # determine current size of candidates categories
                    nr_walked = nr_intro = nr_stumbled = 0

                    # we add all candidates which have a last_stumble > now - CANDIDATE_STUMBLE_LIFETIME
                    now = time()
                    for candidate in c._community.candidates.itervalues():
                        if candidate.last_stumble > now - CANDIDATE_STUMBLE_LIFETIME:
                            nr_stumbled += 1

                            mid = candidate.get_member().mid
                            total_stumbled_candidates[c.hex_cid][
                                candidate.last_stumble].add(mid)

                        if candidate.last_walk > now - CANDIDATE_WALK_LIFETIME:
                            nr_walked += 1

                        if candidate.last_intro > now - CANDIDATE_INTRO_LIFETIME:
                            nr_intro += 1
                else:
                    # Walker disabled: category sizes are unknown.
                    nr_walked = nr_intro = nr_stumbled = "?"

                total_nr_stumbled_candidates = sum(
                    len(members) for members in total_stumbled_candidates[
                        c.hex_cid].values())

                communities_dict[c.hex_cid] = {
                    'classification': c.classification,
                    'global_time': c.global_time,
                    'sync_bloom_new': c.sync_bloom_new,
                    'sync_bloom_reuse': c.sync_bloom_reuse,
                    'sync_bloom_send': c.sync_bloom_send,
                    'sync_bloom_skip': c.sync_bloom_skip,
                    'nr_candidates': len(c.candidates) if c.candidates else 0,
                    'nr_walked': nr_walked,
                    'nr_stumbled': nr_stumbled,
                    'nr_intro': nr_intro,
                    'total_stumbled_candidates': total_nr_stumbled_candidates
                }

            # check for missing communities, reset candidates to 0
            cur_cids = communities_dict.keys()
            for cid, c in prev_statistics.get('communities', {}).iteritems():
                if cid not in cur_cids:
                    _c = c.copy()
                    _c['nr_candidates'] = "?"
                    _c['nr_walked'] = "?"
                    _c['nr_stumbled'] = "?"
                    _c['nr_intro'] = "?"
                    communities_dict[cid] = _c

            statistics_dict = {
                'conn_type': self._dispersy.statistics.connection_type,
                'received_count': self._dispersy.statistics.total_received,
                'success_count':
                self._dispersy.statistics.msg_statistics.success_count,
                'drop_count':
                self._dispersy.statistics.msg_statistics.drop_count,
                'delay_count':
                self._dispersy.statistics.msg_statistics.delay_received_count,
                'delay_success':
                self._dispersy.statistics.msg_statistics.delay_success_count,
                'delay_timeout':
                self._dispersy.statistics.msg_statistics.delay_timeout_count,
                'delay_send':
                self._dispersy.statistics.msg_statistics.delay_send_count,
                'created_count':
                self._dispersy.statistics.msg_statistics.created_count,
                'total_up': self._dispersy.statistics.total_up,
                'total_down': self._dispersy.statistics.total_down,
                'total_send': self._dispersy.statistics.total_send,
                'cur_sendqueue': self._dispersy.statistics.cur_sendqueue,
                'total_candidates_discovered':
                self._dispersy.statistics.total_candidates_discovered,
                'walk_attempt': self._dispersy.statistics.walk_attempt_count,
                'walk_success': self._dispersy.statistics.walk_success_count,
                'walk_invalid_response_identifier':
                self._dispersy.statistics.invalid_response_identifier_count,
                'is_online': self.is_online(),
                'communities': communities_dict
            }

            prev_statistics = self.print_on_change("statistics",
                                                   prev_statistics,
                                                   statistics_dict)
            prev_total_dropped = self.print_on_change(
                "statistics-dropped-messages", prev_total_dropped,
                self._dispersy.statistics.msg_statistics.drop_dict)
            prev_total_delayed = self.print_on_change(
                "statistics-delayed-messages", prev_total_delayed,
                self._dispersy.statistics.msg_statistics.delay_dict)
            prev_total_received = self.print_on_change(
                "statistics-successful-messages", prev_total_received,
                self._dispersy.statistics.msg_statistics.success_dict)
            prev_total_outgoing = self.print_on_change(
                "statistics-outgoing-messages", prev_total_outgoing,
                self._dispersy.statistics.msg_statistics.outgoing_dict)
            prev_created_messages = self.print_on_change(
                "statistics-created-messages", prev_created_messages,
                self._dispersy.statistics.msg_statistics.created_dict)
            prev_total_fail = self.print_on_change(
                "statistics-walk-fail", prev_total_fail,
                self._dispersy.statistics.walk_failure_dict)
            prev_endpoint_recv = self.print_on_change(
                "statistics-endpoint-recv", prev_endpoint_recv,
                self._dispersy.statistics.endpoint_recv)
            prev_endpoint_send = self.print_on_change(
                "statistics-endpoint-send", prev_endpoint_send,
                self._dispersy.statistics.endpoint_send)

            yield deferLater(reactor, 5.0, lambda: None)