def make_new_connection(pubkey):
    """Return a Response with the direct-connection client for pubkey.

    Replies "pending" while the peer is still bootstrapping or while a
    client is being created in the background.
    """
    comm = _get_communicator(g)

    # Peer is still in the bootstrap phase: nothing to hand back yet
    if pubkey in comm.shared_state.get(pool.ServicePool).bootstrap_pending:
        return Response("pending")

    try:
        # Client already established; return its stored entry directly
        return Response(comm.direct_connection_clients[pubkey])
    except KeyError:
        # No client yet: spawn a thread that will create the client and
        # eventually add it to the communicator.active_services
        threading.Thread(
            target=onionrservices.OnionrServices().create_client,
            args=[pubkey, comm],
            daemon=True).start()
        return Response("pending")
def __init__(self, shared_state, developmentMode=None):
    """Set up communicator state and timers, then run the daemon loop.

    Blocks until shutdown is requested (flag or KeyboardInterrupt), then
    stops direct-connection services and tells the API server to exit.

    shared_state: TooManyObjects-style object pool shared across the app.
    developmentMode: enables the heartbeat timer; defaults to the
        'general.dev_mode' config value when None.
    """
    if developmentMode is None:
        developmentMode = config.get('general.dev_mode', False)
    # configure logger and stuff
    self.config = config
    self.storage_counter = storagecounter.StorageCounter()
    self.isOnline = True  # Assume we're connected to the internet
    self.shared_state = shared_state  # TooManyObjects module
    # list of timer instances
    self.timers = []
    # initialize core with Tor socks port being 3rd argument
    self.proxyPort = shared_state.get(NetController).socksPort
    # Upload information, list of blocks to upload
    self.blocksToUpload = []
    self.upload_session_manager = self.shared_state.get(
        uploadblocks.sessionmanager.BlockUploadSessionManager)
    self.shared_state.share_object()
    # loop time.sleep delay in seconds
    self.delay = 1
    # lists of connected peers and peers we know we can't reach currently
    self.onlinePeers = []
    self.offlinePeers = []
    self.cooldownPeer = {}
    self.connectTimes = {}
    # list of peer's profiles (onionrpeers.PeerProfile instances)
    self.peerProfiles = []
    # Peers merged to us. Don't add to db until we know they're reachable
    self.newPeers = []
    self.announceProgress = {}
    self.announceCache = {}
    self.generating_blocks = []
    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}
    # set true when shutdown command received
    self.shutdown = False
    # list of new blocks to download, added to when new block lists are fetched from peers
    self.blockQueue = {}
    # list of blocks currently downloading, avoid s
    self.currentDownloading = []
    # timestamp when the last online node was seen
    self.lastNodeSeen = None
    # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
    self.dbTimestamps = {}

    # Clear the daemon queue for any dead messages
    if os.path.exists(dbfiles.daemon_queue_db):
        daemonqueue.clear_daemon_queue()

    # Loads in and starts the enabled plugins
    plugins.reload()

    # time app started running for info/statistics purposes
    self.startTime = epoch.get_epoch()

    # extends our upload list and saves our list when Onionr exits
    uploadqueue.UploadQueue(self)

    if developmentMode:
        OnionrCommunicatorTimers(self, self.heartbeat, 30)

    # Set timers, function reference, seconds
    # requires_peer True means the timer function won't fire if we have no connected peers
    peerPoolTimer = OnionrCommunicatorTimers(
        self, onlinepeers.get_online_peers, 60, max_threads=1,
        my_args=[self])
    OnionrCommunicatorTimers(self, self.runCheck, 2, max_threads=1)

    # Timers to periodically lookup new blocks and download them
    lookup_blocks_timer = OnionrCommunicatorTimers(
        self, lookupblocks.lookup_blocks_from_communicator,
        config.get('timers.lookupBlocks', 25),
        my_args=[self], requires_peer=True, max_threads=1)
    # The block download timer is accessed by the block lookup function
    # to trigger faster download starts
    self.download_blocks_timer = OnionrCommunicatorTimers(
        self, self.getBlocks, config.get('timers.getBlocks', 10),
        requires_peer=True, max_threads=5)

    # Timer to reset the longest offline peer so contact can be attempted again
    OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58,
                             my_args=[self])

    # Timer to cleanup old blocks
    blockCleanupTimer = OnionrCommunicatorTimers(
        self, housekeeping.clean_old_blocks, 20, my_args=[self])

    # Timer to discover new peers
    OnionrCommunicatorTimers(
        self, lookupadders.lookup_new_peer_transports_with_communicator,
        60, requires_peer=True, my_args=[self], max_threads=2)

    # Timer for adjusting which peers we actively communicate to at any
    # given time, to avoid over-using peers
    OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30,
                             my_args=[self], requires_peer=True)

    # Timer to read the upload queue and upload the entries to peers
    OnionrCommunicatorTimers(
        self, uploadblocks.upload_blocks_from_communicator, 5,
        my_args=[self], requires_peer=True, max_threads=1)

    # Timer to process the daemon command queue
    OnionrCommunicatorTimers(
        self, daemonqueuehandler.handle_daemon_commands, 6,
        my_args=[self], max_threads=3)

    # Setup direct connections
    if config.get('general.socket_servers', False):
        self.services = onionrservices.OnionrServices()
        self.active_services = []
        self.service_greenlets = []
        OnionrCommunicatorTimers(self, servicecreator.service_creator, 5,
                                 max_threads=50, my_args=[self])
    else:
        self.services = None
    # {peer_pubkey: ephemeral_address}, the address to reach them
    self.direct_connection_clients = {}

    # This timer creates deniable blocks, in an attempt to further
    # obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        deniableBlockTimer = OnionrCommunicatorTimers(
            self, deniableinserts.insert_deniable_block, 180,
            my_args=[self], requires_peer=True, max_threads=1)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

    # Timer to check for connectivity, through Tor to various
    # high-profile onion services
    OnionrCommunicatorTimers(self, netcheck.net_check, 500,
                             my_args=[self], max_threads=1)

    # Announce the public API server transport address to other nodes
    # if security level allows
    if config.get('general.security_level', 1) == 0 \
            and config.get('general.announce_node', True):
        # Default to high security level incase config breaks
        announceTimer = OnionrCommunicatorTimers(
            self, announcenode.announce_node, 3600,
            my_args=[self], requires_peer=True, max_threads=1)
        announceTimer.count = (announceTimer.frequency - 120)
    else:
        logger.debug('Will not announce node.')

    # Timer to delete malfunctioning or long-dead peers
    cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300,
                                            requires_peer=True)

    # Timer to cleanup dead ephemeral forward secrecy keys
    OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15,
                             my_args=[self], max_threads=1)

    # Adjust initial timer triggers
    peerPoolTimer.count = (peerPoolTimer.frequency - 1)
    cleanupTimer.count = (cleanupTimer.frequency - 60)
    blockCleanupTimer.count = (blockCleanupTimer.frequency - 2)
    # BUG FIX: this previously rebound the local name to an int
    # (lookup_blocks_timer = lookup_blocks_timer.frequency - 2),
    # silently dropping the intended early-trigger adjustment.
    lookup_blocks_timer.count = (lookup_blocks_timer.frequency - 2)

    shared_state.add(self)

    if config.get('general.use_bootstrap', True):
        bootstrappeers.add_bootstrap_list_to_peer_list(self, [],
                                                       db_only=True)

    if not config.get('onboarding.done', True):
        logger.info('First run detected. Run openhome to get setup.',
                    terminal=True)
        while not config.get('onboarding.done', True):
            time.sleep(5)

    # Main daemon loop, mainly for calling timers, don't do any complex
    # operations here to avoid locking
    try:
        while not self.shutdown:
            for i in self.timers:
                if self.shutdown:
                    break
                i.processTimer()
            time.sleep(self.delay)
            # Debug to print out used FDs (regular and net)
            #proc = psutil.Process()
            #print(proc.open_files(), len(psutil.net_connections()))
    except KeyboardInterrupt:
        self.shutdown = True

    logger.info('Goodbye. (Onionr is cleaning up, and will exit)',
                terminal=True)
    # service_greenlets only exists when socket_servers was enabled above
    try:
        self.service_greenlets
    except AttributeError:
        pass
    else:
        # Stop onionr direct connection services
        for server in self.service_greenlets:
            server.stop()
    localcommand.local_command('shutdown')  # shutdown the api
    try:
        time.sleep(0.5)
    except KeyboardInterrupt:
        pass
def __init__(self, onionrInst, proxyPort, developmentMode=None):
    """Set up communicator state and timers, then run the daemon loop.

    Blocks until shutdown (flag or KeyboardInterrupt), then stops any
    direct-connection services and shuts down the API server.

    onionrInst: main Onionr instance; gains a communicatorInst backref.
    proxyPort: Tor SOCKS port used for outbound connections.
    developmentMode: enables the heartbeat timer; defaults to the
        'general.dev_mode' config value when None.
    """
    # BUG FIX: the default was previously config.get('general.dev_mode',
    # False) evaluated once at definition time, so later config changes
    # were ignored; the None sentinel reads config at call time instead.
    if developmentMode is None:
        developmentMode = config.get('general.dev_mode', False)
    onionrInst.communicatorInst = self
    # configure logger and stuff
    onionr.Onionr.setupConfig('data/', self=self)
    self.proxyPort = proxyPort
    self.isOnline = True  # Assume we're connected to the internet
    # list of timer instances
    self.timers = []
    # initialize core with Tor socks port being 3rd argument
    self.proxyPort = proxyPort
    self._core = onionrInst.onionrCore
    self.blocksToUpload = []
    # loop time.sleep delay in seconds
    self.delay = 1
    # lists of connected peers and peers we know we can't reach currently
    self.onlinePeers = []
    self.offlinePeers = []
    self.cooldownPeer = {}
    self.connectTimes = {}
    # list of peer's profiles (onionrpeers.PeerProfile instances)
    self.peerProfiles = []
    # Peers merged to us. Don't add to db until we know they're reachable
    self.newPeers = []
    self.announceProgress = {}
    self.announceCache = {}
    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}
    # set true when shutdown command received
    self.shutdown = False
    # list of new blocks to download, added to when new block lists are fetched from peers
    self.blockQueue = {}
    # list of blocks currently downloading, avoid s
    self.currentDownloading = []
    # timestamp when the last online node was seen
    self.lastNodeSeen = None
    # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
    self.dbTimestamps = {}

    # Clear the daemon queue for any dead messages
    if os.path.exists(self._core.queueDB):
        self._core.clearDaemonQueue()

    # Loads in and starts the enabled plugins
    plugins.reload()

    # time app started running for info/statistics purposes
    self.startTime = self._core._utils.getEpoch()

    if developmentMode:
        OnionrCommunicatorTimers(self, self.heartbeat, 30)

    # Set timers, function reference, seconds
    # requiresPeer True means the timer function won't fire if we have no connected peers
    peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers,
                                             60, maxThreads=1)
    OnionrCommunicatorTimers(self, self.runCheck, 2, maxThreads=1)

    # Timers to periodically lookup new blocks and download them
    OnionrCommunicatorTimers(
        self, self.lookupBlocks,
        self._core.config.get('timers.lookupBlocks', 25),
        requiresPeer=True, maxThreads=1)
    OnionrCommunicatorTimers(
        self, self.getBlocks,
        self._core.config.get('timers.getBlocks', 30),
        requiresPeer=True, maxThreads=2)

    # Timer to reset the longest offline peer so contact can be attempted again
    OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)

    # Timer to cleanup old blocks
    blockCleanupTimer = OnionrCommunicatorTimers(
        self, housekeeping.clean_old_blocks, 65, myArgs=[self])

    # Timer to discover new peers
    OnionrCommunicatorTimers(self, self.lookupAdders, 60,
                             requiresPeer=True)

    # Timer for adjusting which peers we actively communicate to at any
    # given time, to avoid over-using peers
    OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30,
                             myArgs=[self], requiresPeer=True)

    # Timer to read the upload queue and upload the entries to peers
    OnionrCommunicatorTimers(self, self.uploadBlock, 5,
                             requiresPeer=True, maxThreads=1)

    # Timer to process the daemon command queue
    OnionrCommunicatorTimers(self, self.daemonCommands, 6, maxThreads=3)

    # Timer that kills Onionr if the API server crashes
    OnionrCommunicatorTimers(self, self.detectAPICrash, 30, maxThreads=1)

    # Setup direct connections
    if config.get('general.socket_servers', False):
        self.services = onionrservices.OnionrServices(self._core)
        self.active_services = []
        self.service_greenlets = []
        OnionrCommunicatorTimers(self, servicecreator.service_creator, 5,
                                 maxThreads=50, myArgs=[self])
    else:
        self.services = None

    # This timer creates deniable blocks, in an attempt to further
    # obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        deniableBlockTimer = OnionrCommunicatorTimers(
            self, deniableinserts.insert_deniable_block, 180,
            myArgs=[self], requiresPeer=True, maxThreads=1)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

    # Timer to check for connectivity, through Tor to various
    # high-profile onion services
    netCheckTimer = OnionrCommunicatorTimers(self, netcheck.net_check,
                                             600, myArgs=[self])

    # Announce the public API server transport address to other nodes
    # if security level allows
    if config.get('general.security_level', 1) == 0:
        # Default to high security level incase config breaks
        announceTimer = OnionrCommunicatorTimers(
            self, announcenode.announce_node, 3600,
            myArgs=[self], requiresPeer=True, maxThreads=1)
        announceTimer.count = (announceTimer.frequency - 120)
    else:
        logger.debug('Will not announce node.')

    # Timer to delete malfunctioning or long-dead peers
    cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300,
                                            requiresPeer=True)

    # Timer to cleanup dead ephemeral forward secrecy keys
    OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15,
                             myArgs=[self], maxThreads=1)

    # Adjust initial timer triggers
    peerPoolTimer.count = (peerPoolTimer.frequency - 1)
    cleanupTimer.count = (cleanupTimer.frequency - 60)
    blockCleanupTimer.count = (blockCleanupTimer.frequency - 5)

    # Main daemon loop, mainly for calling timers, don't do any complex
    # operations here to avoid locking
    try:
        while not self.shutdown:
            for i in self.timers:
                if self.shutdown:
                    break
                i.processTimer()
            time.sleep(self.delay)
            # Debug to print out used FDs (regular and net)
            #proc = psutil.Process()
            #print(proc.open_files(), len(psutil.net_connections()))
    except KeyboardInterrupt:
        self.shutdown = True

    logger.info('Goodbye. (Onionr is cleaning up, and will exit)')
    # service_greenlets only exists when socket_servers was enabled above
    try:
        self.service_greenlets
    except AttributeError:
        pass
    else:
        for server in self.service_greenlets:
            server.stop()
    self._core._utils.localCommand('shutdown')  # shutdown the api
    time.sleep(0.5)
def __init__(self, shared_state, developmentMode=None):
    """Set up communicator state and timers, then run the daemon loop.

    Blocks until the shared DeadSimpleKV 'shutdown' flag is set (or a
    KeyboardInterrupt arrives), then stops direct-connection services.

    shared_state: TooManyObjects-style object pool shared across the app.
    developmentMode: reserved dev-mode switch; defaults to the
        'general.dev_mode' config value when None.
    """
    if developmentMode is None:
        developmentMode = config.get('general.dev_mode', False)
    # configure logger and stuff
    self.config = config
    self.shared_state = shared_state  # TooManyObjects module

    # populate kv values
    self.kv = self.shared_state.get_by_string('DeadSimpleKV')
    if config.get('general.offline_mode', False):
        self.kv.put('isOnline', False)

    # list of timer instances
    self.timers = []

    # initialize core with Tor socks port being 3rd argument
    self.proxyPort = shared_state.get(NetController).socksPort

    self.upload_session_manager = self.shared_state.get(
        uploadblocks.sessionmanager.BlockUploadSessionManager)
    self.shared_state.share_object()

    # loop time.sleep delay in seconds
    self.delay = 1

    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}

    # Loads in and starts the enabled plugins
    plugins.reload()

    # extends our upload list and saves our list when Onionr exits
    uploadqueue.UploadQueue(self)

    # Timers to periodically lookup new blocks and download them
    lookup_blocks_timer = OnionrCommunicatorTimers(
        self, lookupblocks.lookup_blocks_from_communicator,
        config.get('timers.lookupBlocks', 25),
        my_args=[self], requires_peer=True, max_threads=1)
    # The block download timer is accessed by the block lookup function
    # to trigger faster download starts
    self.download_blocks_timer = OnionrCommunicatorTimers(
        self, self.getBlocks, config.get('timers.getBlocks', 10),
        requires_peer=True, max_threads=5)

    # Timer to reset the longest offline peer
    # so contact can be attempted again
    OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58,
                             my_args=[self], max_threads=1)

    # Timer to cleanup old blocks
    blockCleanupTimer = OnionrCommunicatorTimers(
        self, housekeeping.clean_old_blocks, 20, my_args=[self],
        max_threads=1)

    # Timer to discover new peers
    OnionrCommunicatorTimers(
        self, lookupadders.lookup_new_peer_transports_with_communicator,
        60, requires_peer=True, my_args=[self], max_threads=2)

    # Timer for adjusting which peers
    # we actively communicate to at any given time,
    # to avoid over-using peers
    OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30,
                             my_args=[self], requires_peer=True)

    # Timer to read the upload queue and upload the entries to peers
    OnionrCommunicatorTimers(
        self, uploadblocks.upload_blocks_from_communicator, 5,
        my_args=[self], requires_peer=True, max_threads=1)

    # Setup direct connections
    if config.get('general.ephemeral_tunnels', False):
        self.services = onionrservices.OnionrServices()
        self.active_services = []
        self.service_greenlets = []
        OnionrCommunicatorTimers(self, servicecreator.service_creator, 5,
                                 max_threads=50, my_args=[self])
    else:
        self.services = None
    # {peer_pubkey: ephemeral_address}, the address to reach them
    self.direct_connection_clients = {}

    # This timer creates deniable blocks,
    # in an attempt to further obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        deniableBlockTimer = OnionrCommunicatorTimers(
            self, deniableinserts.insert_deniable_block, 180,
            my_args=[self], requires_peer=True, max_threads=1)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

    # Timer to check for connectivity,
    # through Tor to various high-profile onion services
    OnionrCommunicatorTimers(self, netcheck.net_check, 500,
                             my_args=[self], max_threads=1)

    # Announce the public API server transport address
    # to other nodes if security level allows
    if config.get('general.security_level', 1) == 0 \
            and config.get('general.announce_node', True):
        # Default to high security level incase config breaks
        announceTimer = OnionrCommunicatorTimers(
            self, announcenode.announce_node, 3600,
            my_args=[self], requires_peer=True, max_threads=1)
        announceTimer.count = (announceTimer.frequency - 60)
    else:
        logger.debug('Will not announce node.')

    # Timer to delete malfunctioning or long-dead peers
    cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300,
                                            requires_peer=True)

    # Timer to cleanup dead ephemeral forward secrecy keys
    OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15,
                             my_args=[self], max_threads=1)

    # Adjust initial timer triggers
    cleanupTimer.count = (cleanupTimer.frequency - 60)
    blockCleanupTimer.count = (blockCleanupTimer.frequency - 2)
    # BUG FIX: this previously rebound the local name to an int
    # (lookup_blocks_timer = lookup_blocks_timer.frequency - 2),
    # silently dropping the intended early-trigger adjustment.
    lookup_blocks_timer.count = (lookup_blocks_timer.frequency - 2)

    shared_state.add(self)

    if config.get('general.use_bootstrap_list', True):
        bootstrappeers.add_bootstrap_list_to_peer_list(self.kv, [],
                                                       db_only=True)

    daemoneventhooks.daemon_event_handlers(shared_state)

    if not config.get('onboarding.done', True):
        logger.info('First run detected. Run openhome to get setup.',
                    terminal=True)
        get_url()
        while not config.get('onboarding.done', True) and \
                not self.shared_state.get_by_string(
                    'DeadSimpleKV').get('shutdown'):
            try:
                time.sleep(2)
            except KeyboardInterrupt:
                self.shared_state.get_by_string('DeadSimpleKV').put(
                    'shutdown', True)

    # Main daemon loop, mainly for calling timers,
    # don't do any complex operations here to avoid locking
    try:
        while not self.shared_state.get_by_string('DeadSimpleKV').get(
                'shutdown'):
            for i in self.timers:
                if self.shared_state.get_by_string('DeadSimpleKV').get(
                        'shutdown'):
                    break
                i.processTimer()
            time.sleep(self.delay)
    except KeyboardInterrupt:
        self.shared_state.get_by_string('DeadSimpleKV').put(
            'shutdown', True)

    logger.info('Goodbye. (Onionr is cleaning up, and will exit)',
                terminal=True)
    # service_greenlets only exists when ephemeral_tunnels was enabled above
    try:
        self.service_greenlets
    except AttributeError:
        pass
    else:
        # Stop onionr direct connection services
        for server in self.service_greenlets:
            server.stop()
    try:
        time.sleep(0.5)
    except KeyboardInterrupt:
        pass