def testPluginReload(self):
    logger.debug('-' * 26 + '\n')
    logger.info('Running simple plugin reload test...')

    import onionrplugins

    try:
        onionrplugins.reload('test')
        self.assertTrue(True)
    except Exception:  # a bare except would also swallow KeyboardInterrupt
        self.assertTrue(False)
def reload_plugin(o_inst):
    '''
        Reloads (stops and starts) all plugins, or the given plugin
    '''

    if len(sys.argv) >= 3:
        plugin_name = sys.argv[2]
        logger.info('Reloading plugin "%s"...' % plugin_name)
        plugins.stop(plugin_name, o_inst)
        plugins.start(plugin_name, o_inst)
    else:
        logger.info('Reloading all plugins...')
        plugins.reload(o_inst)
def reloadPlugin(self):
    '''
        Reloads (stops and starts) all plugins, or the given plugin
    '''

    if len(sys.argv) >= 3:
        plugin_name = sys.argv[2]
        logger.info('Reloading plugin "%s"...' % plugin_name)
        plugins.stop(plugin_name)
        plugins.start(plugin_name)
    else:
        logger.info('Reloading all plugins...')
        plugins.reload()
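# Both reload handlers above read the target plugin name from sys.argv[2].
# A minimal sketch of that argv contract; the command name "reload-plugin"
# is hypothetical, only the positional layout comes from the code above.
import sys

sys.argv = ['onionr.py', 'reload-plugin', 'test']  # simulated CLI call
if len(sys.argv) >= 3:
    print('would reload only plugin: %s' % sys.argv[2])
else:
    print('would reload all plugins')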
def testPluginReload(self):
    logger.debug('-' * 26 + '\n')
    logger.info('Running simple plugin reload test...')

    import onionrplugins, os

    if not onionrplugins.exists('test'):
        os.makedirs(onionrplugins.get_plugins_folder('test'))
        with open(onionrplugins.get_plugins_folder('test') + '/main.py', 'a') as main:
            # The written plugin must itself be valid Python, so the hook
            # bodies are indented inside the string.
            main.write("print('Running')\n\ndef on_test(pluginapi, data = None):\n    print('received test event!')\n    return True\n\ndef on_start(pluginapi, data = None):\n    print('start event called')\n\ndef on_stop(pluginapi, data = None):\n    print('stop event called')\n\ndef on_enable(pluginapi, data = None):\n    print('enable event called')\n\ndef on_disable(pluginapi, data = None):\n    print('disable event called')\n")
        onionrplugins.enable('test')

    try:
        onionrplugins.reload('test')
        self.assertTrue(True)
    except Exception:
        self.assertTrue(False)
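# A minimal sketch of the plugin lifecycle the two tests above exercise,
# using only onionrplugins calls that appear in them (exists,
# get_plugins_folder, enable, reload). That reload() fires the
# on_stop/on_start hooks in main.py is inferred from the reload handlers
# earlier in this section, not verified against the library.
import onionrplugins

name = 'test'
if onionrplugins.exists(name):   # plugin folder is present on disk
    onionrplugins.enable(name)   # mark it enabled so it gets loaded
    onionrplugins.reload(name)   # stop and start it again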
def __init__(self, shared_state, developmentMode=None):
    if developmentMode is None:
        developmentMode = config.get('general.dev_mode', False)

    # configure logger and stuff
    self.config = config
    self.storage_counter = storagecounter.StorageCounter()
    self.isOnline = True  # Assume we're connected to the internet
    self.shared_state = shared_state  # TooManyObjects module

    # list of timer instances
    self.timers = []

    # initialize core with Tor socks port being 3rd argument
    self.proxyPort = shared_state.get(NetController).socksPort

    # Upload information, list of blocks to upload
    self.blocksToUpload = []
    self.upload_session_manager = self.shared_state.get(
        uploadblocks.sessionmanager.BlockUploadSessionManager)
    self.shared_state.share_object()

    # loop time.sleep delay in seconds
    self.delay = 1

    # lists of connected peers and peers we know we can't reach currently
    self.onlinePeers = []
    self.offlinePeers = []
    self.cooldownPeer = {}
    self.connectTimes = {}
    # list of peer's profiles (onionrpeers.PeerProfile instances)
    self.peerProfiles = []
    # Peers merged to us. Don't add to db until we know they're reachable
    self.newPeers = []
    self.announceProgress = {}
    self.announceCache = {}
    self.generating_blocks = []

    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}

    # set true when shutdown command received
    self.shutdown = False

    # list of new blocks to download, added to when new block lists are fetched from peers
    self.blockQueue = {}

    # list of blocks currently downloading, to avoid duplicate downloads
    self.currentDownloading = []

    # timestamp when the last online node was seen
    self.lastNodeSeen = None

    # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
    self.dbTimestamps = {}

    # Clear the daemon queue for any dead messages
    if os.path.exists(dbfiles.daemon_queue_db):
        daemonqueue.clear_daemon_queue()

    # Loads in and starts the enabled plugins
    plugins.reload()

    # time app started running for info/statistics purposes
    self.startTime = epoch.get_epoch()

    # extends our upload list and saves our list when Onionr exits
    uploadqueue.UploadQueue(self)

    if developmentMode:
        OnionrCommunicatorTimers(self, self.heartbeat, 30)

    # Set timers, function reference, seconds
    # requires_peer True means the timer function won't fire if we have no connected peers
    peerPoolTimer = OnionrCommunicatorTimers(self, onlinepeers.get_online_peers, 60, max_threads=1, my_args=[self])
    OnionrCommunicatorTimers(self, self.runCheck, 2, max_threads=1)

    # Timers to periodically lookup new blocks and download them
    lookup_blocks_timer = OnionrCommunicatorTimers(self, lookupblocks.lookup_blocks_from_communicator, config.get('timers.lookupBlocks', 25), my_args=[self], requires_peer=True, max_threads=1)
    # The block download timer is accessed by the block lookup function to trigger faster download starts
    self.download_blocks_timer = OnionrCommunicatorTimers(self, self.getBlocks, config.get('timers.getBlocks', 10), requires_peer=True, max_threads=5)

    # Timer to reset the longest offline peer so contact can be attempted again
    OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58, my_args=[self])

    # Timer to cleanup old blocks
    blockCleanupTimer = OnionrCommunicatorTimers(self, housekeeping.clean_old_blocks, 20, my_args=[self])

    # Timer to discover new peers
    OnionrCommunicatorTimers(self, lookupadders.lookup_new_peer_transports_with_communicator, 60, requires_peer=True, my_args=[self], max_threads=2)

    # Timer for adjusting which peers we actively communicate to at any given time, to avoid over-using peers
    OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30, my_args=[self], requires_peer=True)

    # Timer to read the upload queue and upload the entries to peers
    OnionrCommunicatorTimers(self, uploadblocks.upload_blocks_from_communicator, 5, my_args=[self], requires_peer=True, max_threads=1)

    # Timer to process the daemon command queue
    OnionrCommunicatorTimers(self, daemonqueuehandler.handle_daemon_commands, 6, my_args=[self], max_threads=3)

    # Setup direct connections
    if config.get('general.socket_servers', False):
        self.services = onionrservices.OnionrServices()
        self.active_services = []
        self.service_greenlets = []
        OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, max_threads=50, my_args=[self])
    else:
        self.services = None
    # {peer_pubkey: ephemeral_address}, the address to reach them
    self.direct_connection_clients = {}

    # This timer creates deniable blocks, in an attempt to further obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        deniableBlockTimer = OnionrCommunicatorTimers(self, deniableinserts.insert_deniable_block, 180, my_args=[self], requires_peer=True, max_threads=1)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

    # Timer to check for connectivity, through Tor to various high-profile onion services
    netCheckTimer = OnionrCommunicatorTimers(self, netcheck.net_check, 500, my_args=[self], max_threads=1)

    # Announce the public API server transport address to other nodes if security level allows
    if config.get('general.security_level', 1) == 0 and config.get('general.announce_node', True):
        # Default to high security level in case config breaks
        announceTimer = OnionrCommunicatorTimers(self, announcenode.announce_node, 3600, my_args=[self], requires_peer=True, max_threads=1)
        announceTimer.count = (announceTimer.frequency - 120)
    else:
        logger.debug('Will not announce node.')

    # Timer to delete malfunctioning or long-dead peers
    cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requires_peer=True)

    # Timer to cleanup dead ephemeral forward secrecy keys
    forwardSecrecyTimer = OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15, my_args=[self], max_threads=1)

    # Adjust initial timer triggers
    peerPoolTimer.count = (peerPoolTimer.frequency - 1)
    cleanupTimer.count = (cleanupTimer.frequency - 60)
    blockCleanupTimer.count = (blockCleanupTimer.frequency - 2)
    lookup_blocks_timer.count = (lookup_blocks_timer.frequency - 2)

    shared_state.add(self)

    if config.get('general.use_bootstrap', True):
        bootstrappeers.add_bootstrap_list_to_peer_list(self, [], db_only=True)

    if not config.get('onboarding.done', True):
        logger.info('First run detected. Run openhome to get setup.', terminal=True)
        while not config.get('onboarding.done', True):
            time.sleep(5)

    # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
    try:
        while not self.shutdown:
            for i in self.timers:
                if self.shutdown:
                    break
                i.processTimer()
            time.sleep(self.delay)
            # Debug to print out used FDs (regular and net)
            #proc = psutil.Process()
            #print(proc.open_files(), len(psutil.net_connections()))
    except KeyboardInterrupt:
        self.shutdown = True

    logger.info('Goodbye. (Onionr is cleaning up, and will exit)', terminal=True)

    try:
        self.service_greenlets
    except AttributeError:
        pass
    else:
        # Stop onionr direct connection services
        for server in self.service_greenlets:
            server.stop()
    localcommand.local_command('shutdown')  # shutdown the api
    try:
        time.sleep(0.5)
    except KeyboardInterrupt:
        pass
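# A minimal sketch (an assumption, not the real class) of the contract the
# daemon loop above relies on: each OnionrCommunicatorTimers instance
# registers itself in daemon.timers, processTimer() is polled roughly once
# per second, and the wrapped function fires whenever an internal counter
# reaches `frequency`. Pre-seeding `count`, as in
# `announceTimer.count = announceTimer.frequency - 120`, therefore makes a
# timer fire ~120 seconds after startup instead of waiting a full hour.
class TimerSketch:
    def __init__(self, daemon, func, frequency):
        self.func = func
        self.frequency = frequency
        self.count = 0
        daemon.timers.append(self)  # picked up by the daemon loop

    def processTimer(self):
        self.count += 1
        if self.count >= self.frequency:
            self.count = 0
            self.func()  # the real class also handles threads and peer checks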
def __init__(self, onionrInst, proxyPort, developmentMode=config.get('general.dev_mode', False)):
    onionrInst.communicatorInst = self
    # configure logger and stuff
    onionr.Onionr.setupConfig('data/', self=self)
    self.proxyPort = proxyPort
    self.isOnline = True  # Assume we're connected to the internet

    # list of timer instances
    self.timers = []

    # initialize core with Tor socks port being 3rd argument
    self._core = onionrInst.onionrCore
    self.blocksToUpload = []

    # loop time.sleep delay in seconds
    self.delay = 1

    # lists of connected peers and peers we know we can't reach currently
    self.onlinePeers = []
    self.offlinePeers = []
    self.cooldownPeer = {}
    self.connectTimes = {}
    # list of peer's profiles (onionrpeers.PeerProfile instances)
    self.peerProfiles = []
    # Peers merged to us. Don't add to db until we know they're reachable
    self.newPeers = []
    self.announceProgress = {}
    self.announceCache = {}

    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}

    # set true when shutdown command received
    self.shutdown = False

    # list of new blocks to download, added to when new block lists are fetched from peers
    self.blockQueue = {}

    # list of blocks currently downloading, to avoid duplicate downloads
    self.currentDownloading = []

    # timestamp when the last online node was seen
    self.lastNodeSeen = None

    # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
    self.dbTimestamps = {}

    # Clear the daemon queue for any dead messages
    if os.path.exists(self._core.queueDB):
        self._core.clearDaemonQueue()

    # Loads in and starts the enabled plugins
    plugins.reload()

    # time app started running for info/statistics purposes
    self.startTime = self._core._utils.getEpoch()

    if developmentMode:
        OnionrCommunicatorTimers(self, self.heartbeat, 30)

    # Set timers, function reference, seconds
    # requiresPeer True means the timer function won't fire if we have no connected peers
    peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
    OnionrCommunicatorTimers(self, self.runCheck, 2, maxThreads=1)

    # Timers to periodically lookup new blocks and download them
    OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
    OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)

    # Timer to reset the longest offline peer so contact can be attempted again
    OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)

    # Timer to cleanup old blocks
    blockCleanupTimer = OnionrCommunicatorTimers(self, housekeeping.clean_old_blocks, 65, myArgs=[self])

    # Timer to discover new peers
    OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)

    # Timer for adjusting which peers we actively communicate to at any given time, to avoid over-using peers
    OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30, myArgs=[self], requiresPeer=True)

    # Timer to read the upload queue and upload the entries to peers
    OnionrCommunicatorTimers(self, self.uploadBlock, 5, requiresPeer=True, maxThreads=1)

    # Timer to process the daemon command queue
    OnionrCommunicatorTimers(self, self.daemonCommands, 6, maxThreads=3)

    # Timer that kills Onionr if the API server crashes
    OnionrCommunicatorTimers(self, self.detectAPICrash, 30, maxThreads=1)

    # Setup direct connections
    if config.get('general.socket_servers', False):
        self.services = onionrservices.OnionrServices(self._core)
        self.active_services = []
        self.service_greenlets = []
        OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, maxThreads=50, myArgs=[self])
    else:
        self.services = None

    # This timer creates deniable blocks, in an attempt to further obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        deniableBlockTimer = OnionrCommunicatorTimers(self, deniableinserts.insert_deniable_block, 180, myArgs=[self], requiresPeer=True, maxThreads=1)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

    # Timer to check for connectivity, through Tor to various high-profile onion services
    netCheckTimer = OnionrCommunicatorTimers(self, netcheck.net_check, 600, myArgs=[self])

    # Announce the public API server transport address to other nodes if security level allows
    if config.get('general.security_level', 1) == 0:
        # Default to high security level in case config breaks
        announceTimer = OnionrCommunicatorTimers(self, announcenode.announce_node, 3600, myArgs=[self], requiresPeer=True, maxThreads=1)
        announceTimer.count = (announceTimer.frequency - 120)
    else:
        logger.debug('Will not announce node.')

    # Timer to delete malfunctioning or long-dead peers
    cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)

    # Timer to cleanup dead ephemeral forward secrecy keys
    forwardSecrecyTimer = OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15, myArgs=[self], maxThreads=1)

    # Adjust initial timer triggers
    peerPoolTimer.count = (peerPoolTimer.frequency - 1)
    cleanupTimer.count = (cleanupTimer.frequency - 60)
    blockCleanupTimer.count = (blockCleanupTimer.frequency - 5)

    # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
    try:
        while not self.shutdown:
            for i in self.timers:
                if self.shutdown:
                    break
                i.processTimer()
            time.sleep(self.delay)
            # Debug to print out used FDs (regular and net)
            #proc = psutil.Process()
            #print(proc.open_files(), len(psutil.net_connections()))
    except KeyboardInterrupt:
        self.shutdown = True

    logger.info('Goodbye. (Onionr is cleaning up, and will exit)')

    try:
        self.service_greenlets
    except AttributeError:
        pass
    else:
        # Stop onionr direct connection services
        for server in self.service_greenlets:
            server.stop()
    self._core._utils.localCommand('shutdown')  # shutdown the api
    time.sleep(0.5)
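# Design note on the shutdown sequence both daemons above share: the bare
# `try: self.service_greenlets / except AttributeError` probe only tests
# whether direct-connection services were ever created (the attribute is
# assigned solely when general.socket_servers is enabled). An equivalent
# and arguably clearer spelling:
#
#     if hasattr(self, 'service_greenlets'):
#         for server in self.service_greenlets:
#             server.stop()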
def reload(self, name):
    onionrplugins.reload(name)
def __init__(self, debug, developmentMode):
    '''
        OnionrCommunicate

        This class handles communication with nodes in the Onionr network.
    '''
    self._core = core.Core()
    self._utils = onionrutils.OnionrUtils(self._core)
    self._crypto = onionrcrypto.OnionrCrypto(self._core)
    self._netController = netcontroller.NetController(0)  # arg is the HS port but not needed rn in this file

    self.newHashes = {}  # use this to not keep hashes around too long if we can't get their data
    self.keepNewHash = 12
    self.ignoredHashes = []

    self.highFailureAmount = 7

    self.communicatorThreads = 0
    self.maxThreads = 75
    self.processBlocksThreads = 0
    self.lookupBlocksThreads = 0

    # list of blocks currently processing, to avoid trying a block twice at once in 2 separate threads
    self.blocksProcessing = []
    # network actions (active requests) for peers used mainly to prevent conflicting actions in threads
    self.peerStatus = {}

    # communicator timers, name: rate (in seconds)
    self.communicatorTimers = {}
    self.communicatorTimerCounts = {}
    self.communicatorTimerFuncs = {}

    self.registerTimer('blockProcess', 20)
    self.registerTimer('highFailure', 10)
    self.registerTimer('heartBeat', 10)
    self.registerTimer('pex', 120)

    logger.debug('Communicator debugging enabled.')

    with open('data/hs/hostname', 'r') as hostFile:
        torID = hostFile.read()

    apiRunningCheckRate = 10
    apiRunningCheckCount = 0

    self.peerData = {}  # Session data for peers (recent reachability, speed, etc)

    if os.path.exists(self._core.queueDB):
        self._core.clearDaemonQueue()

    # Loads in and starts the enabled plugins
    plugins.reload()

    while True:
        command = self._core.daemonQueue()
        # Process blocks based on a timer
        self.timerTick()
        # TODO: migrate below if statements to be own functions which are called in the above timerTick() function
        if self.communicatorTimers['highFailure'] == self.communicatorTimerCounts['highFailure']:
            self.communicatorTimerCounts['highFailure'] = 0
            for i in self.peerData:
                if self.peerData[i]['failCount'] >= self.highFailureAmount:
                    self.peerData[i]['failCount'] -= 1
        if self.communicatorTimers['pex'] == self.communicatorTimerCounts['pex']:
            pT1 = threading.Thread(target=self.getNewPeers, name="pT1")
            pT1.start()
            pT2 = threading.Thread(target=self.getNewPeers, name="pT2")
            pT2.start()
            self.communicatorTimerCounts['pex'] = 0  # TODO: do not reset timer if low peer count
        if self.communicatorTimers['heartBeat'] == self.communicatorTimerCounts['heartBeat']:
            logger.debug('Communicator heartbeat')
            self.communicatorTimerCounts['heartBeat'] = 0
        if self.communicatorTimers['blockProcess'] == self.communicatorTimerCounts['blockProcess']:
            lT1 = threading.Thread(target=self.lookupBlocks, name="lt1", args=(True,))
            lT2 = threading.Thread(target=self.lookupBlocks, name="lt2", args=(True,))
            lT3 = threading.Thread(target=self.lookupBlocks, name="lt3", args=(True,))
            lT4 = threading.Thread(target=self.lookupBlocks, name="lt4", args=(True,))
            pbT1 = threading.Thread(target=self.processBlocks, name='pbT1', args=(True,))
            pbT2 = threading.Thread(target=self.processBlocks, name='pbT2', args=(True,))
            pbT3 = threading.Thread(target=self.processBlocks, name='pbT3', args=(True,))
            pbT4 = threading.Thread(target=self.processBlocks, name='pbT4', args=(True,))
            if (self.maxThreads - 8) >= threading.active_count():
                lT1.start()
                lT2.start()
                lT3.start()
                lT4.start()
                pbT1.start()
                pbT2.start()
                pbT3.start()
                pbT4.start()
                self.communicatorTimerCounts['blockProcess'] = 0
            else:
                logger.debug(threading.active_count())
                logger.debug('Too many threads.')
        if command != False:
            if command[0] == 'shutdown':
                logger.info('Daemon received exit command.', timestamp=True)
                break
            elif command[0] == 'announceNode':
                announceAttempts = 3
                announceAttemptCount = 0
                announceVal = False
                logger.info('Announcing node to ' + command[1], timestamp=True)
                while not announceVal:
                    announceAttemptCount += 1
                    announceVal = self.performGet('announce', command[1], data=self._core.hsAdder.replace('\n', ''), skipHighFailureAddress=True)
                    logger.info(announceVal)
                    if announceAttemptCount >= announceAttempts:
                        logger.warn('Unable to announce to ' + command[1])
                        break
            elif command[0] == 'runCheck':
                logger.info('Status check; looks good.')
                open('data/.runcheck', 'w+').close()
            elif command[0] == 'kex':
                self.pexCount = pexTimer - 1
            elif command[0] == 'event':
                # todo
                pass
            elif command[0] == 'checkCallbacks':
                try:
                    data = json.loads(command[1])
                    logger.info('Checking for callbacks with connection %s...' % data['id'])
                    self.check_callbacks(data, config.get('dc_execcallbacks', True))
                    events.event('incoming_direct_connection', data={'callback': True, 'communicator': self, 'data': data})
                except Exception as e:
                    logger.error('Failed to interpret callbacks for checking', e)
            elif command[0] == 'incomingDirectConnection':
                try:
                    data = json.loads(command[1])
                    logger.info('Handling incoming connection %s...' % data['id'])
                    self.incoming_direct_connection(data)
                    events.event('incoming_direct_connection', data={'callback': False, 'communicator': self, 'data': data})
                except Exception as e:
                    logger.error('Failed to handle callbacks for checking', e)

        apiRunningCheckCount += 1

        # check if local API is up
        if apiRunningCheckCount > apiRunningCheckRate:
            if self._core._utils.localCommand('ping') != 'pong':
                for i in range(4):
                    if self._utils.localCommand('ping') == 'pong':
                        apiRunningCheckCount = 0
                        break  # break for loop
                    time.sleep(1)
                else:
                    # This executes if the api is NOT detected to be running
                    logger.error('Daemon detected API crash (or otherwise unable to reach API after long time), stopping...')
                    break  # break main daemon loop
            apiRunningCheckCount = 0

        time.sleep(1)

    self._netController.killTor()
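# The command loop above dispatches on command[0] through a long if/elif
# chain, and its own TODO notes the branches should become functions. A
# hedged sketch of the table-driven alternative; the handler names and the
# dispatch() helper are invented for illustration, only the
# (name, payload) command shape comes from the code above.
def _handle_shutdown(communicator, payload):
    communicator.shutdown = True  # hypothetical flag; the loop above uses `break`

COMMAND_HANDLERS = {
    'shutdown': _handle_shutdown,
    # 'announceNode': _handle_announce_node, ... one entry per command name
}

def dispatch(communicator, command):
    """command is the value returned by daemonQueue(): False when the queue
    is empty, otherwise a sequence whose first item names the action."""
    if command:
        handler = COMMAND_HANDLERS.get(command[0])
        if handler is not None:
            handler(communicator, command[1] if len(command) > 1 else None)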
def __init__(self, shared_state, developmentMode=None):
    if developmentMode is None:
        developmentMode = config.get('general.dev_mode', False)

    # configure logger and stuff
    self.config = config
    self.shared_state = shared_state  # TooManyObjects module

    # populate kv values
    self.kv = self.shared_state.get_by_string('DeadSimpleKV')

    if config.get('general.offline_mode', False):
        self.kv.put('isOnline', False)

    # list of timer instances
    self.timers = []

    # initialize core with Tor socks port being 3rd argument
    self.proxyPort = shared_state.get(NetController).socksPort

    self.upload_session_manager = self.shared_state.get(
        uploadblocks.sessionmanager.BlockUploadSessionManager)
    self.shared_state.share_object()

    # loop time.sleep delay in seconds
    self.delay = 1

    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}

    # Loads in and starts the enabled plugins
    plugins.reload()

    # extends our upload list and saves our list when Onionr exits
    uploadqueue.UploadQueue(self)

    # Timers to periodically lookup new blocks and download them
    lookup_blocks_timer = OnionrCommunicatorTimers(
        self,
        lookupblocks.lookup_blocks_from_communicator,
        config.get('timers.lookupBlocks', 25),
        my_args=[self], requires_peer=True, max_threads=1)
    """The block download timer is accessed by the block lookup function
    to trigger faster download starts"""
    self.download_blocks_timer = OnionrCommunicatorTimers(
        self, self.getBlocks, config.get('timers.getBlocks', 10),
        requires_peer=True, max_threads=5)

    # Timer to reset the longest offline peer
    # so contact can be attempted again
    OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58,
                             my_args=[self], max_threads=1)

    # Timer to cleanup old blocks
    blockCleanupTimer = OnionrCommunicatorTimers(
        self, housekeeping.clean_old_blocks, 20,
        my_args=[self], max_threads=1)

    # Timer to discover new peers
    OnionrCommunicatorTimers(
        self, lookupadders.lookup_new_peer_transports_with_communicator,
        60, requires_peer=True, my_args=[self], max_threads=2)

    # Timer for adjusting which peers
    # we actively communicate to at any given time,
    # to avoid over-using peers
    OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30,
                             my_args=[self], requires_peer=True)

    # Timer to read the upload queue and upload the entries to peers
    OnionrCommunicatorTimers(self,
                             uploadblocks.upload_blocks_from_communicator,
                             5, my_args=[self], requires_peer=True,
                             max_threads=1)

    # Setup direct connections
    if config.get('general.ephemeral_tunnels', False):
        self.services = onionrservices.OnionrServices()
        self.active_services = []
        self.service_greenlets = []
        OnionrCommunicatorTimers(self, servicecreator.service_creator, 5,
                                 max_threads=50, my_args=[self])
    else:
        self.services = None

    # {peer_pubkey: ephemeral_address}, the address to reach them
    self.direct_connection_clients = {}

    # This timer creates deniable blocks,
    # in an attempt to further obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        deniableBlockTimer = OnionrCommunicatorTimers(
            self, deniableinserts.insert_deniable_block, 180,
            my_args=[self], requires_peer=True, max_threads=1)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

    # Timer to check for connectivity,
    # through Tor to various high-profile onion services
    OnionrCommunicatorTimers(self, netcheck.net_check, 500,
                             my_args=[self], max_threads=1)

    # Announce the public API server transport address
    # to other nodes if security level allows
    if config.get('general.security_level', 1) == 0 \
            and config.get('general.announce_node', True):
        # Default to high security level in case config breaks
        announceTimer = OnionrCommunicatorTimers(
            self, announcenode.announce_node, 3600,
            my_args=[self], requires_peer=True, max_threads=1)
        announceTimer.count = (announceTimer.frequency - 60)
    else:
        logger.debug('Will not announce node.')

    # Timer to delete malfunctioning or long-dead peers
    cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300,
                                            requires_peer=True)

    # Timer to cleanup dead ephemeral forward secrecy keys
    OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15,
                             my_args=[self], max_threads=1)

    # Adjust initial timer triggers
    cleanupTimer.count = (cleanupTimer.frequency - 60)
    blockCleanupTimer.count = (blockCleanupTimer.frequency - 2)
    lookup_blocks_timer.count = (lookup_blocks_timer.frequency - 2)

    shared_state.add(self)

    if config.get('general.use_bootstrap_list', True):
        bootstrappeers.add_bootstrap_list_to_peer_list(self.kv, [],
                                                       db_only=True)

    daemoneventhooks.daemon_event_handlers(shared_state)

    if not config.get('onboarding.done', True):
        logger.info('First run detected. Run openhome to get setup.',
                    terminal=True)
        get_url()
        while not config.get('onboarding.done', True) and \
                not self.shared_state.get_by_string(
                    'DeadSimpleKV').get('shutdown'):
            try:
                time.sleep(2)
            except KeyboardInterrupt:
                self.shared_state.get_by_string('DeadSimpleKV').put(
                    'shutdown', True)

    # Main daemon loop, mainly for calling timers,
    # don't do any complex operations here to avoid locking
    try:
        while not self.shared_state.get_by_string('DeadSimpleKV').get(
                'shutdown'):
            for i in self.timers:
                if self.shared_state.get_by_string('DeadSimpleKV').get(
                        'shutdown'):
                    break
                i.processTimer()
            time.sleep(self.delay)
    except KeyboardInterrupt:
        self.shared_state.get_by_string('DeadSimpleKV').put(
            'shutdown', True)

    logger.info('Goodbye. (Onionr is cleaning up, and will exit)',
                terminal=True)

    try:
        self.service_greenlets
    except AttributeError:
        pass
    else:
        # Stop onionr direct connection services
        for server in self.service_greenlets:
            server.stop()
    try:
        time.sleep(0.5)
    except KeyboardInterrupt:
        pass
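# A minimal sketch of the shared shutdown flag used above, assuming only
# the DeadSimpleKV operations that appear there (get/put on a dict-like
# store); the real class lives in its own module and may differ. Any
# thread holding the shared state can request shutdown, and the main loop
# observes the flag on its next pass.
class DeadSimpleKVSketch:
    def __init__(self):
        self._data = {}

    def put(self, key, value):
        self._data[key] = value

    def get(self, key):
        return self._data.get(key)

kv = DeadSimpleKVSketch()
kv.put('shutdown', False)
kv.put('shutdown', True)   # e.g. set from a KeyboardInterrupt handler
assert kv.get('shutdown')  # the daemon loop exits when it sees this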
def __init__(self, shared_state, developmentMode=None):
    if developmentMode is None:
        developmentMode = config.get('general.dev_mode', False)

    # configure logger and stuff
    self.config = config
    self.shared_state = shared_state  # TooManyObjects module
    shared_state.add(self)

    # populate kv values
    self.kv = self.shared_state.get_by_string('DeadSimpleKV')

    if config.get('general.offline_mode', False):
        self.kv.put('isOnline', False)

    # initialize core with Tor socks port being 3rd argument
    self.proxyPort = shared_state.get(NetController).socksPort

    self.upload_session_manager = self.shared_state.get(
        uploadblocks.sessionmanager.BlockUploadSessionManager)
    self.shared_state.share_object()

    # loop time.sleep delay in seconds
    self.delay = 5

    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}

    # Loads in and starts the enabled plugins
    plugins.reload()

    # extends our upload list and saves our list when Onionr exits
    uploadqueue.UploadQueue(self)

    add_onionr_thread(lookupblocks.lookup_blocks_from_communicator,
                      [self.shared_state], 25, 3)

    add_onionr_thread(downloadblocks.download_blocks_from_communicator,
                      [self.shared_state],
                      config.get('timers.getBlocks', 10), 1)

    add_onionr_thread(onlinepeers.clear_offline_peer, [self.kv], 58)

    add_onionr_thread(housekeeping.clean_old_blocks,
                      [self.shared_state], 10, 1)

    # Discover new peers
    add_onionr_thread(
        lookupadders.lookup_new_peer_transports_with_communicator,
        [shared_state], 60, 3)

    # Timer for adjusting which peers
    # we actively communicate to at any given time,
    # to avoid over-using peers
    add_onionr_thread(cooldownpeer.cooldown_peer,
                      [self.shared_state], 30, 60)

    # Timer to read the upload queue and upload the entries to peers
    add_onionr_thread(uploadblocks.upload_blocks_from_communicator,
                      [self.shared_state], 5, 1)

    # This timer creates deniable blocks,
    # in an attempt to further obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        add_onionr_thread(deniableinserts.insert_deniable_block, [],
                          180, 10)

    if config.get('transports.tor', True):
        # Timer to check for connectivity,
        # through Tor to various high-profile onion services
        add_onionr_thread(netcheck.net_check, [shared_state], 500, 60)

    # Announce the public API server transport address
    # to other nodes if security level allows
    if config.get('general.security_level', 1) == 0 \
            and config.get('general.announce_node', True):
        # Default to high security level in case config breaks
        add_onionr_thread(announcenode.announce_node,
                          [self.shared_state], 600, 60)
    else:
        logger.debug('Will not announce node.')

    add_onionr_thread(onionrpeers.peer_cleanup, [], 300, 300)

    add_onionr_thread(housekeeping.clean_keys, [], 15, 1)

    if config.get('general.use_bootstrap_list', True):
        bootstrappeers.add_bootstrap_list_to_peer_list(self.kv, [],
                                                       db_only=True)

    daemoneventhooks.daemon_event_handlers(shared_state)

    if not config.get('onboarding.done', True):
        logger.info('First run detected. Run openhome to get setup.',
                    terminal=True)
        get_url()
        while not config.get('onboarding.done', True) and \
                not self.shared_state.get_by_string(
                    'DeadSimpleKV').get('shutdown'):
            try:
                time.sleep(2)
            except KeyboardInterrupt:
                self.shared_state.get_by_string('DeadSimpleKV').put(
                    'shutdown', True)

    # Main daemon loop, mainly for calling timers,
    # don't do any complex operations here to avoid locking
    try:
        while not self.shared_state.get_by_string('DeadSimpleKV').get(
                'shutdown'):
            time.sleep(self.delay)
    except KeyboardInterrupt:
        self.shared_state.get_by_string('DeadSimpleKV').put(
            'shutdown', True)

    logger.info('Goodbye. (Onionr is cleaning up, and will exit)',
                terminal=True)
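# A hedged sketch of what the add_onionr_thread helper used above
# plausibly does, inferred only from its call sites (a function, an
# argument list, an interval in seconds, and what appears to be an initial
# delay in seconds); the real helper lives elsewhere in the codebase and
# may differ.
import threading
import time

def add_onionr_thread_sketch(func, args, interval, initial_delay=0):
    def loop():
        time.sleep(initial_delay)  # stagger startup work
        while True:
            func(*args)            # do the periodic work
            time.sleep(interval)   # wait until the next round
    threading.Thread(target=loop, daemon=True).start()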
def __init__(self, debug, developmentMode):
    '''
        OnionrCommunicate

        This class handles communication with nodes in the Onionr network.
    '''
    self._core = core.Core()
    self._utils = onionrutils.OnionrUtils(self._core)
    self._crypto = onionrcrypto.OnionrCrypto(self._core)

    self.highFailureAmount = 7
    '''
    logger.info('Starting Bitcoin Node... with Tor socks port:' + str(sys.argv[2]))
    try:
        self.bitcoin = btc.OnionrBTC(torP=int(sys.argv[2]))
    except _gdbm.error:
        pass
    logger.info('Bitcoin Node started, on block: ' + self.bitcoin.node.getBlockHash(self.bitcoin.node.getLastBlockHeight()))
    '''
    #except:
    #    logger.fatal('Failed to start Bitcoin Node, exiting...')
    #    exit(1)

    blockProcessTimer = 0
    blockProcessAmount = 5
    highFailureTimer = 0
    highFailureRate = 10
    heartBeatTimer = 0
    heartBeatRate = 5
    pexTimer = 5  # How often we should check for new peers
    pexCount = 0

    logger.debug('Communicator debugging enabled.')
    torID = open('data/hs/hostname').read()

    self.peerData = {}  # Session data for peers (recent reachability, speed, etc)

    if os.path.exists(self._core.queueDB):
        self._core.clearDaemonQueue()

    # Loads in and starts the enabled plugins
    plugins.reload()

    while True:
        command = self._core.daemonQueue()
        # Process blocks based on a timer
        blockProcessTimer += 1
        heartBeatTimer += 1
        pexCount += 1
        highFailureTimer += 1
        if highFailureTimer == highFailureRate:
            highFailureTimer = 0
            for i in self.peerData:
                if self.peerData[i]['failCount'] == self.highFailureAmount:
                    self.peerData[i]['failCount'] -= 1
        if pexTimer == pexCount:
            self.getNewPeers()
            pexCount = 0
        if heartBeatRate == heartBeatTimer:
            logger.debug('Communicator heartbeat')
            heartBeatTimer = 0
        if blockProcessTimer == blockProcessAmount:
            self.lookupBlocks()
            self.processBlocks()
            blockProcessTimer = 0
        if command != False:
            if command[0] == 'shutdown':
                logger.info('Daemon received exit command.')
                break
        time.sleep(1)