def kill_daemon():
    """Shutdown the Onionr daemon (communicator)."""
    logger.warn('Stopping the running daemon...', timestamp=False,
                terminal=True)
    try:
        # On platforms where we can, fork out to prevent locking
        try:
            pid = os.fork()
            if pid != 0:
                # Parent returns immediately; the child performs the shutdown
                return
        except (AttributeError, OSError):
            # fork() unavailable (e.g. Windows) or failed; continue inline
            pass

        events.event('daemon_stop')
        net = NetController(config.get('client.port', 59496))
        try:
            daemonqueue.daemon_queue_add('shutdown')
        except sqlite3.OperationalError:
            # Queue DB locked/missing; killing Tor below is still attempted
            pass

        net.killTor()
    except Exception as e:  # best-effort shutdown: report failure and return
        logger.error('Failed to shutdown daemon: ' + str(e), error=e,
                     timestamp=False, terminal=True)
    return
def soft_reset():
    """Delete all locally stored blocks and their metadata, keeping identity keys.

    Refuses to run while the daemon is alive (detected via the /ping endpoint).
    """
    daemon_running = localcommand.local_command('/ping') == 'pong!'
    if daemon_running:
        logger.warn('Cannot soft reset while Onionr is running', terminal=True)
        return

    # Remove the raw block store, then the per-block database and upload list
    shutil.rmtree(filepaths.block_data_location)
    for stale in (dbfiles.block_meta_db, filepaths.upload_list):
        _ignore_not_found_delete(stale)

    onionrevents.event('softreset')
    logger.info("Soft reset Onionr", terminal=True)
def process_block_metadata(blockHash: str):
    """
    Read metadata from a block and cache it to the block database.

    blockHash -> sha3_256 hex formatted hash of Onionr block
    """
    curTime = epoch.get_rounded_epoch(roundS=60)
    myBlock = onionrblockapi.Block(blockHash)
    if myBlock.isEncrypted:
        myBlock.decrypt()
    # Only process plaintext blocks or blocks we successfully decrypted
    if (myBlock.isEncrypted and myBlock.decrypted) or (not myBlock.isEncrypted):
        # we would use myBlock.getType() here,
        # but it is bugged with encrypted blocks
        blockType = myBlock.getMetadata('type')
        signer = bytesconverter.bytes_to_str(myBlock.signer)
        valid = myBlock.verifySig()
        if valid:
            # Import any new forward-secrecy key the (verified) signer attached
            newFSKey = myBlock.getMetadata('newFSKey')
            if newFSKey is not None:
                try:
                    onionrusers.OnionrUser(signer).addForwardKey(newFSKey)
                except onionrexceptions.InvalidPubkey:
                    logger.warn(
                        '%s has invalid forward secrecy key to add: %s' %
                        (signer, newFSKey))

        try:
            if len(blockType) <= onionrvalues.MAX_BLOCK_TYPE_LENGTH:
                blockmetadb.update_block_info(blockHash, 'dataType', blockType)
        except TypeError:
            # blockType is None when the 'type' metadata field is missing
            logger.warn("Missing block information")

        # Set block expire time if specified
        try:
            expireTime = int(myBlock.getHeader('expire'))
            # test that expire time is an integer of sane length (for epoch)
            # doesn't matter if its too large because of the min() func below
            if not len(str(expireTime)) < 20:
                raise ValueError('timestamp invalid')
        except (ValueError, TypeError):
            expireTime = onionrvalues.DEFAULT_EXPIRE + curTime
        finally:
            # Clamp to the maximum allowed lifetime regardless of header value
            expireTime = min(expireTime, curTime + onionrvalues.DEFAULT_EXPIRE)
            blockmetadb.update_block_info(blockHash, 'expire', expireTime)

        if blockType == 'update':
            updater.update_event(myBlock)

        onionrevents.event('processblocks',
                           data={
                               'block': myBlock,
                               'type': blockType,
                               'signer': signer,
                               'validSig': valid
                           })
def handle_daemon_commands(comm_inst):
    # Pop one command (if any) off the daemon queue and dispatch it.
    # comm_inst: the communicator instance whose state the commands mutate.
    cmd = daemonqueue.daemon_queue()

    response = ''
    if cmd is not False:
        events.event('daemon_command', data={'cmd': cmd})
        if cmd[0] == 'shutdown':
            comm_inst.shutdown = True
        elif cmd[0] == 'runtimeTest':
            comm_inst.shared_state.get_by_string(
                "OnionrRunTestManager").run_tests()
        elif cmd[0] == 'remove_from_insert_list':
            try:
                comm_inst.generating_blocks.remove(cmd[1])
            except ValueError:
                # already removed; nothing to do
                pass
        elif cmd[0] == 'announceNode':
            if len(comm_inst.onlinePeers) > 0:
                comm_inst.announce(cmd[1])
            else:
                logger.debug("No nodes connected. Will not introduce node.")
        elif cmd[0] == 'runCheck':  # deprecated
            logger.debug('Status check; looks good.')
            open(filepaths.run_check_file + '.runcheck', 'w+').close()
        elif cmd[0] == 'connectedPeers':
            response = '\n'.join(list(comm_inst.onlinePeers)).strip()
            if response == '':
                response = 'none'
        elif cmd[0] == 'localCommand':
            # Run an arbitrary client API command on behalf of the queue
            response = localcommand.local_command(cmd[1])
        elif cmd[0] == 'clearOffline':
            comm_inst.offlinePeers = []
        elif cmd[0] == 'restartTor':
            restarttor.restart(comm_inst)
            comm_inst.offlinePeers = []
        elif cmd[0] == 'pex':
            # Force the peer-exchange timer to fire on its next tick
            for i in comm_inst.timers:
                if i.timerFunction.__name__ == 'lookupAdders':
                    i.count = (i.frequency - 1)
        elif cmd[0] == 'uploadBlock':
            comm_inst.blocksToUpload.append(cmd[1])
        else:
            logger.debug(
                'Received daemon queue command unable to be handled: %s' %
                (cmd[0], ))

        if cmd[0] not in ('', None):
            # NOTE(review): cmd[4] appears to be a response identifier from
            # the queue row — confirm against the daemonqueue schema
            if response != '':
                localcommand.local_command('queueResponseAdd/' + cmd[4],
                                           post=True,
                                           postData={'data': response})
        response = ''

    comm_inst.decrementThreadCount('handle_daemon_commands')
def addToDB(self, data, dataType=0, expire=0):
    """Add to the blacklist. Intended to be block hash, block data,
    peers, or transport addresses.

    dataType: 0=block 1=peer 2=pubkey
    expire: epoch after which the entry may be purged (0 = never)
    """
    # we hash the data so we can remove data entirely from our node's disk
    hashed = bytesconverter.bytes_to_str(
        onionrcrypto.hashers.sha3_hash(data))
    event('blacklist_add', data={'data': data, 'hash': hashed})

    # sha3-256 hex digests are at most 64 alphanumeric chars; reject
    # anything else before it reaches the database
    if len(hashed) > 64:
        raise Exception("Hashed data is too large")
    if not hashed.isalnum():
        raise Exception("Hashed data is not alpha numeric")
    try:
        int(dataType)
    except ValueError:
        raise Exception("dataType is not int")
    try:
        int(expire)
    except ValueError:
        raise Exception("expire is not int")

    if self.inBlacklist(hashed):
        return  # already blacklisted; nothing to do

    blacklistDate = epoch.get_epoch()
    try:
        self._dbExecute(
            "INSERT INTO blacklist (hash, dataType, blacklistDate, expire) VALUES(?, ?, ?, ?);",
            (str(hashed), dataType, blacklistDate, expire))
    except sqlite3.IntegrityError:
        # Row with this hash already exists (e.g. concurrent insert)
        pass
def insert_block(data: Union[str, bytes], header: str = 'txt',
                 sign: bool = False, encryptType: str = '', symKey: str = '',
                 asymPeer: str = '', meta: dict = {},
                 expire: Union[int, None] = None,
                 disableForward: bool = False,
                 signing_key: UserIDSecretKey = '') -> Union[str, bool]:
    """
    Create and insert a block into the network.

    encryptType must be specified to encrypt a block.
    If expire is less than the current date, it is treated as seconds
    into the future; otherwise it is taken as an exact epoch.
    Returns the new block's hash string, or False on failure/duplicate.
    """
    our_private_key = crypto.priv_key
    our_pub_key = crypto.pub_key

    storage_counter = storagecounter.StorageCounter()

    allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
    if storage_counter.is_full():
        logger.error(allocationReachedMessage)
        raise onionrexceptions.DiskAllocationReached

    if signing_key != '':
        # if it was specified to use an alternative private key
        our_private_key = signing_key
        our_pub_key = bytesconverter.bytes_to_str(
            crypto.cryptoutils.get_pub_key_from_priv(our_private_key))

    retData = False

    # NOTE(review): type(data) is a type object and never None, so this guard
    # can never fire; `data is None` was probably intended — confirm
    if type(data) is None:
        raise ValueError('Data cannot be none')

    createTime = epoch.get_epoch()

    # Deduplicate: refuse to re-insert data whose nonce we already recorded
    dataNonce = bytesconverter.bytes_to_str(crypto.hashers.sha3_hash(data))
    try:
        with open(filepaths.data_nonce_file, 'r') as nonces:
            if dataNonce in nonces:
                return retData
    except FileNotFoundError:
        pass
    # record nonce
    with open(filepaths.data_nonce_file, 'a') as nonceFile:
        nonceFile.write(dataNonce + '\n')

    # Keep plaintext copies for the insert event fired at the end
    plaintext = data
    plaintextMeta = {}
    plaintextPeer = asymPeer

    retData = ''
    signature = ''
    signer = ''
    metadata = {}

    # metadata is full block metadata
    # meta is internal, user specified metadata

    # only use header if not set in provided meta
    meta['type'] = str(header)

    if encryptType in ('asym', 'sym'):
        metadata['encryptType'] = encryptType
    else:
        if not config.get('general.store_plaintext_blocks', True):
            raise onionrexceptions.InvalidMetadata(
                "Plaintext blocks are disabled, " +
                "yet a plaintext block was being inserted")
        if encryptType not in ('', None):
            raise onionrexceptions.InvalidMetadata(
                'encryptType must be asym or sym, or blank')

    try:
        data = data.encode()
    except AttributeError:
        # already bytes
        pass

    if encryptType == 'asym':
        # Duplicate the time in encrypted messages to help prevent replays
        meta['rply'] = createTime
        if sign and asymPeer != our_pub_key:
            try:
                forwardEncrypted = onionrusers.OnionrUser(
                    asymPeer).forwardEncrypt(data)
                data = forwardEncrypted[0]
                meta['forwardEnc'] = True
                # Expire time of key. no sense keeping block after that
                expire = forwardEncrypted[2]
            except onionrexceptions.InvalidPubkey:
                pass
            if not disableForward:
                fsKey = onionrusers.OnionrUser(asymPeer).generateForwardKey()
                meta['newFSKey'] = fsKey

    jsonMeta = json.dumps(meta)
    plaintextMeta = jsonMeta
    if sign:
        signature = crypto.signing.ed_sign(jsonMeta.encode() + data,
                                           key=our_private_key,
                                           encodeResult=True)
        signer = our_pub_key

    if len(jsonMeta) > 1000:
        raise onionrexceptions.InvalidMetadata(
            'meta in json encoded form must not exceed 1000 bytes')

    # encrypt block metadata/sig/content
    if encryptType == 'sym':
        raise NotImplementedError("not yet implemented")
    elif encryptType == 'asym':
        if stringvalidators.validate_pub_key(asymPeer):
            # Encrypt block data with forward secrecy key first, but not meta
            jsonMeta = json.dumps(meta)
            jsonMeta = crypto.encryption.pub_key_encrypt(
                jsonMeta, asymPeer, encodedData=True).decode()
            data = crypto.encryption.pub_key_encrypt(data, asymPeer,
                                                     encodedData=False)
            signature = crypto.encryption.pub_key_encrypt(
                signature, asymPeer, encodedData=True).decode()
            signer = crypto.encryption.pub_key_encrypt(
                signer, asymPeer, encodedData=True).decode()
            try:
                onionrusers.OnionrUser(asymPeer, saveUser=True)
            except ValueError:
                # if peer is already known
                pass
        else:
            raise onionrexceptions.InvalidPubkey(
                asymPeer + ' is not a valid base32 encoded ed25519 key')

    # compile metadata
    metadata['meta'] = jsonMeta
    if len(signature) > 0:  # I don't like not pattern
        metadata['sig'] = signature
        metadata['signer'] = signer
    metadata['time'] = createTime

    # ensure expire is integer and of sane length
    if type(expire) is not type(None):  # noqa
        if not len(str(int(expire))) < 20:
            raise ValueError(
                'expire must be valid int less than 20 digits in length')
        # if expire is less than date, assume seconds into future
        if expire < epoch.get_epoch():
            expire = epoch.get_epoch() + expire
        metadata['expire'] = expire

    # send block data (and metadata) to POW module to get tokenized block data
    payload = subprocesspow.SubprocessPOW(data, metadata).start()
    if payload != False:  # noqa
        try:
            retData = onionrstorage.set_data(payload)
        except onionrexceptions.DiskAllocationReached:
            logger.error(allocationReachedMessage)
            retData = False
        else:
            if disableForward:
                logger.warn(
                    f'{retData} asym encrypted block created w/o ephemerality')
            """
            Tell the api server through localCommand to wait for the daemon to
            upload this block to make statistical analysis more difficult
            """
            spawn(
                localcommand.local_command,
                '/daemon-event/upload_event',
                post=True,
                is_json=True,
                post_data={'block': retData}
            ).get(timeout=5)
            coredb.blockmetadb.add.add_to_block_DB(retData,
                                                   selfInsert=True,
                                                   dataSaved=True)

            if expire is None:
                # No explicit expiry: default lifetime, capped by config
                coredb.blockmetadb.update_block_info(
                    retData, 'expire',
                    createTime +
                    min(onionrvalues.DEFAULT_EXPIRE,
                        config.get('general.max_block_age',
                                   onionrvalues.DEFAULT_EXPIRE)))
            else:
                coredb.blockmetadb.update_block_info(retData, 'expire', expire)

            blockmetadata.process_block_metadata(retData)

    if retData != False:  # noqa
        # Fire the appropriate insert event, carrying the plaintext copies
        if plaintextPeer == onionrvalues.DENIABLE_PEER_ADDRESS:
            events.event('insertdeniable', {
                'content': plaintext,
                'meta': plaintextMeta,
                'hash': retData,
                'peer': bytesconverter.bytes_to_str(asymPeer)
            }, threaded=True)
        else:
            events.event('insertblock', {
                'content': plaintext,
                'meta': plaintextMeta,
                'hash': retData,
                'peer': bytesconverter.bytes_to_str(asymPeer)
            }, threaded=True)

    spawn(
        localcommand.local_command,
        '/daemon-event/remove_from_insert_queue_wrapper',
        post=True,
        post_data={'block_hash': retData},
        is_json=True
    ).get(timeout=5)
    return retData
def daemon():
    """Start Onionr's primary threads for communicator, API server, node, and LAN."""

    def _handle_sig_term(signum, frame):
        # Only the main daemon process does a clean shutdown;
        # forks/children just exit
        pid = str(os.getpid())
        main_pid = localcommand.local_command('/getpid')
        #logger.info(main_pid, terminal=True)
        if main_pid and main_pid == pid:
            logger.info(
                f"Received sigterm, shutting down gracefully. PID: {pid}",
                terminal=True)
            localcommand.local_command('/shutdownclean')
        else:
            logger.info(
                f"Recieved sigterm in child process or fork, exiting. PID: {pid}")
        sys.exit(0)
    signal.signal(signal.SIGTERM, _handle_sig_term)

    # Determine if Onionr is in offline mode.
    # When offline, Onionr can only use LAN and disk transport
    offline_mode = config.get('general.offline_mode', False)

    if not hastor.has_tor():
        offline_mode = True
        logger.error("Tor is not present in system path or Onionr directory",
                     terminal=True)

    # Create shared objects
    shared_state = toomanyobjs.TooMany()

    # Add DeadSimpleKV for quasi-global variables (ephemeral key-value)
    shared_state.get(DeadSimpleKV)

    # Initialize the quasi-global variables
    setup_kv(shared_state.get(DeadSimpleKV))

    shared_state.get(daemoneventsapi.DaemonEventsBP)

    Thread(target=shared_state.get(apiservers.ClientAPI).start,
           daemon=True, name='client HTTP API').start()
    if not offline_mode:
        Thread(target=shared_state.get(apiservers.PublicAPI).start,
               daemon=True, name='public HTTP API').start()

    # Init run time tester
    # (ensures Onionr is running right, for testing purposes)
    # Run time tests are not normally run
    shared_state.get(runtests.OnionrRunTestManager)

    # Create singleton
    shared_state.get(serializeddata.SerializedData)

    shared_state.share_object()  # share the parent object to the threads

    show_logo()

    # since we randomize loopback API server hostname to protect
    # against attacks, we have to wait for it to become set
    apiHost = ''
    if not offline_mode:
        apiHost = get_api_host_until_available()

    net = NetController(config.get('client.public.port', 59497),
                        apiServerIP=apiHost)
    shared_state.add(net)

    shared_state.get(onionrstatistics.tor.TorStats)

    security_level = config.get('general.security_level', 1)
    use_existing_tor = config.get('tor.use_existing_tor', False)

    if not offline_mode:
        # we need to setup tor for use
        _setup_online_mode(use_existing_tor, net, security_level)

    _show_info_messages()
    logger.info(
        "Onionr daemon is running under " + str(os.getpid()), terminal=True)
    events.event('init', threaded=False)
    events.event('daemon_start')

    if config.get('transports.lan', True):
        if not onionrvalues.IS_QUBES:
            # LAN transport: local discovery server plus manager thread
            Thread(target=LANServer(shared_state).start_server,
                   daemon=True).start()
            LANManager(shared_state).start()
        else:
            logger.warn('LAN not supported on Qubes', terminal=True)
    if config.get('transports.sneakernet', True):
        Thread(target=sneakernet_import_thread, daemon=True).start()

    Thread(target=statistics_reporter,
           args=[shared_state], daemon=True).start()

    shared_state.get(DeadSimpleKV).put(
        'proxyPort', net.socksPort)

    spawn_client_threads(shared_state)

    clean_blocks_not_meeting_pow(shared_state)

    # Blocks until the communicator shuts down
    communicator.startCommunicator(shared_state)

    clean_ephemeral_services()

    # Tear down transports we started ourselves
    if not offline_mode and not use_existing_tor:
        net.killTor()
    else:
        try:
            os.remove(filepaths.tor_hs_address_file)
        except FileNotFoundError:
            pass

    better_sleep(5)

    cleanup.delete_run_files()
    if security_level >= 2:
        # Paranoid mode: shred the whole Onionr home directory on exit
        filenuke.nuke.clean_tree(identifyhome.identify_home())
def daemon():
    '''
        Starts the Onionr communication daemon
    '''
    # This (older) daemon variant requires Tor; bail out if it is missing
    if not hastor.has_tor():
        logger.error("Tor is not present in system path or Onionr directory",
                     terminal=True)
        cleanup.delete_run_files()
        sys.exit(1)

    # remove runcheck if it exists
    if os.path.isfile(filepaths.run_check_file):
        logger.debug(
            'Runcheck file found on daemon start, deleting in advance.')
        os.remove(filepaths.run_check_file)

    # Create shared objects
    shared_state = toomanyobjs.TooMany()
    Thread(target=shared_state.get(apiservers.ClientAPI).start,
           daemon=True, name='client HTTP API').start()
    Thread(target=shared_state.get(apiservers.PublicAPI).start,
           daemon=True, name='public HTTP API').start()

    # Init run time tester (ensures Onionr is running right, for testing purposes)
    shared_state.get(runtests.OnionrRunTestManager)
    shared_state.get(serializeddata.SerializedData)

    shared_state.share_object()  # share the parent object to the threads

    # Busy-wait for the public API server to write its randomized host
    apiHost = ''
    while apiHost == '':
        try:
            with open(filepaths.public_API_host_file, 'r') as hostFile:
                apiHost = hostFile.read()
        except FileNotFoundError:
            pass
        time.sleep(0.5)

    logger.raw('', terminal=True)
    # print nice header thing :)
    if config.get('general.display_header', True):
        logoheader.header()
    version.version(verbosity=5, function=logger.info)
    logger.debug('Python version %s' % platform.python_version())

    if onionrvalues.DEVELOPMENT_MODE:
        logger.warn('Development mode enabled', timestamp=False,
                    terminal=True)

    net = NetController(config.get('client.public.port', 59497),
                        apiServerIP=apiHost)
    shared_state.add(net)

    logger.info('Tor is starting...', terminal=True)
    if not net.startTor():
        # Tor failed to start: shut the client API back down and exit
        localcommand.local_command('shutdown')
        cleanup.delete_run_files()
        sys.exit(1)
    if len(net.myID) > 0 and config.get('general.security_level', 1) == 0:
        logger.debug('Started .onion service: %s' %
                     (logger.colors.underline + net.myID))
    else:
        logger.debug('.onion service disabled')

    logger.info(
        'Using public key: %s' %
        (logger.colors.underline + getourkeypair.get_keypair()[0][:52]))

    try:
        time.sleep(1)
    except KeyboardInterrupt:
        pass

    events.event('init', threaded=False)
    events.event('daemon_start')
    # Blocks until the communicator shuts down
    communicator.startCommunicator(shared_state)

    localcommand.local_command('shutdown')

    net.killTor()
    try:
        time.sleep(5)  # Time to allow threads to finish, if not any "daemon" threads will be slaughtered http://docs.python.org/library/threading.html#threading.Thread.daemon
    except KeyboardInterrupt:
        pass
    cleanup.delete_run_files()
def daemon():
    """Start Onionr's primary threads for communicator, API server, node, and LAN."""
    # Determine if Onionr is in offline mode.
    # When offline, Onionr can only use LAN and disk transport
    offline_mode = config.get('general.offline_mode', False)

    if not hastor.has_tor():
        offline_mode = True
        logger.error("Tor is not present in system path or Onionr directory",
                     terminal=True)

    # remove runcheck if it exists
    if os.path.isfile(filepaths.run_check_file):
        logger.debug('Runcheck file found on daemon start, deleting.')
        os.remove(filepaths.run_check_file)

    # Create shared objects
    shared_state = toomanyobjs.TooMany()

    # Add DeadSimpleKV for quasi-global variables (ephemeral key-value)
    shared_state.get(DeadSimpleKV)

    # Initialize the quasi-global variables
    setup_kv(shared_state.get(DeadSimpleKV))

    spawn_client_threads(shared_state)

    shared_state.get(daemoneventsapi.DaemonEventsBP)

    Thread(target=shared_state.get(apiservers.ClientAPI).start,
           daemon=True, name='client HTTP API').start()
    if not offline_mode:
        Thread(target=shared_state.get(apiservers.PublicAPI).start,
               daemon=True, name='public HTTP API').start()

    # Init run time tester
    # (ensures Onionr is running right, for testing purposes)
    # Run time tests are not normally run
    shared_state.get(runtests.OnionrRunTestManager)

    # Create singleton
    shared_state.get(serializeddata.SerializedData)

    shared_state.share_object()  # share the parent object to the threads

    show_logo()

    # since we randomize loopback API server hostname to protect
    # against attacks, we have to wait for it to become set
    apiHost = ''
    if not offline_mode:
        apiHost = get_api_host_until_available()

    net = NetController(config.get('client.public.port', 59497),
                        apiServerIP=apiHost)
    shared_state.add(net)

    shared_state.get(onionrstatistics.tor.TorStats)

    security_level = config.get('general.security_level', 1)
    use_existing_tor = config.get('tor.use_existing_tor', False)

    if not offline_mode:
        # we need to setup tor for use
        _setup_online_mode(use_existing_tor, net,
                           security_level)

    _show_info_messages()

    events.event('init', threaded=False)
    events.event('daemon_start')
    if config.get('transports.lan', True):
        # LAN transport: local discovery server plus manager thread
        Thread(target=LANServer(shared_state).start_server,
               daemon=True).start()
        LANManager(shared_state).start()
    if config.get('transports.sneakernet', True):
        Thread(target=sneakernet_import_thread, daemon=True).start()

    Thread(target=statistics_reporter,
           args=[shared_state], daemon=True).start()

    # Blocks until the communicator shuts down
    communicator.startCommunicator(shared_state)

    clean_ephemeral_services()

    # Tear down transports we started ourselves
    if not offline_mode and not use_existing_tor:
        net.killTor()
    else:
        try:
            os.remove(filepaths.tor_hs_address_file)
        except FileNotFoundError:
            pass

    better_sleep(5)

    cleanup.delete_run_files()
    if security_level >= 2:
        # Paranoid mode: shred the whole Onionr home directory on exit
        filenuke.nuke.clean_tree(identifyhome.identify_home())