def _handle_sig_term(signum, frame):
    """Handle SIGTERM: shut the daemon down cleanly from the main process.

    Child processes/forks receive the same signal; they just exit so only
    the main PID triggers the clean-shutdown API call.
    """
    pid = str(os.getpid())
    main_pid = localcommand.local_command('/getpid')
    #logger.info(main_pid, terminal=True)
    if main_pid and main_pid == pid:
        logger.info(
            f"Received sigterm, shutting down gracefully. PID: {pid}",
            terminal=True)
        localcommand.local_command('/shutdownclean')
    else:
        # Fixed typo in log message: "Recieved" -> "Received"
        logger.info(
            f"Received sigterm in child process or fork, exiting. PID: {pid}")
    sys.exit(0)
def net_check(comm_inst):
    """Check if we are connected to the internet.
    or not when we can't connect to any peers
    """
    # True when an incoming connection was seen within the last minute
    recently_reached = False
    if not comm_inst.onlinePeers:
        try:
            last_connect = int(localcommand.local_command('/lastconnect'))
            if epoch.get_epoch() - last_connect <= 60:
                comm_inst.isOnline = True
                recently_reached = True
        except ValueError:
            pass
    # Either a recent inbound connection or a successful reachability probe
    # means we are online; otherwise restart Tor unless we are shutting down.
    if recently_reached or netutils.checkNetwork(torPort=comm_inst.proxyPort):
        comm_inst.isOnline = True
    elif not comm_inst.shutdown:
        if not comm_inst.config.get('general.offline_mode', False):
            logger.warn(
                'Network check failed, are you connected to ' +
                'the Internet, and is Tor working? ' +
                'This is usually temporary, but bugs and censorship can cause this to persist, in which case you should report it to beardog [at] mailbox.org',  # noqa
                terminal=True)
        restarttor.restart(comm_inst)
        comm_inst.offlinePeers = []
        comm_inst.isOnline = False
    comm_inst.decrementThreadCount('net_check')
def test_clearnet_tor_request(testmanager):
    """Ensure that Tor cannot request clearnet address.

    Does not run if Tor is being reused
    """
    config.reload()
    leak_result = ""
    if config.get('tor.use_existing_tor', False):
        logger.warn(
            "Can't ensure Tor reqs to clearnet won't happen when reusing Tor")
        return
    socks_port = localcommand.local_command('/gettorsocks')
    # Don't worry, this request isn't meant to go through,
    # but if it did it would be through Tor
    try:
        response = do_get_request('https://example.com/notvalidpage',
                                  port=socks_port,
                                  ignoreAPI=True)
        leak_result = response.lower()
    except AttributeError:
        leak_result = ""
    except Exception as e:
        logger.warn(str(e))
    # A body containing 'example' means the clearnet page actually loaded
    try:
        if 'example' in leak_result:
            logger.error('Tor was able to request a clearnet site')
            raise ValueError('Tor was able to request a clearnet site')
    except TypeError:
        pass
def daemon_queue_get_response(responseID=''):
    '''
        Get a response sent by communicator to the API, by requesting to the API

        Raises ValueError if responseID is empty (or otherwise falsy).
    '''
    # `not responseID` also rejects None, which `len()` would crash on
    if not responseID:
        raise ValueError('ResponseID should not be empty')
    return localcommand.local_command(
        dbfiles.daemon_queue_db, 'queueResponse/' + responseID)
def net_check(shared_state):
    """Check if we are connected to the internet.
    or not when we can't connect to any peers
    """
    # True when an incoming connection was seen within the last minute
    recently_reached = False
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    proxy_port = shared_state.get_by_string("NetController").socksPort
    if len(kv.get('onlinePeers')) == 0:
        try:
            last_connect = int(localcommand.local_command('/lastconnect'))
            if epoch.get_epoch() - last_connect <= 60:
                kv.put('isOnline', True)
                recently_reached = True
        except ValueError:
            pass
    # Either a recent inbound connection or a successful reachability probe
    # means we are online; otherwise restart Tor unless we are shutting down.
    if recently_reached or netutils.check_network(torPort=proxy_port):
        kv.put('isOnline', True)
    elif not kv.get('shutdown'):
        offline_mode = shared_state.get_by_string(
            "OnionrCommunicatorDaemon").config.get(
                'general.offline_mode', False)
        if not offline_mode:
            logger.warn(
                'Network check failed, are you connected to ' +
                'the Internet, and is Tor working? ' +
                'This is usually temporary, but bugs and censorship can cause this to persist, in which case you should report it to beardog [at] mailbox.org',  # noqa
                terminal=True)
        restarttor.restart(shared_state)
        kv.put('offlinePeers', [])
        kv.put('isOnline', False)
def restart():
    """Tell the Onionr daemon to restart.

    Forks where possible so the caller is not blocked, marks the restart
    via an indicator file, kills the daemon, waits for it to fully stop,
    then relaunches via subprocess.
    """
    if platform.system() == 'Windows':
        # os.fork is unavailable on Windows, so a clean restart can't be done here
        logger.warn('Cannot restart Onionr on Windows. Run stop and manually restart.', terminal=True)
        return
    logger.info('Restarting Onionr', terminal=True)
    # On platforms where we can, fork out to prevent locking
    try:
        pid = os.fork()
        if pid != 0:
            # parent process returns immediately; the child does the restart work
            return
    except (AttributeError, OSError):
        logger.warn('Could not fork on restart')
    # Indicator file signals other components that a restart is in progress
    with open(filepaths.restarting_indicator, 'w') as f:
        f.write('t')
    daemonlaunch.kill_daemon()
    # Wait until the API stops answering pings
    while localcommand.local_command('ping', max_wait=8) == 'pong!':
        time.sleep(0.3)
    # Grace period for threads to wind down before checking run files
    time.sleep(15)
    # Wait for daemon run-marker files to disappear before relaunching
    while (os.path.exists(filepaths.private_API_host_file)
            or (os.path.exists(filepaths.daemon_mark_file))):
        time.sleep(1)
    cleanup.delete_run_files()
    subprocess.Popen([SCRIPT_NAME, 'start'])
def check_ui(test_manager):
    """Check that core web UI endpoints respond and contain a script tag.

    Raises ValueError when an endpoint returns nothing or lacks 'script'.
    """
    endpoints = ['/', '/mail/', '/friends/', '/board/']
    for point in endpoints:
        result = localcommand.local_command(point)
        if not result:
            # Bare `raise ValueError` gave no hint which endpoint failed
            raise ValueError(f'uicheck failed on {point}: no response')
        result = result.lower()
        # `x not in y` is the idiomatic form of `not x in y`
        if 'script' not in result:
            raise ValueError(f'uicheck failed on {point}')
def __delete(directory):
    """Remove a Tor data directory under the Onionr home, unless running."""
    target = f'{identifyhome.identify_home()}/{directory}/'
    if os.path.exists(target):
        if localcommand.local_command('/ping') == 'pong!':
            # Deleting live Tor state would corrupt the running daemon
            logger.warn('Cannot delete Tor data while Onionr is running',
                        terminal=True)
        else:
            shutil.rmtree(target)
            logger.info('Tor reset', terminal=True)
def _wait_for_ui_to_be_ready():
    """Block until Tor reports ready, then log that the UI can be opened."""
    # No Tor to wait for in these configurations
    skip_wait = (config.get('general.offline_mode', False)
                 or not config.get('transports.tor', True)
                 or config.get('tor.use_existing_tor'))
    if skip_wait:
        return
    _tell_if_ui_not_ready()
    while local_command('/torready') != 'true':
        sleep(0.5)
    logger.info("Tor is ready, opening UI", terminal=True)
def soft_reset():
    """Delete block data and block metadata; refuses while the daemon runs."""
    if localcommand.local_command('/ping') == 'pong!':
        logger.warn('Cannot soft reset while Onionr is running',
                    terminal=True)
        return
    shutil.rmtree(filepaths.block_data_location)
    for stale in (dbfiles.block_meta_db, filepaths.upload_list):
        _ignore_not_found_delete(stale)
    onionrevents.event('softreset')
    logger.info("Soft reset Onionr", terminal=True)
def config_get(key):
    """Get a config value, preferring the running daemon's live config.

    Falls back to the on-disk config when the daemon is unreachable.
    """
    # These flags are module-level state; without the global declaration
    # the reads below raise UnboundLocalError because both names are also
    # assigned inside this function.
    global running_detected, first_get
    ret_data = False
    if running_detected or first_get:
        first_get = False
        ret_data = localcommand.local_command('/config/get/' + key)
    if ret_data is False:  # must be False exactly, not merely falsy/empty
        running_detected = False
        ret_data = config.get(key)
    else:
        # NOTE(review): both branches set running_detected to False; the
        # success branch may have been intended to set True — confirm.
        running_detected = False
    return ret_data
def _setup_online_mode(use_existing_tor: bool, net: NetController,
                       security_level: int):
    """Configure Tor transport for the daemon.

    Either attaches an onion service to an already-running Tor instance, or
    starts a new Tor process via the NetController. Exits the process when
    Tor cannot be reached/started.
    """
    if config.get('transports.tor', True):
        # If we are using tor, check if we are using an existing tor instance
        # if we are, we need to create an onion service on it and set attrs on our NetController
        # if not, we need to tell netcontroller to start one
        if use_existing_tor:
            try:
                os.mkdir(filepaths.tor_hs_loc)
            except FileExistsError:
                # Hidden-service dir already created on a previous run
                pass
            net.socksPort = config.get('tor.existing_socks_port')
            try:
                net.myID = create_onion_service(
                    port=net.apiServerIP + ':' + str(net.hsPort))[0]
            except IncorrectPassword:
                # Exit if we cannot connect to the existing Tor instance
                logger.error('Invalid Tor control password', terminal=True)
                localcommand.local_command('shutdown')
                cleanup.delete_run_files()
                sys.exit(1)
            # Normalize address: create_onion_service may omit the suffix
            if not net.myID.endswith('.onion'):
                net.myID += '.onion'
            with open(filepaths.tor_hs_address_file, 'w') as tor_file:
                tor_file.write(net.myID)
        else:
            logger.info('Tor is starting...', terminal=True)
            if not net.startTor():
                # Exit if we cannot start Tor.
                localcommand.local_command('shutdown')
                cleanup.delete_run_files()
                sys.exit(1)
        # Only reveal the onion address at the lowest security level
        if len(net.myID) > 0 and security_level == 0:
            logger.debug('Started .onion service: %s' %
                         (logger.colors.underline + net.myID))
        else:
            logger.debug('.onion service disabled')
def _check_upload_queue():
    """Returns the current upload queue len

    raises OverflowError if max, false if api not running
    """
    max_upload_queue: int = 5000
    response = localcommand.local_command('/gethidden', maxWait=10)
    try:
        queued = len(response.splitlines())
    except AttributeError:
        # API not running: local_command returned a non-string (False)
        return False
    if queued >= max_upload_queue:
        raise OverflowError
    return queued
def net_check(comm_inst):
    '''Check if we are connected to the internet or not when
       we can't connect to any peers'''
    # True when an incoming connection was seen within the last minute
    recently_reached = False
    if not comm_inst.onlinePeers:
        try:
            last_connect = int(localcommand.local_command('/lastconnect'))
            if epoch.get_epoch() - last_connect <= 60:
                comm_inst.isOnline = True
                recently_reached = True
        except ValueError:
            pass
    # Recent inbound traffic or a successful probe means online;
    # otherwise restart Tor unless a shutdown is already underway.
    if recently_reached or netutils.checkNetwork(torPort=comm_inst.proxyPort):
        comm_inst.isOnline = True
    elif not comm_inst.shutdown:
        logger.warn('Network check failed, are you connected to the Internet, and is Tor working?',
                    terminal=True)
        restarttor.restart(comm_inst)
        comm_inst.offlinePeers = []
        comm_inst.isOnline = False
    comm_inst.decrementThreadCount('net_check')
def restart():
    """Restart the Onionr daemon: kill it, wait for full shutdown, relaunch."""
    logger.info('Restarting Onionr', terminal=True)
    # On platforms where we can, fork out to prevent locking
    try:
        pid = os.fork()
        if pid != 0:
            # Parent returns; the forked child performs the restart
            return
    except (AttributeError, OSError):
        # BUG FIX: platform.platform() returns strings like
        # 'Windows-10-10.0...' and is never exactly 'Windows', so the
        # warning also fired on Windows. platform.system() is the right call.
        if platform.system() != 'Windows':
            logger.warn('Could not fork on restart')
    daemonlaunch.kill_daemon()
    # Wait until the API stops answering pings
    while localcommand.local_command('ping', maxWait=8) == 'pong!':
        time.sleep(0.3)
    # Grace period for daemon threads to wind down
    time.sleep(15)
    # Wait for run-marker files to disappear before relaunching
    while os.path.exists(filepaths.private_API_host_file) or os.path.exists(
            filepaths.daemon_mark_file):
        time.sleep(1)
    cleanup.delete_run_files()
    subprocess.Popen([SCRIPT_NAME, 'start'])
def _tell_if_ui_not_ready():
    """Warn the user when Tor (and therefore the UI) is still starting."""
    tor_ready = local_command('/torready') == 'true'
    if not tor_ready:
        logger.warn('The UI is not ready yet, waiting on Tor to start.',
                    terminal=True)
def __init__(self, shared_state, developmentMode=None):
    """Set up communicator state and timers, then run the main daemon loop.

    This constructor does not return until shutdown: it blocks in the
    timer-processing loop and performs cleanup on exit.
    """
    if developmentMode is None:
        developmentMode = config.get('general.dev_mode', False)
    # configure logger and stuff
    self.config = config
    self.storage_counter = storagecounter.StorageCounter()
    self.isOnline = True  # Assume we're connected to the internet
    self.shared_state = shared_state  # TooManyObjects module
    # list of timer instances
    self.timers = []
    # initialize core with Tor socks port being 3rd argument
    self.proxyPort = shared_state.get(NetController).socksPort
    # Upload information, list of blocks to upload
    self.blocksToUpload = []
    self.upload_session_manager = self.shared_state.get(
        uploadblocks.sessionmanager.BlockUploadSessionManager)
    self.shared_state.share_object()
    # loop time.sleep delay in seconds
    self.delay = 1
    # lists of connected peers and peers we know we can't reach currently
    self.onlinePeers = []
    self.offlinePeers = []
    self.cooldownPeer = {}
    self.connectTimes = {}
    # list of peer's profiles (onionrpeers.PeerProfile instances)
    self.peerProfiles = []
    # Peers merged to us. Don't add to db until we know they're reachable
    self.newPeers = []
    self.announceProgress = {}
    self.announceCache = {}
    self.generating_blocks = []
    # amount of threads running by name, used to prevent too many
    self.threadCounts = {}
    # set true when shutdown command received
    self.shutdown = False
    # list of new blocks to download, added to when new block lists are
    # fetched from peers
    self.blockQueue = {}
    # list of blocks currently downloading, avoid s
    self.currentDownloading = []
    # timestamp when the last online node was seen
    self.lastNodeSeen = None
    # Dict of time stamps for peer's block list lookup times,
    # to avoid downloading full lists all the time
    self.dbTimestamps = {}
    # Clear the daemon queue for any dead messages
    if os.path.exists(dbfiles.daemon_queue_db):
        daemonqueue.clear_daemon_queue()
    # Loads in and starts the enabled plugins
    plugins.reload()
    # time app started running for info/statistics purposes
    self.startTime = epoch.get_epoch()
    # extends our upload list and saves our list when Onionr exits
    uploadqueue.UploadQueue(self)
    if developmentMode:
        OnionrCommunicatorTimers(self, self.heartbeat, 30)
    # Set timers, function reference, seconds
    # requires_peer True means the timer function won't fire if we
    # have no connected peers
    peerPoolTimer = OnionrCommunicatorTimers(
        self, onlinepeers.get_online_peers, 60, max_threads=1,
        my_args=[self])
    OnionrCommunicatorTimers(self, self.runCheck, 2, max_threads=1)
    # Timers to periodically lookup new blocks and download them
    lookup_blocks_timer = OnionrCommunicatorTimers(
        self, lookupblocks.lookup_blocks_from_communicator,
        config.get('timers.lookupBlocks', 25),
        my_args=[self], requires_peer=True, max_threads=1)
    # The block download timer is accessed by the block lookup function
    # to trigger faster download starts
    self.download_blocks_timer = OnionrCommunicatorTimers(
        self, self.getBlocks, config.get('timers.getBlocks', 10),
        requires_peer=True, max_threads=5)
    # Timer to reset the longest offline peer
    # so contact can be attempted again
    OnionrCommunicatorTimers(
        self, onlinepeers.clear_offline_peer, 58, my_args=[self])
    # Timer to cleanup old blocks
    blockCleanupTimer = OnionrCommunicatorTimers(
        self, housekeeping.clean_old_blocks, 20, my_args=[self])
    # Timer to discover new peers
    OnionrCommunicatorTimers(
        self, lookupadders.lookup_new_peer_transports_with_communicator,
        60, requires_peer=True, my_args=[self], max_threads=2)
    # Timer for adjusting which peers we actively communicate to at
    # any given time, to avoid over-using peers
    OnionrCommunicatorTimers(
        self, cooldownpeer.cooldown_peer, 30,
        my_args=[self], requires_peer=True)
    # Timer to read the upload queue and upload the entries to peers
    OnionrCommunicatorTimers(
        self, uploadblocks.upload_blocks_from_communicator, 5,
        my_args=[self], requires_peer=True, max_threads=1)
    # Timer to process the daemon command queue
    OnionrCommunicatorTimers(
        self, daemonqueuehandler.handle_daemon_commands, 6,
        my_args=[self], max_threads=3)
    # Setup direct connections
    if config.get('general.socket_servers', False):
        self.services = onionrservices.OnionrServices()
        self.active_services = []
        self.service_greenlets = []
        OnionrCommunicatorTimers(
            self, servicecreator.service_creator, 5,
            max_threads=50, my_args=[self])
    else:
        self.services = None
    # {peer_pubkey: ephemeral_address}, the address to reach them
    self.direct_connection_clients = {}
    # This timer creates deniable blocks, in an attempt to further
    # obfuscate block insertion metadata
    if config.get('general.insert_deniable_blocks', True):
        deniableBlockTimer = OnionrCommunicatorTimers(
            self, deniableinserts.insert_deniable_block, 180,
            my_args=[self], requires_peer=True, max_threads=1)
        deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)
    # Timer to check for connectivity, through Tor to various
    # high-profile onion services
    OnionrCommunicatorTimers(
        self, netcheck.net_check, 500, my_args=[self], max_threads=1)
    # Announce the public API server transport address
    # to other nodes if security level allows
    if config.get('general.security_level', 1) == 0 \
            and config.get('general.announce_node', True):
        # Default to high security level incase config breaks
        announceTimer = OnionrCommunicatorTimers(
            self, announcenode.announce_node, 3600,
            my_args=[self], requires_peer=True, max_threads=1)
        announceTimer.count = (announceTimer.frequency - 120)
    else:
        logger.debug('Will not announce node.')
    # Timer to delete malfunctioning or long-dead peers
    cleanupTimer = OnionrCommunicatorTimers(
        self, self.peerCleanup, 300, requires_peer=True)
    # Timer to cleanup dead ephemeral forward secrecy keys
    OnionrCommunicatorTimers(
        self, housekeeping.clean_keys, 15, my_args=[self], max_threads=1)
    # Adjust initial timer triggers
    peerPoolTimer.count = (peerPoolTimer.frequency - 1)
    cleanupTimer.count = (cleanupTimer.frequency - 60)
    blockCleanupTimer.count = (blockCleanupTimer.frequency - 2)
    # BUG FIX: this previously rebound the name
    # (`lookup_blocks_timer = (...frequency - 2)`) instead of setting
    # .count like the three lines above, so the early trigger never applied.
    lookup_blocks_timer.count = (lookup_blocks_timer.frequency - 2)
    shared_state.add(self)
    if config.get('general.use_bootstrap', True):
        bootstrappeers.add_bootstrap_list_to_peer_list(
            self, [], db_only=True)
    if not config.get('onboarding.done', True):
        logger.info(
            'First run detected. Run openhome to get setup.',
            terminal=True)
        while not config.get('onboarding.done', True):
            time.sleep(5)
    # Main daemon loop, mainly for calling timers,
    # don't do any complex operations here to avoid locking
    try:
        while not self.shutdown:
            for i in self.timers:
                if self.shutdown:
                    break
                i.processTimer()
            time.sleep(self.delay)
            # Debug to print out used FDs (regular and net)
            #proc = psutil.Process()
            #print(proc.open_files(), len(psutil.net_connections()))
    except KeyboardInterrupt:
        self.shutdown = True
    logger.info(
        'Goodbye. (Onionr is cleaning up, and will exit)', terminal=True)
    try:
        self.service_greenlets
    except AttributeError:
        pass
    else:
        # Stop onionr direct connection services
        for server in self.service_greenlets:
            server.stop()
    localcommand.local_command('shutdown')  # shutdown the api
    try:
        time.sleep(0.5)
    except KeyboardInterrupt:
        pass
def _add_to_hidden_blocks(cache):
    """POST each cached block hash to the API's waitforshare endpoint."""
    for block_hash in cache:
        localcommand.local_command('waitforshare/' + block_hash, post=True)
def remove():
    # Ask the client API to delete the blacklisted mail message.
    # NOTE(review): `blacklisted_data` comes from the enclosing scope
    # (closure) — presumably a message identifier; confirm at the call site.
    localcommand.local_command(f'/mail/deletemsg/{blacklisted_data}', post=True)
def test_os_ver_endpoint(test_manager):
    """Check that the /os endpoint reports the platform we are running on."""
    expected = platform.system().lower()
    if localcommand.local_command('os') != expected:
        raise ValueError('could not get proper os platform from endpoint /os')
def poll_chat():
    """Endpoints peers get new messages from"""
    messages = localcommand.local_command('/chat/gets/%s' % (g.peer, ))
    return Response(messages)
# Persist the interactive prompt history when the script exits
atexit.register(readline.write_history_file, histfile)
from onionrutils.localcommand import local_command
from onionrutils.localcommand import get_hostname
try:
    print('API file found, probably running on ' + get_hostname())
except TypeError:
    # get_hostname() returned None: no API host file, daemon is not running
    print('Onionr not running')
    sys.exit(1)
print('1. get request (default)')
print('2. post request')
choice = input(">").lower().strip()
post = False
post_data = {}
# NOTE: `json` here is a local flag (str then bool), not the json module
json = False
endpoint = input("URL Endpoint: ")
data = input("Data url param: ")
if choice in ("2", "post", "post request"):
    post = True
    print("Enter post data")
    post_data = input()
    if post_data:
        print("Is this JSON?")
        json = input("y/n").lower().strip()
        if json == "y":
            json = True
ret = local_command(endpoint, data=data, post=post,
                    post_data=post_data, is_json=json)
print("Response: \n", ret)
def _proper_shutdown():
    # Tell the client API server to shut down, then exit this process
    # with a nonzero status.
    localcommand.local_command('shutdown')
    sys.exit(1)
def daemon():
    '''
        Starts the Onionr communication daemon
    '''
    # Refuse to start without Tor available
    if not hastor.has_tor():
        logger.error(
            "Tor is not present in system path or Onionr directory",
            terminal=True)
        cleanup.delete_run_files()
        sys.exit(1)
    # remove runcheck if it exists
    if os.path.isfile(filepaths.run_check_file):
        logger.debug(
            'Runcheck file found on daemon start, deleting in advance.')
        os.remove(filepaths.run_check_file)
    # Create shared objects
    shared_state = toomanyobjs.TooMany()
    # Start both HTTP API servers in daemon threads
    Thread(target=shared_state.get(apiservers.ClientAPI).start,
           daemon=True, name='client HTTP API').start()
    Thread(target=shared_state.get(apiservers.PublicAPI).start,
           daemon=True, name='public HTTP API').start()
    # Init run time tester
    # (ensures Onionr is running right, for testing purposes)
    shared_state.get(runtests.OnionrRunTestManager)
    shared_state.get(serializeddata.SerializedData)
    shared_state.share_object()  # share the parent object to the threads
    # Poll until the public API server has written its host address
    apiHost = ''
    while apiHost == '':
        try:
            with open(filepaths.public_API_host_file, 'r') as hostFile:
                apiHost = hostFile.read()
        except FileNotFoundError:
            pass
        time.sleep(0.5)
    logger.raw('', terminal=True)
    # print nice header thing :)
    if config.get('general.display_header', True):
        logoheader.header()
    version.version(verbosity=5, function=logger.info)
    logger.debug('Python version %s' % platform.python_version())
    if onionrvalues.DEVELOPMENT_MODE:
        logger.warn('Development mode enabled',
                    timestamp=False, terminal=True)
    net = NetController(config.get('client.public.port', 59497),
                        apiServerIP=apiHost)
    shared_state.add(net)
    logger.info('Tor is starting...', terminal=True)
    if not net.startTor():
        # Tor failed to start: shut down the API and clean up run files
        localcommand.local_command('shutdown')
        cleanup.delete_run_files()
        sys.exit(1)
    # Only reveal the onion address at the lowest security level
    if len(net.myID) > 0 and config.get('general.security_level', 1) == 0:
        logger.debug('Started .onion service: %s' %
                     (logger.colors.underline + net.myID))
    else:
        logger.debug('.onion service disabled')
    logger.info(
        'Using public key: %s' %
        (logger.colors.underline + getourkeypair.get_keypair()[0][:52]))
    try:
        time.sleep(1)
    except KeyboardInterrupt:
        pass
    events.event('init', threaded=False)
    events.event('daemon_start')
    # Blocks until the communicator loop exits (i.e. shutdown)
    communicator.startCommunicator(shared_state)
    localcommand.local_command('shutdown')
    net.killTor()
    try:
        time.sleep(5)  # Time to allow threads to finish, if not any "daemon" threads will be slaughtered http://docs.python.org/library/threading.html#threading.Thread.daemon
    except KeyboardInterrupt:
        pass
    cleanup.delete_run_files()
def insert_block(data: Union[str, bytes], header: str = 'txt',
                 sign: bool = False, encryptType: str = '', symKey: str = '',
                 asymPeer: str = '', meta: dict = None,
                 expire: Union[int, None] = None,
                 disableForward: bool = False,
                 signing_key: UserIDSecretKey = '') -> Union[str, bool]:
    """
        Inserts a block into the network
        encryptType must be specified to encrypt a block

        data: block content; header: internal block type; sign: ed-sign it;
        encryptType: '', 'asym' or 'sym' ('sym' not implemented; symKey unused);
        asymPeer: recipient public key for 'asym'; meta: user metadata dict;
        expire: optional expiry epoch (int); disableForward: skip forward
        secrecy; signing_key: alternative private key to sign with.

        Returns the block hash on success, False/'' on failure or duplicate.
        Raises DiskAllocationReached, InvalidMetadata, InvalidPubkey,
        ValueError, NotImplementedError.
    """
    # BUG FIX: `meta` was a mutable default argument ({}) that this function
    # mutates below (meta['type'], meta['rply'], ...), leaking state between
    # calls that omit it. Default to None and create a fresh dict per call.
    if meta is None:
        meta = {}
    our_private_key = crypto.priv_key
    our_pub_key = crypto.pub_key
    is_offline = True
    storage_counter = storagecounter.StorageCounter()
    allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
    if storage_counter.is_full():
        logger.error(allocationReachedMessage)
        raise onionrexceptions.DiskAllocationReached
    # A non-False queue length means the API (and daemon) are reachable
    if _check_upload_queue() is not False:
        is_offline = False
    if signing_key != '':
        # if it was specified to use an alternative private key
        our_private_key = signing_key
        our_pub_key = bytesconverter.bytes_to_str(
            crypto.cryptoutils.get_pub_key_from_priv(our_private_key))
    use_subprocess = powchoice.use_subprocess(config)
    retData = False
    # BUG FIX: was `if type(data) is None`, which is never True since
    # type() always returns a type object; test the value itself.
    if data is None:
        raise ValueError('Data cannot be none')
    createTime = epoch.get_epoch()
    dataNonce = bytesconverter.bytes_to_str(crypto.hashers.sha3_hash(data))
    # Reject content we already inserted (nonce file is one nonce per line)
    try:
        with open(filepaths.data_nonce_file, 'r') as nonces:
            if dataNonce in nonces:
                return retData
    except FileNotFoundError:
        pass
    # record nonce
    with open(filepaths.data_nonce_file, 'a') as nonceFile:
        nonceFile.write(dataNonce + '\n')
    plaintext = data
    plaintextMeta = {}
    plaintextPeer = asymPeer
    retData = ''
    signature = ''
    signer = ''
    metadata = {}
    # metadata is full block metadata; meta is internal,
    # user specified metadata
    # only use header if not set in provided meta
    meta['type'] = str(header)
    if encryptType in ('asym', 'sym'):
        metadata['encryptType'] = encryptType
    else:
        if not config.get('general.store_plaintext_blocks', True):
            raise onionrexceptions.InvalidMetadata(
                "Plaintext blocks are disabled, yet a plaintext block was being inserted")
        if encryptType not in ('', None):
            raise onionrexceptions.InvalidMetadata(
                'encryptType must be asym or sym, or blank')
    try:
        data = data.encode()
    except AttributeError:
        # already bytes
        pass
    if encryptType == 'asym':
        # Duplicate the time in encrypted messages to prevent replays
        meta['rply'] = createTime
        if not disableForward and sign and asymPeer != our_pub_key:
            try:
                forwardEncrypted = onionrusers.OnionrUser(
                    asymPeer).forwardEncrypt(data)
                data = forwardEncrypted[0]
                meta['forwardEnc'] = True
                # Expire time of key. no sense keeping block after that
                expire = forwardEncrypted[2]
            except onionrexceptions.InvalidPubkey:
                pass
            fsKey = onionrusers.OnionrUser(asymPeer).generateForwardKey()
            meta['newFSKey'] = fsKey
    jsonMeta = json.dumps(meta)
    plaintextMeta = jsonMeta
    if sign:
        signature = crypto.signing.ed_sign(
            jsonMeta.encode() + data, key=our_private_key, encodeResult=True)
        signer = our_pub_key
    if len(jsonMeta) > 1000:
        raise onionrexceptions.InvalidMetadata(
            'meta in json encoded form must not exceed 1000 bytes')
    # encrypt block metadata/sig/content
    if encryptType == 'sym':
        raise NotImplementedError("not yet implemented")
    elif encryptType == 'asym':
        if stringvalidators.validate_pub_key(asymPeer):
            # Encrypt block data with forward secrecy key first, but not meta
            jsonMeta = json.dumps(meta)
            jsonMeta = crypto.encryption.pub_key_encrypt(
                jsonMeta, asymPeer, encodedData=True).decode()
            data = crypto.encryption.pub_key_encrypt(
                data, asymPeer, encodedData=False)
            signature = crypto.encryption.pub_key_encrypt(
                signature, asymPeer, encodedData=True).decode()
            signer = crypto.encryption.pub_key_encrypt(
                signer, asymPeer, encodedData=True).decode()
            try:
                onionrusers.OnionrUser(asymPeer, saveUser=True)
            except ValueError:
                # if peer is already known
                pass
        else:
            raise onionrexceptions.InvalidPubkey(
                asymPeer + ' is not a valid base32 encoded ed25519 key')
    # compile metadata
    metadata['meta'] = jsonMeta
    if len(signature) > 0:  # I don't like not pattern
        metadata['sig'] = signature
        metadata['signer'] = signer
    metadata['time'] = createTime
    # ensure expire is integer and of sane length
    if expire is not None:
        if not len(str(int(expire))) < 20:
            raise ValueError(
                'expire must be valid int less than 20 digits in length')
        metadata['expire'] = expire
    # send block data (and metadata) to POW module to get tokenized block data
    if use_subprocess:
        payload = subprocesspow.SubprocessPOW(data, metadata).start()
    else:
        payload = onionrproofs.POW(metadata, data).waitForResult()
    if payload != False:  # noqa: may be bytes; keep loose comparison
        try:
            retData = onionrstorage.set_data(payload)
        except onionrexceptions.DiskAllocationReached:
            logger.error(allocationReachedMessage)
            retData = False
        else:
            # Tell the api server through localCommand to wait for the daemon
            # to upload this block to make statistical analysis more difficult
            if not is_offline or \
                    localcommand.local_command('/ping', maxWait=10) == 'pong!':
                if config.get('general.security_level', 1) == 0:
                    localcommand.local_command(
                        '/waitforshare/' + retData, post=True, maxWait=5)
                coredb.daemonqueue.daemon_queue_add('uploadBlock', retData)
            coredb.blockmetadb.add.add_to_block_DB(
                retData, selfInsert=True, dataSaved=True)
            if expire is None:
                coredb.blockmetadb.update_block_info(
                    retData, 'expire', createTime + onionrvalues.DEFAULT_EXPIRE)
            else:
                coredb.blockmetadb.update_block_info(
                    retData, 'expire', expire)
            blockmetadata.process_block_metadata(retData)
    if retData != False:  # noqa: '' is also a failure value here
        # Emit plugin events with the plaintext content/metadata
        if plaintextPeer == onionrvalues.DENIABLE_PEER_ADDRESS:
            events.event('insertdeniable',
                         {'content': plaintext, 'meta': plaintextMeta,
                          'hash': retData,
                          'peer': bytesconverter.bytes_to_str(asymPeer)},
                         threaded=True)
        else:
            events.event('insertblock',
                         {'content': plaintext, 'meta': plaintextMeta,
                          'hash': retData,
                          'peer': bytesconverter.bytes_to_str(asymPeer)},
                         threaded=True)
    coredb.daemonqueue.daemon_queue_add(
        'remove_from_insert_list', data=dataNonce)
    return retData