def lookup_new_peer_transports_with_communicator(comm_inst):
    """Ask a random online peer for new peer transport addresses (PEX).

    Downloads a comma-separated transport list, drops malformed addresses,
    addresses already queued, and our own transports, then queues the rest
    on comm_inst.newPeers. Always decrements the communicator thread count.
    """
    logger.info('Looking up new addresses...')
    tryAmount = 1
    newPeers = []
    transports = gettransports.get()
    for _ in range(tryAmount):
        # Don't get new peers if we have too many queued up already
        if len(newPeers) > 10000:
            break
        # Download new peer address list from a random online peer
        peer = onlinepeers.pick_online_peer(comm_inst)
        newAdders = peeraction.peer_action(comm_inst, peer, action='pex')
        try:
            # Normalize each entry immediately. The previous version only
            # stripped throwaway copies for validation, so whitespace-padded
            # invalid entries failed list.remove() (ValueError swallowed) and
            # survived into the queue, and valid entries kept their padding.
            newPeers = [x.strip() for x in newAdders.split(',')]
        except AttributeError:
            # peer_action returned a non-string (e.g. False on failure)
            pass
        else:
            # Keep only well-formed addresses that are not already queued
            # and are not one of our own transports
            newPeers = [
                x for x in newPeers
                if stringvalidators.validate_transport(x)
                and x not in comm_inst.newPeers
                and x not in transports
            ]
    comm_inst.newPeers.extend(newPeers)
    comm_inst.decrementThreadCount('lookup_new_peer_transports_with_communicator')
def announce_node(daemon):
    '''Announce our node to our peers'''
    # ret_data: True only if at least one announce POST returns 'Success'
    ret_data = False
    announce_fail = False
    # Do not let announceCache get too large
    if len(daemon.announceCache) >= 10000:
        daemon.announceCache.popitem()
    if daemon.config.get('general.security_level', 0) == 0:
        # Announce to random online peers
        # Prefer a peer we have neither announced to nor are mid-announce with
        for i in daemon.onlinePeers:
            if not i in daemon.announceCache and not i in daemon.announceProgress:
                peer = i
                break
        else:
            # Every online peer was already cached/in-progress; pick any
            peer = onlinepeers.pick_online_peer(daemon)
        # range(1) gives a single-pass loop so `break` can bail out early
        for x in range(1):
            try:
                # Our first transport address is our announced node ID
                ourID = gettransports.get()[0]
            except IndexError:
                break
            url = 'http://' + peer + '/announce'
            data = {'node': ourID}
            combinedNodes = ourID + peer
            # NOTE(review): ourID is a transport string from gettransports,
            # so this guard against the int 1 appears vestigial — if it were
            # ever False, existingRand below would be unbound. Confirm intent.
            if ourID != 1:
                existingRand = bytesconverter.bytes_to_str(keydb.transportinfo.get_address_info(peer, 'powValue'))
                # Reset existingRand if it no longer meets the minimum POW
                if type(existingRand) is type(None) or not existingRand.endswith('0' * onionrvalues.ANNOUNCE_POW):
                    existingRand = ''
            # Reuse a cached or stored proof when possible, otherwise compute
            if peer in daemon.announceCache:
                data['random'] = daemon.announceCache[peer]
            elif len(existingRand) > 0:
                data['random'] = existingRand
            else:
                # Mark in-progress so other announce passes skip this peer
                daemon.announceProgress[peer] = True
                proof = onionrproofs.DataPOW(combinedNodes, minDifficulty=onionrvalues.ANNOUNCE_POW)
                del daemon.announceProgress[peer]
                try:
                    data['random'] = base64.b64encode(proof.waitForResult()[1])
                except TypeError:
                    # Happens when we failed to produce a proof
                    logger.error("Failed to produce a pow for announcing to " + peer)
                    announce_fail = True
                else:
                    daemon.announceCache[peer] = data['random']
            if not announce_fail:
                logger.info('Announcing node to ' + url)
                if basicrequests.do_post_request(url, data, port=daemon.shared_state.get(NetController).socksPort) == 'Success':
                    logger.info('Successfully introduced node to ' + peer, terminal=True)
                    ret_data = True
                    # Persist both the introduction and the accepted proof
                    keydb.transportinfo.set_address_info(peer, 'introduced', 1)
                    keydb.transportinfo.set_address_info(peer, 'powValue', data['random'])
    daemon.decrementThreadCount('announce_node')
    return ret_data
def announce_node(daemon):
    """Announce our node to a peer.

    Picks an online peer we have not yet announced to (or any online peer),
    POSTs our first transport address to its /announce endpoint, and records
    the introduction on success. Always decrements the thread count.

    Returns True if the announce succeeded, False otherwise.
    """
    ret_data = False
    kv: "DeadSimpleKV" = daemon.shared_state.get_by_string("DeadSimpleKV")
    # Fix: default peer to empty so that when the security level disables
    # peer selection below, the `if not peer` check skips the announce
    # instead of raising NameError on an unbound local (which also skipped
    # decrementThreadCount).
    peer = ''
    # Do not let announceCache get too large
    if len(kv.get('announceCache')) >= 10000:
        kv.get('announceCache').popitem()
    if daemon.config.get('general.security_level', 0) == 0:
        # Announce to random online peers
        # Prefer one we have not already announced to
        for i in kv.get('onlinePeers'):
            if i not in kv.get('announceCache'):
                peer = i
                break
        else:
            try:
                peer = onlinepeers.pick_online_peer(daemon)
            except onionrexceptions.OnlinePeerNeeded:
                peer = ""
    try:
        # Our first transport address is our announced node ID
        ourID = gettransports.get()[0]
        if not peer:
            raise onionrexceptions.OnlinePeerNeeded
    except (IndexError, onionrexceptions.OnlinePeerNeeded):
        # No transport of our own yet, or no peer available: nothing to do
        pass
    else:
        url = 'http://' + peer + '/announce'
        data = {'node': ourID}
        logger.info('Announcing node to ' + url)
        if basicrequests.do_post_request(
                url,
                data,
                port=daemon.shared_state.get(NetController).socksPort)\
                == 'Success':
            logger.info('Successfully introduced node to ' + peer,
                        terminal=True)
            ret_data = True
            keydb.transportinfo.set_address_info(peer, 'introduced', 1)
    daemon.decrementThreadCount('announce_node')
    return ret_data
def lookup_new_peer_transports_with_communicator(shared_state):
    """Ask a random online peer for new peer transport addresses (PEX).

    Downloads a comma-separated transport list, drops malformed addresses,
    addresses already queued, and our own transports, then queues the rest
    on the shared 'newPeers' list.
    """
    logger.info('Looking up new addresses...')
    tryAmount = 1
    newPeers = []
    transports = gettransports.get()
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    for _ in range(tryAmount):
        # Don't get new peers if we have too many queued up already
        if len(newPeers) > 10000:
            break
        try:
            # Download new peer address list from a random online peer
            peer = onlinepeers.pick_online_peer(kv)
            newAdders = peeraction.peer_action(shared_state, peer, action='pex')
        except onionrexceptions.OnlinePeerNeeded:
            continue
        try:
            # Normalize each entry immediately. The previous version only
            # stripped throwaway copies for validation, so whitespace-padded
            # invalid entries failed list.remove() (ValueError swallowed) and
            # survived into the queue, and valid entries kept their padding.
            newPeers = [x.strip() for x in newAdders.split(',')]
        except AttributeError:
            # peer_action returned a non-string (e.g. False on failure)
            pass
        else:
            # Keep only well-formed addresses that are not already queued
            # and are not one of our own transports
            newPeers = [
                x for x in newPeers
                if stringvalidators.validate_transport(x)
                and x not in kv.get('newPeers')
                and x not in transports
            ]
    kv.get('newPeers').extend(newPeers)
def download_blocks_from_communicator(shared_state: "TooMany"):
    """Use communicator instance to download blocks in the comms's queue

    For each queued hash: fetch the block from a queued source peer (or a
    random online peer), verify the hash, metadata and POW, store valid
    blocks, and penalize peers that serve bad data.
    """
    blacklist = onionrblacklist.OnionrBlackList()
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    LOG_SKIP_COUNT = 50  # for how many iterations we skip logging the counter
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(kv.get('blockQueue')):
        count += 1
        try:
            blockPeers = list(kv.get('blockQueue')[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True
        if not shoulddownload.should_download(shared_state, blockHash):
            continue
        # NOTE(review): storage_counter is assumed to be a module-level
        # StorageCounter instance — confirm against the file's imports
        if kv.get('shutdown') or not kv.get('isOnline') or \
                storage_counter.is_full():
            # Exit loop if shutting down or offline,
            # or disk allocation reached
            break
        # Do not download blocks being downloaded
        if blockHash in kv.get('currentDownloading'):
            continue
        if len(kv.get('onlinePeers')) == 0:
            break
        # So we can avoid concurrent downloading in other threads of same block
        kv.get('currentDownloading').append(blockHash)
        if len(blockPeers) == 0:
            try:
                peerUsed = onlinepeers.pick_online_peer(kv)
            except onionrexceptions.OnlinePeerNeeded:
                # Fix: release the reservation taken just above. Previously
                # this `continue` skipped the end-of-loop remove(), leaving
                # the hash stuck in currentDownloading forever so the block
                # could never be retried.
                kv.get('currentDownloading').remove(blockHash)
                continue
        else:
            SystemRandom().shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)
        if not kv.get('shutdown') and peerUsed.strip() != '':
            logger.info(f"Attempting to download %s from {peerUsed}..."
                        % (blockHash[:12], ))
        content = peeraction.peer_action(
            shared_state, peerUsed,
            'getdata/' + blockHash,
            max_resp_size=3000000)  # block content from random peer
        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass
            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                realHash = realHash.decode()  # bytes on some versions for some reason
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode()  # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(content)  # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = \
                        validatemetadata.validate_metadata(metadata, metas[2])
                except onionrexceptions.PlaintextNotSupported:
                    logger.debug(
                        f"Not saving {blockHash} due to plaintext not enabled")
                    removeFromQueue = True
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result:  # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(content):  # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' % (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash, ))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(blockHash, dataSaved=True)  # add block to meta db
                            blockmetadata.process_block_metadata(blockHash)  # caches block metadata values to block database
                            spawn(
                                local_command,
                                f'/daemon-event/upload_event',
                                post=True,
                                is_json=True,
                                post_data={'block': blockHash})
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.' % (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = onionrcrypto.hashers.sha3_hash(content)  # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
        if removeFromQueue:
            try:
                del kv.get('blockQueue')[blockHash]  # remove from block queue both if success or false
                if count == LOG_SKIP_COUNT:
                    logger.info('%s blocks remaining in queue' %
                                [len(kv.get('blockQueue'))], terminal=True)
                    count = 0
            except KeyError:
                pass
        kv.get('currentDownloading').remove(blockHash)
def upload_blocks_from_communicator(comm_inst: 'OnionrCommunicatorDaemon'):
    """Accept a communicator instance + upload blocks from its upload queue."""
    """when inserting a block, we try to upload it to a few peers to add some deniability & increase functionality"""
    kv: "DeadSimpleKV" = comm_inst.shared_state.get_by_string("DeadSimpleKV")
    TIMER_NAME = "upload_blocks_from_communicator"
    session_manager: sessionmanager.BlockUploadSessionManager
    session_manager = comm_inst.shared_state.get(
        sessionmanager.BlockUploadSessionManager)
    # NOTE(review): this is a list of UserID values, so the annotation
    # should arguably be "List[UserID]" — kept as-is pending confirmation
    tried_peers: UserID = []
    finishedUploads = []
    # Shuffle so we don't always upload in the same (observable) order
    kv.put('blocksToUpload',
           onionrcrypto.cryptoutils.random_shuffle(kv.get('blocksToUpload')))

    def remove_from_hidden(bl):
        # Delay removal from the public API's hidden list; run in a thread
        sleep(60)
        try:
            comm_inst.shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(bl)
        except ValueError:
            pass

    if len(kv.get('blocksToUpload')) != 0:
        for bl in kv.get('blocksToUpload'):
            if not stringvalidators.validate_hash(bl):
                logger.warn('Requested to upload invalid block',
                            terminal=True)
                comm_inst.decrementThreadCount(TIMER_NAME)
                return
            session = session_manager.add_session(bl)
            # Try at most 6 distinct online peers per block
            for _ in range(min(len(kv.get('onlinePeers')), 6)):
                try:
                    peer = onlinepeers.pick_online_peer(comm_inst)
                except onionrexceptions.OnlinePeerNeeded:
                    continue
                # Skip peers that already have this block
                try:
                    session.peer_exists[peer]
                    continue
                except KeyError:
                    pass
                # Skip peers that failed this block too many times
                try:
                    if session.peer_fails[peer] > 3:
                        continue
                except KeyError:
                    pass
                if peer in tried_peers:
                    continue
                tried_peers.append(peer)
                url = f'http://{peer}/upload'
                try:
                    data = block.Block(bl).getRaw()
                except onionrexceptions.NoDataAvailable:
                    # Block data is gone locally; stop trying this block
                    finishedUploads.append(bl)
                    break
                proxy_type = proxypicker.pick_proxy(peer)
                logger.info(f"Uploading block {bl[:8]} to {peer}",
                            terminal=True)
                resp = basicrequests.do_post_request(
                    url, data=data, proxyType=proxy_type,
                    content_type='application/octet-stream')
                if resp is not False:
                    if resp == 'success':
                        # Peer accepted it; unhide the block after a delay
                        Thread(target=remove_from_hidden,
                               args=[bl], daemon=True).start()
                        session.success()
                        session.peer_exists[peer] = True
                    elif resp == 'exists':
                        session.success()
                        session.peer_exists[peer] = True
                    else:
                        session.fail()
                        session.fail_peer(peer)
                        comm_inst.getPeerProfileInstance(peer).addScore(-5)
                        logger.warn(
                            f'Failed to upload {bl[:8]}, reason: {resp}',
                            terminal=True)
                else:
                    session.fail()
    session_manager.clean_session()
    # Drop completed uploads from the queue and the hidden-block list
    for x in finishedUploads:
        try:
            kv.get('blocksToUpload').remove(x)
            comm_inst.shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(x)
        except ValueError:
            pass
    comm_inst.decrementThreadCount(TIMER_NAME)
def lookup_blocks_from_communicator(comm_inst):
    """Ask random online peers for new block hashes and queue them for download."""
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    existingBlocks = blockmetadb.get_block_list()  # List of existing saved blocks
    triedPeers = []  # list of peers we've tried this time around
    maxBacklog = 1560  # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        listLookupCommand = 'getblocklist'  # This is defined here to reset it each time
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
            break
        peer = onlinepeers.pick_online_peer(comm_inst)  # select random online peer
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)
        # Get the last time we looked up a peer's stamp to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = 0
        else:
            # Only append the date filter when we have a stored timestamp
            listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(comm_inst, peer, listLookupCommand)  # get list of new block hashes
        except Exception as error:
            logger.warn('Could not get new blocks from %s.'
                        % peer, error = error)
            newBlocks = False
        else:
            # Remember (rounded, for privacy) when we last polled this peer
            comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
        if newBlocks != False:
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline seperated string is valid hash
                    if not i in existingBlocks:
                        # if block does not exist on disk and is not already in block queue
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    comm_inst.blockQueue[i] = [peer]  # add blocks to download queue
                                    new_block_count += 1
                        else:
                            # Track up to 10 source peers per queued block
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info('Discovered %s new block%s' % (new_block_count, block_string), terminal=True)
        # Nudge the download timer so queued blocks are fetched sooner
        comm_inst.download_blocks_timer.count = int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
    return
def upload_blocks_from_communicator(comm_inst: OnionrCommunicatorDaemon):
    """Accepts a communicator instance and uploads blocks from its upload queue"""
    # When inserting a block we try to push it to a handful of peers to add
    # some deniability and increase availability.
    TIMER_NAME = "upload_blocks_from_communicator"
    session_manager: sessionmanager.BlockUploadSessionManager = \
        comm_inst.shared_state.get(sessionmanager.BlockUploadSessionManager)
    attempted_peers = []
    completed_uploads = []
    # Randomize upload order so observers can't infer queue order
    comm_inst.blocksToUpload = onionrcrypto.cryptoutils.random_shuffle(
        comm_inst.blocksToUpload)
    if comm_inst.blocksToUpload:
        for block_hash in comm_inst.blocksToUpload:
            if not stringvalidators.validate_hash(block_hash):
                logger.warn('Requested to upload invalid block',
                            terminal=True)
                comm_inst.decrementThreadCount(TIMER_NAME)
                return
            session = session_manager.add_session(block_hash)
            # Try at most six distinct online peers for this block
            for _ in range(min(len(comm_inst.onlinePeers), 6)):
                peer = onlinepeers.pick_online_peer(comm_inst)
                # Skip peers that already hold the block
                if peer in session.peer_exists:
                    continue
                # Skip peers that have repeatedly failed this block
                if session.peer_fails.get(peer, 0) > 3:
                    continue
                if peer in attempted_peers:
                    continue
                attempted_peers.append(peer)
                upload_url = f'http://{peer}/upload'
                try:
                    raw_block = block.Block(block_hash).getRaw()
                except onionrexceptions.NoDataAvailable:
                    # No local data for this block; give up on it entirely
                    completed_uploads.append(block_hash)
                    break
                chosen_proxy = proxypicker.pick_proxy(peer)
                logger.info(f"Uploading block {block_hash[:8]} to {peer}",
                            terminal=True)
                resp = basicrequests.do_post_request(
                    upload_url,
                    data=raw_block,
                    proxyType=chosen_proxy,
                    content_type='application/octet-stream')
                if resp != False:  # noqa
                    if resp == 'success':
                        session.success()
                        session.peer_exists[peer] = True
                    elif resp == 'exists':
                        session.success()
                        session.peer_exists[peer] = True
                    else:
                        session.fail()
                        session.fail_peer(peer)
                        comm_inst.getPeerProfileInstance(peer).addScore(-5)
                        logger.warn(
                            f'Failed to upload {block_hash[:8]}, reason: {resp[:15]}',
                            terminal=True)
                else:
                    session.fail()
    session_manager.clean_session()
    for finished in completed_uploads:
        try:
            comm_inst.blocksToUpload.remove(finished)
        except ValueError:
            pass
    comm_inst.decrementThreadCount(TIMER_NAME)
def lookup_blocks_from_communicator(comm_inst):
    """Ask random online peers for new block hashes and queue them for download."""
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    # List of existing saved blocks
    existingBlocks = get_block_list()
    triedPeers = []  # list of peers we've tried this time around
    # Max amount of *new* block hashes to have in queue
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # Defined here to reset it each time, time offset is added later
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of disk used')
            break
        try:
            # select random online peer
            peer = onlinepeers.pick_online_peer(comm_inst)
        except onionrexceptions.OnlinePeerNeeded:
            # No peers yet; wait briefly before the next attempt
            time.sleep(1)
            continue
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)
        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            # No stored stamp: only look back as far as the max block age
            lastLookupTime = epoch.get_epoch() - \
                config.get("general.max_block_age",
                           onionrvalues.DEFAULT_EXPIRE)
        listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(
                comm_inst, peer, listLookupCommand)  # get list of new block hashes
        except Exception as error:
            logger.warn(
                f'Could not get new blocks from {peer}.',
                error=error)
            newBlocks = False
        if newBlocks != False:  # noqa
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline seperated string is valid hash
                    # if block does not exist on disk + is not already in queue
                    if i not in existingBlocks:
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                    not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # add blocks to download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                                    # Only advance this peer's stamp once a
                                    # block from it is actually queued
                                    comm_inst.dbTimestamps[peer] = \
                                        epoch.get_rounded_epoch(roundS=60)
                        else:
                            # Track up to 10 source peers per queued block
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info(
            f'Discovered {new_block_count} new block{block_string}',
            terminal=True)
        # Nudge the download timer so queued blocks are fetched sooner
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
    '''Use communicator instance to download blocks in the comms's queue

    For each queued hash: fetch the block from a queued source peer (or a
    random online peer), verify the hash, metadata and POW, store valid
    blocks, and penalize peers that serve bad data.
    '''
    blacklist = onionrblacklist.OnionrBlackList()
    storage_counter = storagecounter.StorageCounter()
    LOG_SKIP_COUNT = 50  # for how many iterations we skip logging the counter
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(comm_inst.blockQueue):
        count += 1
        if len(comm_inst.onlinePeers) == 0:
            break
        # (removed dead local `triedQueuePeers`: it was re-created every
        # iteration and never read or written anywhere in the function)
        try:
            blockPeers = list(comm_inst.blockQueue[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True
        if not shoulddownload.should_download(comm_inst, blockHash):
            continue
        # Exit loop if shutting down or offline, or disk allocation reached
        if comm_inst.shutdown or not comm_inst.isOnline or \
                storage_counter.is_full():
            break
        # Do not download blocks being downloaded
        if blockHash in comm_inst.currentDownloading:
            continue
        # So we can avoid concurrent downloading in other threads of same block
        comm_inst.currentDownloading.append(blockHash)
        if len(blockPeers) == 0:
            peerUsed = onlinepeers.pick_online_peer(comm_inst)
        else:
            blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)
        if not comm_inst.shutdown and peerUsed.strip() != '':
            logger.info("Attempting to download %s from %s..."
                        % (blockHash[:12], peerUsed))
        content = peeraction.peer_action(
            comm_inst, peerUsed,
            'getdata/' + blockHash,
            max_resp_size=3000000
        )  # block content from random peer (includes metadata)
        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass
            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                realHash = realHash.decode()  # bytes on some versions for some reason
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode()  # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(content)  # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = validatemetadata.validate_metadata(
                        metadata, metas[2])
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result:  # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(content):  # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' % (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash, ))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(blockHash, dataSaved=True)  # add block to meta db
                            blockmetadata.process_block_metadata(blockHash)  # caches block metadata values to block database
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.' % (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = onionrcrypto.hashers.sha3_hash(content)  # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
        if removeFromQueue:
            try:
                del comm_inst.blockQueue[blockHash]  # remove from block queue both if success or false
                if count == LOG_SKIP_COUNT:
                    logger.info('%s blocks remaining in queue' %
                                [len(comm_inst.blockQueue)], terminal=True)
                    count = 0
            except KeyError:
                pass
        comm_inst.currentDownloading.remove(blockHash)
    comm_inst.decrementThreadCount('getBlocks')
def upload_blocks_from_communicator(shared_state: 'OnionrCommunicatorDaemon'):
    """Accept a communicator instance + upload blocks from its upload queue."""
    """when inserting a block, we try to upload it to a few peers to add some deniability & increase functionality"""
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    session_manager: sessionmanager.BlockUploadSessionManager
    session_manager = shared_state.get(
        sessionmanager.BlockUploadSessionManager)
    # NOTE(review): this is a list of UserID values, so the annotation
    # should arguably be "List[UserID]" — kept as-is pending confirmation
    tried_peers: UserID = []
    finishedUploads = []
    # Shuffle so we don't always upload in the same (observable) order
    SystemRandom().shuffle(kv.get('blocksToUpload'))

    def remove_from_hidden(bl):
        # Delay removal from the public API's hidden list; run in a thread
        sleep(60)
        try:
            shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(bl)
        except ValueError:
            pass

    if len(kv.get('blocksToUpload')) != 0:
        for bl in kv.get('blocksToUpload'):
            if not stringvalidators.validate_hash(bl):
                logger.warn('Requested to upload invalid block',
                            terminal=True)
                return
            session = session_manager.add_session(bl)
            # Try at most 6 distinct online peers per block
            for _ in range(min(len(kv.get('onlinePeers')), 6)):
                try:
                    peer = onlinepeers.pick_online_peer(kv)
                    # Respect peers that refuse plaintext blocks
                    if not block.Block(bl).isEncrypted:
                        if peer in kv.get('plaintextDisabledPeers'):
                            logger.info(f"Cannot upload plaintext block to peer that denies it {peer}")  # noqa
                            continue
                except onionrexceptions.OnlinePeerNeeded:
                    continue
                # Skip peers that already have this block
                try:
                    session.peer_exists[peer]
                    continue
                except KeyError:
                    pass
                # Skip peers that failed this block too many times
                try:
                    if session.peer_fails[peer] > 3:
                        continue
                except KeyError:
                    pass
                if peer in tried_peers:
                    continue
                tried_peers.append(peer)
                url = f'http://{peer}/upload'
                try:
                    data = block.Block(bl).getRaw()
                    if not data:
                        logger.warn(
                            f"Couldn't get data for block in upload list {bl}",
                            terminal=True)
                        raise onionrexceptions.NoDataAvailable
                    try:
                        def __check_metadata():
                            # Re-validate metadata before sharing the block
                            metadata = get_block_metadata_from_data(data)[0]
                            if not validate_metadata(metadata, data):
                                logger.warn(
                                    f"Metadata for uploading block not valid {bl}")
                                raise onionrexceptions.InvalidMetadata
                        __check_metadata()
                    except onionrexceptions.DataExists:
                        # Already-stored data is fine for uploading
                        pass
                except(  # noqa
                        onionrexceptions.NoDataAvailable,
                        onionrexceptions.InvalidMetadata) as _:
                    # Block is unusable; stop trying this block entirely
                    finishedUploads.append(bl)
                    break
                proxy_type = proxypicker.pick_proxy(peer)
                logger.info(
                    f"Uploading block {bl[:8]} to {peer}", terminal=True)
                resp = basicrequests.do_post_request(
                    url, data=data, proxyType=proxy_type,
                    content_type='application/octet-stream')
                if resp is not False:
                    if resp == 'success':
                        # Peer accepted it; unhide the block after a delay
                        Thread(target=remove_from_hidden,
                               args=[bl], daemon=True).start()
                        session.success()
                        session.peer_exists[peer] = True
                    elif resp == 'exists':
                        session.success()
                        session.peer_exists[peer] = True
                    else:
                        session.fail()
                        session.fail_peer(peer)
                        shared_state.get_by_string(
                            'OnionrCommunicatorDaemon').getPeerProfileInstance(
                                peer).addScore(-5)
                        logger.warn(
                            f'Failed to upload {bl[:8]}, reason: {resp}',
                            terminal=True)
                else:
                    session.fail()
    session_manager.clean_session()
    # Drop completed uploads from the queue and the hidden-block list
    for x in finishedUploads:
        try:
            kv.get('blocksToUpload').remove(x)
            shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(x)
        except ValueError:
            pass