def test_hash_validator(self):
    valid = [
        '00003b3813a166e706e490238e9515633cc3d083efe982a67753d50d87a00c96\n',
        '00003b3813a166e706e490238e9515633cc3d083efe982a67753d50d87a00c96',
        b'00003b3813a166e706e490238e9515633cc3d083efe982a67753d50d87a00c96',
        '00003b3813a166e706e490238e9515633cc36',
        b'00003b3813a166e706e490238e9515633cc3d083'
    ]
    invalid = [
        None, 0, 1, True, False, '%#W483242#',
        '00003b3813a166e706e490238e9515633cc3d083efe982a67753d50d87a00c9666',
        '', b'',
        b'00003b3813a166e706e490238e9515633cc3d083efe982a67753d50d87a00c9666666',
        b' ', '\n',
        '00003b3813a166e706e490238e9515633cc3d083efe982a67753d50d87a00ccccc\n'
    ]
    for x in valid:
        self.assertTrue(stringvalidators.validate_hash(x))
    for x in invalid:
        try:
            result = stringvalidators.validate_hash(x)
        except AttributeError:
            # Non-string inputs may raise instead of returning False
            result = False
        try:
            self.assertFalse(result)
        except AssertionError:
            raise AssertionError("%s returned true" % (x,))

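# A minimal sketch of the validator contract these tests imply; the real
# stringvalidators.validate_hash may differ. Per the cases above: hex str or
# bytes up to 64 chars after stripping whitespace is valid; empty input,
# non-hex input, and over-length input are invalid; bools/None are rejected,
# and other non-string types may raise AttributeError (which the test
# tolerates). The name validate_hash_sketch is hypothetical.
def validate_hash_sketch(data, length=64):
    if data in (True, False, None):
        return False
    data = data.strip()  # May raise AttributeError for non-str/bytes input
    if not data or len(data) > length:
        return False
    try:
        int(data, 16)  # int() accepts str or bytes when a base is given
    except ValueError:
        return False
    return True
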
def get_block_data(public_API, b_hash):
    """Return block data by hash unless we are hiding it."""
    resp = ''
    b_hash = reconstructhash.reconstruct_hash(b_hash)
    if stringvalidators.validate_hash(b_hash):
        if not config.get('general.hide_created_blocks', True) \
                or b_hash not in public_API.hideBlocks:
            if b_hash in public_API._too_many.get(BlockList).get():
                block = apiutils.GetBlockData().get_block_data(
                    b_hash, raw=True, decrypt=False)
                try:
                    # Encode in case data is binary
                    block = block.encode('utf-8')
                except AttributeError:
                    # 404 if there is no block data at all
                    if not block:
                        abort(404)
                if not len(block):
                    abort(404)
                resp = block
    if len(resp) == 0:
        abort(404)
    # Has to be octet stream, otherwise binary data fails hash check
    return Response(resp, mimetype='application/octet-stream')

def get_block_data(self, bHash, decrypt=False, raw=False, headerOnly=False):
    if not stringvalidators.validate_hash(bHash):
        raise onionrexceptions.InvalidHexHash(
            "block hash not valid hash format")
    bl = onionrblockapi.Block(bHash)
    if decrypt:
        bl.decrypt()
        if bl.isEncrypted and not bl.decrypted:
            raise ValueError
    if not raw:
        if not headerOnly:
            retData = {'meta': bl.bheader,
                       'metadata': bl.bmetadata,
                       'content': bl.bcontent}
            for x in list(retData.keys()):
                try:
                    retData[x] = retData[x].decode()
                except AttributeError:
                    pass
        else:
            validSig = False
            signer = bytesconverter.bytes_to_str(bl.signer)
            if bl.isSigned() and stringvalidators.validate_pub_key(signer) \
                    and bl.isSigner(signer):
                validSig = True
            bl.bheader['validSig'] = validSig
            bl.bheader['meta'] = ''
            retData = {'meta': bl.bheader, 'metadata': bl.bmetadata}
        return json.dumps(retData)
    else:
        return bl.raw

def ban_block():
    """Delete a block and permanently blacklist it."""
    blacklist = onionrblacklist.OnionrBlackList()
    try:
        ban = sys.argv[2]
    except IndexError:
        # Get the hash if it's not provided as a CLI argument
        ban = logger.readline('Enter a block hash:').strip()
    # Make sure the hash has no truncated zeroes
    ban = reconstructhash.reconstruct_hash(ban)
    if stringvalidators.validate_hash(ban):
        if not blacklist.inBlacklist(ban):
            try:
                blacklist.addToDB(ban)
                removeblock.remove_block(ban)
                deleteBlock(ban)
            except Exception as error:
                logger.error('Could not blacklist block',
                             error=error, terminal=True)
            else:
                logger.info('Block blacklisted', terminal=True)
        else:
            logger.warn('That block is already blacklisted', terminal=True)
    else:
        logger.error('Invalid block hash', terminal=True)

def get_file():
    """Get a file from onionr blocks."""
    try:
        file_name = _get_dir(sys.argv[2])
        bHash = sys.argv[3]
    except IndexError:
        logger.error("Syntax %s %s" % (
            sys.argv[0], '/path/to/filename <blockhash>'), terminal=True)
    else:
        logger.info(file_name, terminal=True)
        if os.path.exists(file_name):
            logger.error("File already exists", terminal=True)
            return
        if not stringvalidators.validate_hash(bHash):
            logger.error('Block hash is invalid', terminal=True)
            return
        try:
            with open(file_name, 'wb') as my_file:
                my_file.write(Block(bHash).bcontent)
        except onionrexceptions.NoDataAvailable:
            logger.error(
                'That block is not available. Trying again later may work.',
                terminal=True)

def site_file(name: str, file: str) -> Response:
    """Accept a site name: if it is a pubkey, serve the multi-page site;
    if it is a hash, serve the single-page site."""
    resp: str = 'Not Found'
    mime_type = mimetypes.MimeTypes().guess_type(file)[0]

    # If necessary, convert the name from mnemonic to base32
    if mnemonickeys.DELIMITER in name:
        name = mnemonickeys.get_base32(name)

    # Make sure the key is a valid base32-format ed25519 key,
    # re-adding padding if necessary
    if stringvalidators.validate_pub_key(name):
        name = unpaddedbase32.repad(name)
        resp = sitefiles.get_file(name, file)
    elif stringvalidators.validate_hash(name):
        try:
            resp = onionrblockapi.Block(name).bcontent
        except onionrexceptions.NoDataAvailable:
            abort(404)
        except TypeError:
            pass
        try:
            resp = base64.b64decode(resp)
        except binascii.Error:
            pass
    if resp == 'Not Found' or not resp:
        abort(404)
    return Response(resp, mimetype=mime_type)

def getBlockBodyData(name):
    resp = ''
    if stringvalidators.validate_hash(name):
        try:
            resp = onionrblockapi.Block(name, decrypt=True).bcontent
        except TypeError:
            pass
    else:
        abort(404)
    return Response(resp)

def export_block():
    try:
        if not stringvalidators.validate_hash(sys.argv[2]):
            raise ValueError
    except (IndexError, ValueError):
        logger.error('No valid block hash specified.', terminal=True)
        sys.exit(1)
    else:
        bHash = sys.argv[2]
        doExport(bHash)

def __init__(self, block_hash: Union[str, bytes]):
    block_hash = bytesconverter.bytes_to_str(block_hash)
    block_hash = reconstructhash.reconstruct_hash(block_hash)
    if not stringvalidators.validate_hash(block_hash):
        raise ValueError
    self.start_time = epoch.get_epoch()
    self.block_hash = reconstructhash.deconstruct_hash(block_hash)
    self.total_fail_count: int = 0
    self.total_success_count: int = 0
    self.peer_fails = {}
    self.peer_exists = {}

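# The upload loops later in this section call success(), fail(), and
# fail_peer() on these session objects. A minimal sketch of those methods,
# assuming they simply bump the counters initialized above; the real
# BlockUploadSessionManager sessions may track more state.
def success(self):
    self.total_success_count += 1  # Hypothetical: count one successful upload

def fail(self):
    self.total_fail_count += 1  # Hypothetical: count one failed attempt

def fail_peer(self, peer):
    # Hypothetical: per-peer failure tally, consulted via peer_fails[peer] > 3
    self.peer_fails[peer] = self.peer_fails.get(peer, 0) + 1
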
def mail_delete(block):
    if not stringvalidators.validate_hash(block):
        abort(404)
    block = reconstructhash.deconstruct_hash(block)
    existing = kv.get('deleted_mail')
    if existing is None:
        existing = []
    if block not in existing:
        existing.append(block)
    kv.put('deleted_mail', existing)
    kv.flush()
    return 'success'

def getData(name):
    resp = ""
    if stringvalidators.validate_hash(name):
        if name in blockmetadb.get_block_list():
            try:
                resp = client_get_block.get_block_data(name, decrypt=True)
            except ValueError:
                pass
        else:
            abort(404)
    else:
        abort(404)
    return Response(resp)

def export_block(*args):
    """Export block based on hash from stdin or argv."""
    if args:
        b_hash = args[0]
    else:
        try:
            if not stringvalidators.validate_hash(sys.argv[2]):
                raise ValueError
        except (IndexError, ValueError):
            logger.error('No valid block hash specified.', terminal=True)
            sys.exit(1)
        else:
            b_hash = sys.argv[2]
    _do_export(b_hash)

def store(data, blockHash=''):
    # Only validate when a hash was supplied; an empty string means
    # we derive the hash from the data below
    if blockHash and not stringvalidators.validate_hash(blockHash):
        raise ValueError
    ourHash = hashers.sha3_hash(data)
    if blockHash != '':
        if not ourHash == blockHash:
            raise ValueError(
                'Hash specified does not meet internal hash check')
    else:
        blockHash = ourHash
    if DB_ENTRY_SIZE_LIMIT >= sys.getsizeof(data):
        _dbInsert(blockHash, data)
    else:
        with open('%s/%s.dat' % (filepaths.block_data_location, blockHash),
                  'wb') as blockFile:
            blockFile.write(data)

def remove_block(block):
    """Remove a block from this node (does not automatically blacklist).

    **You may want blacklist.addToDB(blockHash)
    """
    if stringvalidators.validate_hash(block):
        conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
        c = conn.cursor()
        t = (block,)
        c.execute('DELETE FROM hashes WHERE hash = ?;', t)
        conn.commit()
        conn.close()
        dataSize = sys.getsizeof(onionrstorage.getData(block))
        storagecounter.StorageCounter().remove_bytes(dataSize)
    else:
        raise onionrexceptions.InvalidHexHash

def store(data, block_hash=''):
    # Only validate when a hash was supplied; an empty string means
    # we derive the hash from the data below
    if block_hash and not stringvalidators.validate_hash(block_hash):
        raise ValueError
    ourHash = hashers.sha3_hash(data)
    if block_hash != '':
        if not ourHash == block_hash:
            raise ValueError(
                'Hash specified does not meet internal hash check')
    else:
        block_hash = ourHash
    if DB_ENTRY_SIZE_LIMIT >= sys.getsizeof(data):
        _dbInsert(block_hash, data)
    else:
        with open(
                f'{block_data_location}/{block_hash}{BLOCK_EXPORT_FILE_EXT}',
                'wb') as blck_file:
            blck_file.write(data)

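# Hedged usage sketch for store(): when a hash is passed it must match the
# SHA3 of the data, otherwise the hash is derived internally. The names
# mirror the helpers used above; exact behavior depends on the real
# hashers module, and the data value here is illustrative.
data = b'example block payload'
expected = hashers.sha3_hash(data)
store(data)                       # Hash derived internally from the data
store(data, block_hash=expected)  # Passes the internal hash check
try:
    store(data, block_hash='00' * 32)  # Well-formed but wrong hash
except ValueError:
    pass  # Raised because the hash does not match the data
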
def has_block(hash: str) -> bool:
    """Check for new block in the block meta db."""
    conn = sqlite3.connect(
        dbfiles.block_meta_db, timeout=onionrvalues.DATABASE_LOCK_TIMEOUT)
    c = conn.cursor()
    if not stringvalidators.validate_hash(hash):
        raise onionrexceptions.InvalidHexHash("Invalid hash")
    for result in c.execute(
            "SELECT COUNT() FROM hashes WHERE hash = ?", (hash,)):
        if result[0] >= 1:
            conn.commit()
            conn.close()
            return True
        conn.commit()
        conn.close()
        return False
    return False

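# Hedged usage sketch for has_block(): note that a malformed hash raises
# InvalidHexHash rather than returning False, so callers should validate
# first or catch the exception. The hash value here is illustrative.
try:
    exists = has_block('00' * 32)
except onionrexceptions.InvalidHexHash:
    exists = False
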
def getData(bHash):
    if not stringvalidators.validate_hash(bHash):
        raise ValueError
    bHash = bytesconverter.bytes_to_str(bHash)
    # Check the disk for a data file by hash; if it is not there,
    # fall back to the DB entry.
    # If neither has the data, raise an exception.
    retData = None
    fileLocation = '%s/%s.dat' % (filepaths.block_data_location, bHash)
    not_found_msg = "Block data not found for: "
    if os.path.exists(fileLocation):
        with open(fileLocation, 'rb') as block:
            retData = block.read()
    else:
        retData = _dbFetch(bHash)
    if retData is None:
        raise onionrexceptions.NoDataAvailable(not_found_msg + str(bHash))
    return retData

def get_block_data(publicAPI, data):
    """data is the block hash in hex"""
    resp = ''
    if stringvalidators.validate_hash(data):
        if not config.get('general.hide_created_blocks', True) \
                or data not in publicAPI.hideBlocks:
            if data in publicAPI._too_many.get(BlockList).get():
                block = apiutils.GetBlockData().get_block_data(
                    data, raw=True, decrypt=False)
                try:
                    # Encode in case data is binary
                    block = block.encode('utf-8')
                except AttributeError:
                    if len(block) == 0:
                        abort(404)
                block = bytesconverter.str_to_bytes(block)
                resp = block
    if len(resp) == 0:
        abort(404)
    # Has to be octet stream, otherwise binary data fails hash check
    return Response(resp, mimetype='application/octet-stream')

def remove_block(block):
    """Remove a block from this node (does not automatically blacklist).

    **You may want blacklist.addToDB(blockHash)
    """
    if stringvalidators.validate_hash(block):
        try:
            data_size = sys.getsizeof(onionrstorage.getData(block))
        except onionrexceptions.NoDataAvailable:
            data_size = 0
        conn = sqlite3.connect(
            dbfiles.block_meta_db, timeout=DATABASE_LOCK_TIMEOUT)
        c = conn.cursor()
        t = (block,)
        c.execute('DELETE FROM hashes WHERE hash = ?;', t)
        conn.commit()
        conn.close()
        if data_size:
            storage_counter.remove_bytes(data_size)
    else:
        raise onionrexceptions.InvalidHexHash

def upload_blocks_from_communicator(comm_inst: 'OnionrCommunicatorDaemon'):
    """Accept a communicator instance and upload blocks from its upload queue.

    When inserting a block, we try to upload it to a few peers
    to add some deniability and increase availability.
    """
    kv: "DeadSimpleKV" = comm_inst.shared_state.get_by_string("DeadSimpleKV")
    TIMER_NAME = "upload_blocks_from_communicator"

    session_manager: sessionmanager.BlockUploadSessionManager
    session_manager = comm_inst.shared_state.get(
        sessionmanager.BlockUploadSessionManager)
    tried_peers: UserID = []
    finishedUploads = []
    kv.put('blocksToUpload',
           onionrcrypto.cryptoutils.random_shuffle(kv.get('blocksToUpload')))

    def remove_from_hidden(bl):
        sleep(60)
        try:
            comm_inst.shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(bl)
        except ValueError:
            pass

    if len(kv.get('blocksToUpload')) != 0:
        for bl in kv.get('blocksToUpload'):
            if not stringvalidators.validate_hash(bl):
                logger.warn('Requested to upload invalid block',
                            terminal=True)
                comm_inst.decrementThreadCount(TIMER_NAME)
                return
            session = session_manager.add_session(bl)
            for _ in range(min(len(kv.get('onlinePeers')), 6)):
                try:
                    peer = onlinepeers.pick_online_peer(comm_inst)
                except onionrexceptions.OnlinePeerNeeded:
                    continue
                try:
                    session.peer_exists[peer]
                    continue
                except KeyError:
                    pass
                try:
                    if session.peer_fails[peer] > 3:
                        continue
                except KeyError:
                    pass
                if peer in tried_peers:
                    continue
                tried_peers.append(peer)
                url = f'http://{peer}/upload'
                try:
                    data = block.Block(bl).getRaw()
                except onionrexceptions.NoDataAvailable:
                    finishedUploads.append(bl)
                    break
                proxy_type = proxypicker.pick_proxy(peer)
                logger.info(f"Uploading block {bl[:8]} to {peer}",
                            terminal=True)
                resp = basicrequests.do_post_request(
                    url, data=data, proxyType=proxy_type,
                    content_type='application/octet-stream')
                if resp is not False:
                    if resp == 'success':
                        Thread(target=remove_from_hidden,
                               args=[bl], daemon=True).start()
                        session.success()
                        session.peer_exists[peer] = True
                    elif resp == 'exists':
                        session.success()
                        session.peer_exists[peer] = True
                    else:
                        session.fail()
                        session.fail_peer(peer)
                        comm_inst.getPeerProfileInstance(peer).addScore(-5)
                        logger.warn(
                            f'Failed to upload {bl[:8]}, reason: {resp}',
                            terminal=True)
                else:
                    session.fail()
    session_manager.clean_session()
    for x in finishedUploads:
        try:
            kv.get('blocksToUpload').remove(x)
            comm_inst.shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(x)
        except ValueError:
            pass
    comm_inst.decrementThreadCount(TIMER_NAME)

def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    existingBlocks = blockmetadb.get_block_list()  # Existing saved blocks
    triedPeers = []  # List of peers we've tried this time around
    # Max number of *new* block hashes to already have queued,
    # to avoid memory exhaustion
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # Defined here to reset it each time
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # Check if the disk allocation is used up
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of '
                'allowed disk space used')
            break
        # Select a random online peer
        peer = onlinepeers.pick_online_peer(comm_inst)
        # If we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)
        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons.
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = 0
        else:
            listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            # Get a list of new block hashes
            newBlocks = peeraction.peer_action(
                comm_inst, peer, listLookupCommand)
        except Exception as error:
            logger.warn('Could not get new blocks from %s.' % peer,
                        error=error)
            newBlocks = False
        else:
            comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
        if newBlocks != False:  # if the request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    # The newline-separated string is a valid hash
                    i = reconstructhash.reconstruct_hash(i)
                    # If the block does not exist on disk
                    # and is not already in the block queue
                    if i not in existingBlocks:
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                    not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # Add the block to the download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info('Discovered %s new block%s' %
                    (new_block_count, block_string), terminal=True)
        comm_inst.download_blocks_timer.count = int(
            comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')

def upload_blocks_from_communicator(shared_state: 'OnionrCommunicatorDaemon'):
    """Accept a communicator instance and upload blocks from its upload queue.

    When inserting a block, we try to upload it to a few peers
    to add some deniability and increase availability.
    """
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    session_manager: sessionmanager.BlockUploadSessionManager
    session_manager = shared_state.get(
        sessionmanager.BlockUploadSessionManager)
    tried_peers: UserID = []
    finishedUploads = []
    SystemRandom().shuffle(kv.get('blocksToUpload'))

    def remove_from_hidden(bl):
        sleep(60)
        try:
            shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(bl)
        except ValueError:
            pass

    if len(kv.get('blocksToUpload')) != 0:
        for bl in kv.get('blocksToUpload'):
            if not stringvalidators.validate_hash(bl):
                logger.warn('Requested to upload invalid block',
                            terminal=True)
                return
            session = session_manager.add_session(bl)
            for _ in range(min(len(kv.get('onlinePeers')), 6)):
                try:
                    peer = onlinepeers.pick_online_peer(kv)
                    if not block.Block(bl).isEncrypted:
                        if peer in kv.get('plaintextDisabledPeers'):
                            logger.info(f"Cannot upload plaintext block to peer that denies it {peer}")  # noqa
                            continue
                except onionrexceptions.OnlinePeerNeeded:
                    continue
                try:
                    session.peer_exists[peer]
                    continue
                except KeyError:
                    pass
                try:
                    if session.peer_fails[peer] > 3:
                        continue
                except KeyError:
                    pass
                if peer in tried_peers:
                    continue
                tried_peers.append(peer)
                url = f'http://{peer}/upload'
                try:
                    data = block.Block(bl).getRaw()
                    if not data:
                        logger.warn(
                            f"Couldn't get data for block in upload list {bl}",
                            terminal=True)
                        raise onionrexceptions.NoDataAvailable
                    try:
                        def __check_metadata():
                            metadata = get_block_metadata_from_data(data)[0]
                            if not validate_metadata(metadata, data):
                                logger.warn(
                                    f"Metadata for uploading block not valid {bl}")
                                raise onionrexceptions.InvalidMetadata
                        __check_metadata()
                    except onionrexceptions.DataExists:
                        pass
                except (onionrexceptions.NoDataAvailable,
                        onionrexceptions.InvalidMetadata):
                    finishedUploads.append(bl)
                    break
                proxy_type = proxypicker.pick_proxy(peer)
                logger.info(
                    f"Uploading block {bl[:8]} to {peer}", terminal=True)
                resp = basicrequests.do_post_request(
                    url, data=data, proxyType=proxy_type,
                    content_type='application/octet-stream')
                if resp is not False:
                    if resp == 'success':
                        Thread(target=remove_from_hidden,
                               args=[bl], daemon=True).start()
                        session.success()
                        session.peer_exists[peer] = True
                    elif resp == 'exists':
                        session.success()
                        session.peer_exists[peer] = True
                    else:
                        session.fail()
                        session.fail_peer(peer)
                        shared_state.get_by_string(
                            'OnionrCommunicatorDaemon').getPeerProfileInstance(
                                peer).addScore(-5)
                        logger.warn(
                            f'Failed to upload {bl[:8]}, reason: {resp}',
                            terminal=True)
                else:
                    session.fail()
    session_manager.clean_session()
    for x in finishedUploads:
        try:
            kv.get('blocksToUpload').remove(x)
            shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(x)
        except ValueError:
            pass

def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    # List of existing saved blocks
    existingBlocks = get_block_list()
    triedPeers = []  # List of peers we've tried this time around
    # Max number of *new* block hashes to have in the queue
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # Defined here to reset it each time; the time offset is added later
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # Check if the disk allocation is used up
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of disk used')
            break
        try:
            # Select a random online peer
            peer = onlinepeers.pick_online_peer(comm_inst)
        except onionrexceptions.OnlinePeerNeeded:
            time.sleep(1)
            continue
        # If we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)
        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons.
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = epoch.get_epoch() - \
                config.get("general.max_block_age",
                           onionrvalues.DEFAULT_EXPIRE)
        listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            # Get a list of new block hashes
            newBlocks = peeraction.peer_action(
                comm_inst, peer, listLookupCommand)
        except Exception as error:
            logger.warn(
                f'Could not get new blocks from {peer}.', error=error)
            newBlocks = False
        if newBlocks != False:  # noqa
            # The request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    # The newline-separated string is a valid hash
                    i = reconstructhash.reconstruct_hash(i)
                    # If the block does not exist on disk
                    # and is not already in the queue
                    if i not in existingBlocks:
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                    not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # Add the block to the download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                                    comm_inst.dbTimestamps[peer] = \
                                        epoch.get_rounded_epoch(roundS=60)
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info(
            f'Discovered {new_block_count} new block{block_string}',
            terminal=True)
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')

def get_block_data(block):
    if not stringvalidators.validate_hash(block):
        raise ValueError
    return Response(Block(block).raw, mimetype='application/octet-stream')

def upload_blocks_from_communicator(comm_inst: OnionrCommunicatorDaemon):
    """Accept a communicator instance and upload blocks from its upload queue.

    When inserting a block, we try to upload it to a few peers
    to add some deniability and increase availability.
    """
    TIMER_NAME = "upload_blocks_from_communicator"

    session_manager: sessionmanager.BlockUploadSessionManager = \
        comm_inst.shared_state.get(sessionmanager.BlockUploadSessionManager)
    triedPeers = []
    finishedUploads = []
    comm_inst.blocksToUpload = onionrcrypto.cryptoutils.random_shuffle(
        comm_inst.blocksToUpload)
    if len(comm_inst.blocksToUpload) != 0:
        for bl in comm_inst.blocksToUpload:
            if not stringvalidators.validate_hash(bl):
                logger.warn('Requested to upload invalid block',
                            terminal=True)
                comm_inst.decrementThreadCount(TIMER_NAME)
                return
            session = session_manager.add_session(bl)
            for i in range(min(len(comm_inst.onlinePeers), 6)):
                peer = onlinepeers.pick_online_peer(comm_inst)
                try:
                    session.peer_exists[peer]
                    continue
                except KeyError:
                    pass
                try:
                    if session.peer_fails[peer] > 3:
                        continue
                except KeyError:
                    pass
                if peer in triedPeers:
                    continue
                triedPeers.append(peer)
                url = f'http://{peer}/upload'
                try:
                    data = block.Block(bl).getRaw()
                except onionrexceptions.NoDataAvailable:
                    finishedUploads.append(bl)
                    break
                proxyType = proxypicker.pick_proxy(peer)
                logger.info(f"Uploading block {bl[:8]} to {peer}",
                            terminal=True)
                resp = basicrequests.do_post_request(
                    url, data=data, proxyType=proxyType,
                    content_type='application/octet-stream')
                if resp is not False:
                    if resp == 'success':
                        session.success()
                        session.peer_exists[peer] = True
                    elif resp == 'exists':
                        session.success()
                        session.peer_exists[peer] = True
                    else:
                        session.fail()
                        session.fail_peer(peer)
                        comm_inst.getPeerProfileInstance(peer).addScore(-5)
                        logger.warn(
                            f'Failed to upload {bl[:8]}, reason: {resp[:15]}',
                            terminal=True)
                else:
                    session.fail()
    session_manager.clean_session()
    for x in finishedUploads:
        try:
            comm_inst.blocksToUpload.remove(x)
        except ValueError:
            pass
    comm_inst.decrementThreadCount(TIMER_NAME)