Example #1
def get_block_data(public_API, b_hash):
    """return block data by hash unless we are hiding it"""
    resp = ''
    b_hash = reconstructhash.reconstruct_hash(b_hash)
    if stringvalidators.validate_hash(b_hash):
        if not config.get('general.hide_created_blocks', True) \
                or b_hash not in public_API.hideBlocks:
            if b_hash in public_API._too_many.get(BlockList).get():
                block = apiutils.GetBlockData().get_block_data(b_hash,
                                                               raw=True,
                                                               decrypt=False)
                try:
                    # Encode in case data is binary
                    block = block.encode('utf-8')
                except AttributeError:
                    # 404 if no block data
                    if not block:
                        abort(404)
                resp = block
    if len(resp) == 0:
        abort(404)
    # Has to be octet stream, otherwise binary data fails hash check
    return Response(resp, mimetype='application/octet-stream')
Example #2
    def clean_session(self,
                      specific_session: Union[str, UploadSession] = None):
        comm_inst: OnionrCommunicatorDaemon = self._too_many.get_by_string(
            "OnionrCommunicatorDaemon")
        sessions_to_delete = []
        if comm_inst.getUptime() < 120:
            return
        onlinePeerCount = len(comm_inst.onlinePeers)

        # If we have no online peers right now, there is nothing to clean
        if onlinePeerCount == 0:
            return

        for session in self.sessions:
            # if over 50% of peers that were online for a session have
            # become unavailable, don't kill sessions
            if session.total_success_count > onlinePeerCount:
                if onlinePeerCount / session.total_success_count >= 0.5:
                    return
            # Clean sessions if they have uploaded to enough online peers
            if session.total_success_count <= 0:
                continue
            if (session.total_success_count / onlinePeerCount
                ) >= onionrvalues.MIN_BLOCK_UPLOAD_PEER_PERCENT:
                sessions_to_delete.append(session)
        for session in sessions_to_delete:
            self.sessions.remove(session)
            # TODO cleanup to one round of search
            # Remove the blocks from the sessions, upload list, and waitforshare list
            try:
                comm_inst.blocksToUpload.remove(
                    reconstructhash.reconstruct_hash(session.block_hash))
            except ValueError:
                pass
            try:
                comm_inst.blocksToUpload.remove(session.block_hash)
            except ValueError:
                pass
            localcommand.local_command(
                f'waitforshare/{session.block_hash}')
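To make the cleanup threshold concrete: a session is removed once its successful uploads cover at least MIN_BLOCK_UPLOAD_PEER_PERCENT of the currently online peers. A quick check with hypothetical numbers (the real constant lives in onionrvalues and may differ):

MIN_BLOCK_UPLOAD_PEER_PERCENT = 0.3  # hypothetical value, for illustration only
online_peer_count = 10
total_success_count = 3
# 3 / 10 = 0.3 >= 0.3, so this session would be appended to sessions_to_delete
assert total_success_count / online_peer_count >= MIN_BLOCK_UPLOAD_PEER_PERCENT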
Example #3
def ban_block():
    """Deletes a block, permanently blacklisting it"""
    blacklist = onionrblacklist.OnionrBlackList()
    try:
        ban = sys.argv[2]
    except IndexError:
        # Get the hash if it's not provided as a CLI argument
        ban = logger.readline('Enter a block hash:').strip()
    # Make sure the hash has no truncated zeroes
    ban = reconstructhash.reconstruct_hash(ban)
    if stringvalidators.validate_hash(ban):
        if not blacklist.inBlacklist(ban):
            try:
                blacklist.addToDB(ban)
                removeblock.remove_block(ban)
                deleteBlock(ban)
            except Exception as error:
                logger.error('Could not blacklist block',
                             error=error,
                             terminal=True)
            else:
                logger.info('Block blacklisted', terminal=True)
        else:
            logger.warn('That block is already blacklisted', terminal=True)
    else:
        logger.error('Invalid block hash', terminal=True)
Example #4
def add_deleted(keyStore, b_hash):
    existing = keyStore.get('deleted_mail')
    b_hash = reconstructhash.reconstruct_hash(b_hash)
    if existing is None:
        existing = []
    elif b_hash in existing:
        return
    # list.append returns None, so append first and store the list itself
    existing.append(b_hash)
    keyStore.put('deleted_mail', existing)
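Note that list.append mutates the list in place and returns None, which is why the list itself, and not the return value of append, must be handed to keyStore.put. A quick interpreter check shows the pitfall:

>>> deleted = []
>>> print(deleted.append('abc123'))  # append mutates in place and returns None
None
>>> deleted
['abc123']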
Example #5
def waitforshare(name):
    '''Used to prevent the **public** api from sharing blocks we just created'''
    if not name.isalnum():
        raise ValueError('block hash needs to be alphanumeric')
    name = reconstructhash.reconstruct_hash(name)
    if name in client_api.publicAPI.hideBlocks:
        client_api.publicAPI.hideBlocks.remove(name)
        return Response("removed")
    else:
        client_api.publicAPI.hideBlocks.append(name)
        return Response("added")
Example #6
File: session.py Project: x0rzkov/onionr
    def __init__(self, block_hash: Union[str, bytes]):
        block_hash = bytesconverter.bytes_to_str(block_hash)
        block_hash = reconstructhash.reconstruct_hash(block_hash)
        if not stringvalidators.validate_hash(block_hash):
            raise ValueError

        self.start_time = epoch.get_epoch()
        self.block_hash = reconstructhash.deconstruct_hash(block_hash)
        self.total_fail_count: int = 0
        self.total_success_count: int = 0
        self.peer_fails = {}
        self.peer_exists = {}
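Note how the session stores the deconstructed (zero-stripped) hash, while call sites such as clean_session reconstruct it before comparing; for a full-length hash the two operations are inverses. A minimal round-trip check, assuming deconstruct_hash simply strips the leading zero padding that reconstruct_hash restores:

h = '0000' + 'ab' * 30  # 64-character hash with leading zeros
short = reconstructhash.deconstruct_hash(h)  # assumed to strip the zeros
assert reconstructhash.reconstruct_hash(short) == h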
Example #7
        def wait_for_share(name):
            """Prevent the **public** api from sharing blocks.

            Used for blocks we created usually
            """
            if not name.isalnum():
                raise ValueError('block hash needs to be alphanumeric')
            name = reconstructhash.reconstruct_hash(name)
            if name in client_api.publicAPI.hideBlocks:
                return Response("will be removed")
            else:
                client_api.publicAPI.hideBlocks.append(name)
                return Response("added")
Example #8
    def clean_session(self,
                      specific_session: Union[str, 'UploadSession'] = None):

        comm_inst: 'OnionrCommunicatorDaemon'  # type: ignore
        comm_inst = self._too_many.get_by_string(  # pylint: disable=E1101 type: ignore
            "OnionrCommunicatorDaemon")
        kv: "DeadSimpleKV" = comm_inst.shared_state.get_by_string(
            "DeadSimpleKV")
        sessions_to_delete = []
        # Don't clean anything within the first two minutes of runtime
        if epoch.get_epoch() - kv.get('startTime') < 120:
            return
        onlinePeerCount = len(kv.get('onlinePeers'))

        # If we have no online peers right now, there is nothing to clean
        if onlinePeerCount == 0:
            return

        for sess in self.sessions:
            # if over 50% of peers that were online for a session have
            # become unavailable, don't kill sessions
            if sess.total_success_count > onlinePeerCount:
                if onlinePeerCount / sess.total_success_count >= 0.5:
                    return
            # Clean sessions if they have uploaded to enough online peers
            if sess.total_success_count <= 0:
                continue
            if (sess.total_success_count / onlinePeerCount) >= \
                    onionrvalues.MIN_BLOCK_UPLOAD_PEER_PERCENT:
                sessions_to_delete.append(sess)
        for sess in sessions_to_delete:
            try:
                self.sessions.remove(sess)
            except ValueError:
                pass
            # TODO cleanup to one round of search
            # Remove the blocks from the sessions, upload list,
            # and waitforshare list
            try:
                kv.get('blocksToUpload').remove(
                    reconstructhash.reconstruct_hash(sess.block_hash))
            except ValueError:
                pass
            try:
                kv.get('blocksToUpload').remove(sess.block_hash)
            except ValueError:
                pass
Example #9
def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    # List of existing saved blocks
    existingBlocks = blockmetadb.get_block_list()
    triedPeers = []  # list of peers we've tried this time around
    # Max amount of *new* block hashes to have already in queue,
    # to avoid memory exhaustion
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # This is defined here to reset it each time
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of '
                'allowed disk space used')
            break
        # select random online peer
        peer = onlinepeers.pick_online_peer(comm_inst)
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = 0
        else:
            listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            # get list of new block hashes
            newBlocks = peeraction.peer_action(
                comm_inst, peer, listLookupCommand)
        except Exception as error:
            logger.warn('Could not get new blocks from %s.' % peer,
                        error=error)
            newBlocks = False
        else:
            comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
        if newBlocks != False:
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline separated string is valid hash
                    if i not in existingBlocks:
                        # if block does not exist on disk and is not already in block queue
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                    not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # add blocks to download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info('Discovered %s new block%s' %
                    (new_block_count, block_string), terminal=True)
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
Example #10
def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    # List of existing saved blocks
    existingBlocks = get_block_list()
    triedPeers = []  # list of peers we've tried this time around
    # Max amount of *new* block hashes to have in queue
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # Defined here to reset it each time, time offset is added later
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of disk used')
            break
        try:
            # select random online peer
            peer = onlinepeers.pick_online_peer(comm_inst)
        except onionrexceptions.OnlinePeerNeeded:
            time.sleep(1)
            continue
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = epoch.get_epoch() - \
                config.get("general.max_block_age",
                           onionrvalues.DEFAULT_EXPIRE)
        listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(
                comm_inst,
                peer, listLookupCommand)  # get list of new block hashes
        except Exception as error:
            logger.warn(
                f'Could not get new blocks from {peer}.',
                error=error)
            newBlocks = False

        if newBlocks != False:  # noqa
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline separated string is valid hash

                    # if block does not exist on disk + is not already in queue
                    if i not in existingBlocks:
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                 not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # add blocks to download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                                    comm_inst.dbTimestamps[peer] = \
                                        epoch.get_rounded_epoch(roundS=60)
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info(
            f'Discovered {new_block_count} new block{block_string}',
            terminal=True)
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
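In both versions, comm_inst.blockQueue is a dict mapping each discovered block hash to the list of peers that advertised it, capped at roughly a million queued hashes and at most ten peers per hash. A stripped-down sketch of that bookkeeping, with hypothetical names:

def record_advertised_block(block_queue: dict, block_hash: str, peer: str):
    """Track which peers advertise a block, mirroring the caps above."""
    if block_hash not in block_queue:
        if len(block_queue) <= 1000000:  # overall queue size cap
            block_queue[block_hash] = [peer]
    elif peer not in block_queue[block_hash] \
            and len(block_queue[block_hash]) < 10:  # per-hash peer cap
        block_queue[block_hash].append(peer)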
Example #11
    def test_reconstruct(self):
        h = b"4d20d791cbc293999b97cc627aa011692d317dede3d0fbd390c763210b0d"
        self.assertEqual(reconstructhash.reconstruct_hash(h), b"0000" + h)
        h = b"4d20d791cbc293999b97cc627aa011692d317dede3d0fbd390c763210b0d"
        self.assertEqual(reconstructhash.reconstruct_hash(h, 62), b"00" + h)
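The test above pins down the contract: reconstruct_hash left-pads a zero-truncated hash back to a fixed length, 64 characters by default. A minimal sketch of that behavior and its assumed inverse, inferred from these examples rather than taken from the real onionr module:

def reconstruct_hash(hash_value, length=64):
    # Sketch inferred from the test in Example #11; not the actual module
    padding = '0' * (length - len(hash_value))
    if isinstance(hash_value, bytes):
        return padding.encode() + hash_value
    return padding + hash_value


def deconstruct_hash(hash_value):
    # Assumed inverse: strip the leading zeros that reconstruct_hash restores
    zero = b'0' if isinstance(hash_value, bytes) else '0'
    return hash_value.lstrip(zero)

Stripping leading zeros before storage and padding them back before validation is presumably a space optimization; the comment in Example #3 ("Make sure the hash has no truncated zeroes") points at the same convention.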