Example #1
def test_inserted_housekeeping(testmanager):
    """Tests that inserted blocks are proprely deleted"""
    bl = insert('testdata', expire=12)
    wait_seconds = 132  # two minutes plus expire time; each iteration sleeps 0.8s
    count = 0
    if bl in get_block_list():
        while count < wait_seconds:
            if bl in get_block_list():
                sleep(0.8)
                count += 1
            else:
                return
        raise ValueError('Inserted block with expiry not erased')
    else:
        raise ValueError('Inserted block in expiry test not present in list')
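The polling pattern above can be factored into a reusable helper. A minimal sketch under the same imports (the helper name and defaults are illustrative, not part of Onionr):

def wait_until_removed(block_hash, timeout_secs=132, interval=0.8):
    """Poll get_block_list() until block_hash disappears; True on success."""
    waited = 0.0
    while waited < timeout_secs:
        if block_hash not in get_block_list():
            return True
        sleep(interval)
        waited += interval
    return False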
Example #2
    def get_stats(self):
        """Return statistics about our node"""
        stats = {}
        proc = Process()

        def get_open_files():
            if WINDOWS:
                return proc.num_handles()
            return proc.num_fds()

        try:
            self._too_many  # shared object container; may not be set yet
        except AttributeError:
            sleep(1)  # give the daemon a moment to set it
        comm_inst = self._too_many.get(communicator.OnionrCommunicatorDaemon,
                                       args=(self._too_many, ))
        connected = []
        for x in comm_inst.onlinePeers:
            if x not in connected:
                connected.append(x)
        stats['uptime'] = comm_inst.getUptime()
        stats['connectedNodes'] = '\n'.join(connected)
        stats['blockCount'] = len(blockmetadb.get_block_list())
        stats['blockQueueCount'] = len(comm_inst.blockQueue)
        stats['threads'] = proc.num_threads()
        stats['ramPercent'] = proc.memory_percent()
        stats['fd'] = get_open_files()
        stats['diskUsage'] = human_size(size(identify_home()))
        return json.dumps(stats)
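The append-if-missing loop above (originally a side-effect list comprehension) deduplicates onlinePeers while preserving order. An equivalent stand-alone idiom, sketched here with made-up peer values, relies on dict insertion order (Python 3.7+):

online_peers = ['peer-a', 'peer-b', 'peer-a', 'peer-c']
connected = list(dict.fromkeys(online_peers))
print(connected)  # ['peer-a', 'peer-b', 'peer-c']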
Example #3
def import_new_blocks(scanDir=''):
    '''
        This function is intended to scan for new blocks ON THE DISK and import them
    '''
    blockList = blockmetadb.get_block_list()
    exist = False
    if scanDir == '':
        scanDir = filepaths.block_data_location
    if not scanDir.endswith('/'):
        scanDir += '/'
    for block in glob.glob(scanDir + "*%s" % (BLOCK_EXPORT_FILE_EXT, )):
        if block.replace(scanDir, '').replace(BLOCK_EXPORT_FILE_EXT,
                                              '') not in blockList:
            exist = True
            logger.info('Found new block on disk %s' % block, terminal=True)
            with open(block, 'rb') as newBlock:
                block = block.replace(scanDir,
                                      '').replace(BLOCK_EXPORT_FILE_EXT, '')
                if crypto.hashers.sha3_hash(newBlock.read()) == block.replace(
                        BLOCK_EXPORT_FILE_EXT, ''):
                    blockmetadb.add_to_block_DB(block.replace(
                        BLOCK_EXPORT_FILE_EXT, ''),
                                                dataSaved=True)
                    logger.info('Imported block %s' % block, terminal=True)
                    blockmetadata.process_block_metadata(block)
                else:
                    logger.warn('Failed to verify hash for %s' % block,
                                terminal=True)
    if not exist:
        logger.info('No blocks found to import', terminal=True)
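import_new_blocks defaults to the node's own block data directory; passing scanDir scans another location instead. A minimal usage sketch (the removable-drive path is an illustrative assumption):

import_new_blocks()  # scan filepaths.block_data_location
import_new_blocks(scanDir='/mnt/usb/onionr-blocks/')  # e.g. a removable drive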
Example #4
    def get_stats(self):
        """Return statistics about our node"""
        stats = {}
        proc = Process()

        def get_open_files():
            if WINDOWS:
                return proc.num_handles()
            return proc.num_fds()

        try:
            self._too_many  # shared object container; may not be set yet
        except AttributeError:
            sleep(1)  # give the daemon a moment to set it
        kv: "DeadSimpleKV" = self._too_many.get_by_string("DeadSimpleKV")
        connected = []
        for x in kv.get('onlinePeers'):
            if x not in connected:
                connected.append(x)
        stats['uptime'] = get_epoch() - kv.get('startTime')
        stats['connectedNodes'] = '\n'.join(connected)
        stats['blockCount'] = len(blockmetadb.get_block_list())
        stats['blockQueueCount'] = len(kv.get('blockQueue'))
        stats['threads'] = proc.num_threads()
        stats['ramPercent'] = proc.memory_percent()
        stats['fd'] = get_open_files()
        stats['diskUsage'] = human_size(size(identify_home()))
        return json.dumps(stats)
Example #5
def clean_blocks_not_meeting_pow(shared_state):
    """Clean blocks not meeting min send/rec pow. Used if config.json POW changes"""
    block_list = blockmetadb.get_block_list()
    for block in block_list:
        if not hashMeetsDifficulty(block):
            logger.warn(
                f"Deleting block {block} because it was stored "
                "with a POW level smaller than current.", terminal=True)
            __purge_block(shared_state, block)
Example #6
def delete_plaintext_no_blacklist():
    """Delete, but do not blacklist, plaintext blocks."""

    block_list = blockmetadb.get_block_list()

    for block in block_list:
        block = Block(hash=block)
        if not block.isEncrypted:
            remove_block(block.hash)
            onionrstorage.deleteBlock(block.hash)
Example #7
def get_public_block_list(publicAPI, request):
    # Provide a list of our blocks, with a date offset
    dateAdjust = request.args.get('date')
    bList = blockmetadb.get_block_list(dateRec=dateAdjust)
    share_list = ''
    if config.get('general.hide_created_blocks', True):
        for b in publicAPI.hideBlocks:
            if b in bList:
                # Don't share blocks we created if they haven't been
                # *uploaded* yet; makes it harder to find who created a block
                bList.remove(b)
    for b in bList:
        share_list += '%s\n' % (reconstructhash.deconstruct_hash(b), )
    return Response(share_list)
Example #8
def getData(name):
    resp = ""
    if stringvalidators.validate_hash(name):
        if name in blockmetadb.get_block_list():
            try:
                resp = client_get_block.get_block_data(name, decrypt=True)
            except ValueError:
                pass
        else:
            abort(404)
    else:
        abort(404)
    return Response(resp)
Example #9
def test_sneakernet_import(test_manager):
    in_db = lambda b: b in get_block_list()
    bl = insert(os.urandom(10))
    assert in_db(bl)
    export_block(bl)  # write the block to the export directory
    assert os.path.exists(export_location + bl + BLOCK_EXPORT_FILE_EXT)
    remove_block(bl)
    deleteBlock(bl)
    assert not in_db(bl)
    os.remove(data_nonce_file)
    # move the exported file back into the block data dir so it gets re-imported
    move(export_location + bl + BLOCK_EXPORT_FILE_EXT, block_data_location)
    sleep(1)
    assert in_db(bl)
Example #10
def _sync_peer(url):
    our_blocks = get_block_list()
    blocks = requests.get(url + 'blist/0').text.splitlines()
    for block in blocks:
        if block not in our_blocks and hashMeetsDifficulty(block):
            try:
                import_block_from_data(
                    requests.get(
                        url + f'get/{block}', stream=True).raw.read(6000000))
            except onionrexceptions.InvalidMetadata:
                logger.warn(f"Could not get {block} from lan peer")
            except onionrexceptions.InvalidProof:
                logger.warn(
                    f"Invalid proof for {block} from lan peer {url}",
                    terminal=True)
                break
Example #11
def clean_old_blocks(shared_state):
    """Delete expired blocks + old blocks if disk allocation is near full"""
    blacklist = onionrblacklist.OnionrBlackList()
    # Delete expired blocks
    for bHash in blockmetadb.expiredblocks.get_expired_blocks():
        __purge_block(shared_state, bHash, add_to_blacklist=True)
        logger.info('Deleted expired block: %s' % (bHash,))

    while storage_counter.is_full():
        try:
            oldest = blockmetadb.get_block_list()[0]
        except IndexError:
            break
        else:
            __purge_block(shared_state, oldest, add_to_blacklist=True)
            logger.info('Deleted block because of full storage: %s' % (oldest,))
Example #12
    def test_export(self):
        testargs = ["onionr.py", "circlesend", "tests", "hello"]
        with patch.object(sys, 'argv', testargs):
            try:
                parser.register()
            except SystemExit:
                pass
        bl = blockmetadb.get_block_list()[0]
        testargs = ["onionr.py", "export-block", bl]
        with patch.object(sys, 'argv', testargs):
            parser.register()

        with open(export_location + '/' + bl + BLOCK_EXPORT_FILE_EXT,
                  'rb') as f:
            if b'hello' not in f.read():
                raise ValueError('No exported block')
Example #13
def get_public_block_list(public_API, request):
    # Provide a list of our blocks, with a date offset
    date_adjust = request.args.get('date')
    type_filter = request.args.get('type')
    b_list = blockmetadb.get_block_list(date_rec=date_adjust)
    share_list = ''
    if config.get('general.hide_created_blocks', True):
        for b in public_API.hideBlocks:
            if b in b_list:
                # Don't share blocks we created if they haven't been
                # *uploaded* yet; makes it harder to find who created a block
                b_list.remove(b)
    for b in b_list:
        if type_filter:
            if Block(b, decrypt=False).getType() != type_filter:
                continue
        share_list += '%s\n' % (reconstructhash.deconstruct_hash(b), )
    return Response(share_list)
Example #14
def should_download(comm_inst, block_hash) -> bool:
    """Return bool for if a (assumed to exist) block should be downloaded."""
    blacklist = onionrblacklist.OnionrBlackList()
    should = True
    if block_hash in blockmetadb.get_block_list():
        # Don't download block we have
        should = False
    else:
        if blacklist.inBlacklist(block_hash):
            # Don't download blacklisted block
            should = False
    if should is False:
        # Remove block from communicator queue if it shouldn't be downloaded
        try:
            del comm_inst.blockQueue[block_hash]
        except KeyError:
            pass
    return should
Example #15
def should_download(shared_state, block_hash) -> bool:
    """Return bool for if a (assumed to exist) block should be downloaded."""
    blacklist = onionrblacklist.OnionrBlackList()
    should = True
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    if block_hash in blockmetadb.get_block_list():
        # Don't download block we have
        should = False
    else:
        if blacklist.inBlacklist(block_hash):
            # Don't download blacklisted block
            should = False
    if should is False:
        # Remove block from communicator queue if it shouldn't be downloaded
        try:
            del kv.get('blockQueue')[block_hash]
        except KeyError:
            pass
    return should
Example #16
    def get_stats(self):
        """Return statistics about our node"""
        stats = {}
        try:
            self._too_many  # shared object container; may not be set yet
        except AttributeError:
            time.sleep(1)  # give the daemon a moment to set it
        comm_inst = self._too_many.get(communicator.OnionrCommunicatorDaemon,
                                       args=(self._too_many, ))
        connected = []
        for x in comm_inst.onlinePeers:
            if x not in connected:
                connected.append(x)
        stats['uptime'] = comm_inst.getUptime()
        stats['connectedNodes'] = '\n'.join(connected)
        stats['blockCount'] = len(blockmetadb.get_block_list())
        stats['blockQueueCount'] = len(comm_inst.blockQueue)
        return json.dumps(stats)
Example #17
def get_block_data(publicAPI, data):
    '''data is the block hash in hex'''
    resp = ''
    if stringvalidators.validate_hash(data):
        if not config.get('general.hide_created_blocks',
                          True) or data not in publicAPI.hideBlocks:
            if data in blockmetadb.get_block_list():
                block = apiutils.GetBlockData().get_block_data(data, raw=True)
                try:
                    block = block.encode()  # Encode in case data is binary
                except AttributeError:
                    if len(block) == 0:
                        abort(404)
                block = bytesconverter.str_to_bytes(block)
                resp = block
    if len(resp) == 0:
        abort(404)
    # Has to be octet stream, otherwise binary data fails hash check
    return Response(resp, mimetype='application/octet-stream')
Example #18
def clean_old_blocks(comm_inst):
    '''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''
    blacklist = onionrblacklist.OnionrBlackList()
    # Delete expired blocks
    for bHash in blockmetadb.expiredblocks.get_expired_blocks():
        blacklist.addToDB(bHash)
        removeblock.remove_block(bHash)
        onionrstorage.deleteBlock(bHash)
        __remove_from_upload(comm_inst, bHash)
        logger.info('Deleted block: %s' % (bHash, ))

    while comm_inst.storage_counter.is_full():
        oldest = blockmetadb.get_block_list()[0]
        blacklist.addToDB(oldest)
        removeblock.remove_block(oldest)
        onionrstorage.deleteBlock(oldest)
        __remove_from_upload(comm_inst, oldest)
        logger.info('Deleted block: %s' % (oldest, ))

    comm_inst.decrementThreadCount('clean_old_blocks')
Example #19
def clean_old_blocks(comm_inst):
    """Delete expired blocks + old blocks if disk allocation is near full"""
    blacklist = onionrblacklist.OnionrBlackList()
    # Delete expired blocks
    for bHash in blockmetadb.expiredblocks.get_expired_blocks():
        blacklist.addToDB(bHash)
        removeblock.remove_block(bHash)
        onionrstorage.deleteBlock(bHash)
        __remove_from_upload(comm_inst, bHash)
        logger.info('Deleted block: %s' % (bHash, ))

    while storage_counter.is_full():
        try:
            oldest = blockmetadb.get_block_list()[0]
        except IndexError:
            break
        else:
            blacklist.addToDB(oldest)
            removeblock.remove_block(oldest)
            onionrstorage.deleteBlock(oldest)
            __remove_from_upload(comm_inst, oldest)
            logger.info('Deleted block: %s' % (oldest, ))

    comm_inst.decrementThreadCount('clean_old_blocks')
Example #20
import sys
import os

if not os.path.exists('onionr.sh'):
    os.chdir('../')
sys.path.append("src/")
from coredb.blockmetadb import get_block_list
from onionrblocks.onionrblockapi import Block

for bl in get_block_list():
    bl_obj = Block(bl, decrypt=False)
    b_type = bl_obj.getType()
    if not b_type:
        b_type = "encrypted"
    print(bl + " - " + str(bl_obj.date) + " - " + b_type)
Example #21
def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    existingBlocks = blockmetadb.get_block_list() # List of existing saved blocks
    triedPeers = [] # list of peers we've tried this time around
    maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
    lastLookupTime = 0 # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        listLookupCommand = 'getblocklist' # This is defined here to reset it each time
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
            break
        peer = onlinepeers.pick_online_peer(comm_inst) # select random online peer
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = 0
        else:
            listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(comm_inst, peer, listLookupCommand) # get list of new block hashes
        except Exception as error:
            logger.warn('Could not get new blocks from %s.' % peer, error=error)
            newBlocks = False
        else:
            comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
        if newBlocks is not False:
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline separated string is valid hash
                    if i not in existingBlocks:
                        # if block does not exist on disk and is not already in block queue
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    comm_inst.blockQueue[i] = [peer] # add blocks to download queue
                                    new_block_count += 1
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info('Discovered %s new block%s' % (new_block_count, block_string), terminal=True)
        comm_inst.download_blocks_timer.count = int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
    return
Example #22
def _sync_peer(url):
    our_blocks = get_block_list()
    blocks = requests.get(url + 'blist/0').text.splitlines()
    for block in blocks:
        if block not in our_blocks:
            import_block_from_data(
                requests.get(url + f'get/{block}',
                             stream=True).raw.read(6000000))
Example #23
    def refresh_db(self):
        self.check_time = get_epoch()
        if not self.block_type:
            self.block_list = get_block_list()
        else:
            self.block_list = get_blocks_by_type(self.block_type)
Example #24
def show_stats():
    """Print/log statistic info about our Onionr install."""
    try:
        # define stats messages here
        totalBlocks = len(blockmetadb.get_block_list())
        home = identifyhome.identify_home()
        totalBanned = len(onionrblacklist.OnionrBlackList().getList())

        messages = {
            # info about local client
            'Onionr Daemon Status':
            ((logger.colors.fg.green + 'Online') if check_communicator(
                timeout=9) else logger.colors.fg.red + 'Offline'),

            # file and folder size stats
            'div1':
            True,  # this creates a solid line across the screen, a div
            'Total Block Size':
            sizeutils.human_size(sizeutils.size(home + 'blocks/')),
            'Total Plugin Size':
            sizeutils.human_size(sizeutils.size(home + 'plugins/')),
            'Log File Size':
            sizeutils.human_size(sizeutils.size(home + 'output.log')),

            # count stats
            'div2':
            True,
            'Known Peers (nodes)':
            str(max(len(keydb.listkeys.list_adders()) - 1, 0)),
            'Enabled Plugins':
            str(len(config.get('plugins.enabled', list()))) + ' / ' +
            str(len(os.listdir(home + 'plugins/'))),
            'Stored Blocks':
            str(totalBlocks),
            'Deleted Blocks':
            str(totalBanned)
        }

        # color configuration
        colors = {
            'title': logger.colors.bold,
            'key': logger.colors.fg.lightgreen,
            'val': logger.colors.fg.green,
            'border': logger.colors.fg.lightblue,
            'reset': logger.colors.reset
        }

        # pre-processing
        maxlength = 0
        width = getconsolewidth.get_console_width()
        for key, val in messages.items():
            if not (type(val) is bool and val is True):
                maxlength = max(len(key), maxlength)
        prewidth = maxlength + len(' | ')
        groupsize = width - prewidth - len('[+] ')

        # generate stats table
        logger.info(colors['title'] +
                    'Onionr v%s Statistics' % onionrvalues.ONIONR_VERSION +
                    colors['reset'],
                    terminal=True)
        logger.info(colors['border'] + '-' * (maxlength + 1) + '+' +
                    colors['reset'],
                    terminal=True)
        for key, val in messages.items():
            if not (type(val) is bool and val is True):
                val = [
                    str(val)[i:i + groupsize]
                    for i in range(0, len(str(val)), groupsize)
                ]

                logger.info(colors['key'] + str(key).rjust(maxlength) +
                            colors['reset'] + colors['border'] + ' | ' +
                            colors['reset'] + colors['val'] + str(val.pop(0)) +
                            colors['reset'],
                            terminal=True)

                for value in val:
                    logger.info(' ' * maxlength + colors['border'] + ' | ' +
                                colors['reset'] + colors['val'] + str(value) +
                                colors['reset'],
                                terminal=True)
            else:
                logger.info(colors['border'] + '-' * (maxlength + 1) + '+' +
                            colors['reset'],
                            terminal=True)
        logger.info(colors['border'] + '-' * (maxlength + 1) + '+' +
                    colors['reset'],
                    terminal=True)
    except Exception as e:  # pylint: disable=W0703
        logger.error('Failed to generate statistics table. ' + str(e),
                     error=e,
                     timestamp=False,
                     terminal=True)
Example #25
def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    # List of existing saved blocks
    existingBlocks = get_block_list()
    triedPeers = []  # list of peers we've tried this time around
    # Max amount of *new* block hashes to have in queue
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # Defined here to reset it each time, time offset is added later
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of disk used')
            break
        try:
            # select random online peer
            peer = onlinepeers.pick_online_peer(comm_inst)
        except onionrexceptions.OnlinePeerNeeded:
            time.sleep(1)
            continue
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = epoch.get_epoch() - \
                config.get("general.max_block_age",
                           onionrvalues.DEFAULT_EXPIRE)
        listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(
                comm_inst,
                peer, listLookupCommand)  # get list of new block hashes
        except Exception as error:
            logger.warn(
                f'Could not get new blocks from {peer}.',
                error=error)
            newBlocks = False

        if newBlocks != False:  # noqa
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline separated string is valid hash

                    # if block does not exist on disk + is not already in queue
                    if i not in existingBlocks:
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                 not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # add blocks to download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                                    comm_inst.dbTimestamps[peer] = \
                                        epoch.get_rounded_epoch(roundS=60)
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info(
            f'Discovered {new_block_count} new block{block_string}',
            terminal=True)
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
Example #26
def get_block_list_for_lan(time):
    return Response('\n'.join(get_block_list(date_rec=time)))
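This route is the server side of the _sync_peer clients in Examples #10 and #22, which request blist/0 for the full list; passing a recent epoch instead of 0 narrows the response to newer blocks. A hedged client sketch (the helper name and one-hour window are illustrative):

import time
import requests

def fetch_recent_block_list(url, window=3600):
    # Ask a LAN peer only for block hashes received in the last `window` seconds
    since = int(time.time()) - window
    return requests.get(url + 'blist/%s' % since).text.splitlines()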