Example #1
def ban_block():
    """Deletes a block, permanently blacklisting it"""
    blacklist = onionrblacklist.OnionrBlackList()
    try:
        ban = sys.argv[2]
    except IndexError:
        # Get the hash if it's not provided as a CLI argument
        ban = logger.readline('Enter a block hash:').strip()
    # Restore any truncated leading zeroes in the hash
    ban = reconstructhash.reconstruct_hash(ban)
    if stringvalidators.validate_hash(ban):
        if not blacklist.inBlacklist(ban):
            try:
                blacklist.addToDB(ban)
                removeblock.remove_block(ban)
                deleteBlock(ban)
            except Exception as error:
                logger.error('Could not blacklist block',
                             error=error,
                             terminal=True)
            else:
                logger.info('Block blacklisted', terminal=True)
        else:
            logger.warn('That block is already blacklisted', terminal=True)
    else:
        logger.error('Invalid block hash', terminal=True)
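The pattern above (validate the hash, check the blacklist, then add and delete) is the core OnionrBlackList workflow. A minimal sketch of those calls in isolation, assuming an initialized Onionr data directory so the blacklist database can be opened; the hash value is a hypothetical placeholder:

# Minimal sketch of the blacklist calls used above; assumes an initialized
# Onionr data directory so OnionrBlackList() can open its database.
from onionrblocks import onionrblacklist

blacklist = onionrblacklist.OnionrBlackList()
block_hash = 'ab' * 32  # hypothetical 64-character hex hash, for illustration
if not blacklist.inBlacklist(block_hash):
    blacklist.addToDB(block_hash)  # the block is now permanently banned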
Example #2
def mergeAdders(newAdderList):
    '''
        Merge a comma-separated list of peer transport addresses into our database
    '''
    blacklist = onionrblacklist.OnionrBlackList()
    retVal = False
    if newAdderList is not False:
        for adder in newAdderList.split(','):
            adder = adder.strip()
            if (adder not in keydb.listkeys.list_adders(randomOrder=False)
                    and adder not in gettransports.get()
                    and not blacklist.inBlacklist(adder)):
                if keydb.addkeys.add_address(adder):
                    # Check whether we are still under the allowed number of stored peers
                    if config.get('peers.max_stored_peers') > len(
                            keydb.listkeys.list_adders()):
                        logger.info('Added %s to db.' % adder, timestamp=True)
                        retVal = True
                    else:
                        logger.warn(
                            'Reached the maximum number of peers in the net database as allowed by your config.'
                        )
            else:
                pass
                #logger.debug('%s is either our address or already in our DB' % adder)
    return retVal
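Note that mergeAdders takes one comma-separated string of transport addresses rather than a list, and returns True only when at least one new address was stored under the configured peer limit. A hedged usage sketch (both .onion addresses are placeholders):

# Hypothetical call; both addresses below are placeholders, not real nodes.
new_peers = 'exampleaaaaaaaaaaaaaaaa.onion,examplebbbbbbbbbbbbbbbb.onion'
if mergeAdders(new_peers):
    logger.info('Stored at least one new peer address.')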
Example #3
def peer_cleanup():
    '''Removes peers who have been offline too long or score too low'''
    logger.info('Cleaning peers...')
    blacklist = onionrblacklist.OnionrBlackList()
    adders = scoresortedpeerlist.get_score_sorted_peer_list()
    adders.reverse()

    if len(adders) > 1:

        min_score = int(config.get('peers.minimum_score', -100))
        max_peers = int(config.get('peers.max_stored', 5000))

        for address in adders:
            # Remove peers that go below the negative score
            if peerprofiles.PeerProfiles(address).score < min_score:
                keydb.removekeys.remove_address(address)
                try:
                    lastConnect = int(
                        keydb.transportinfo.get_address_info(
                            address, 'lastConnect'))
                    # Ban for the remainder of a day since the last connection
                    expireTime = 86400 - (int(epoch.get_epoch()) - lastConnect)
                    blacklist.addToDB(address, dataType=1, expire=expireTime)
                except sqlite3.IntegrityError:  # TODO: make sure it's not a unique constraint issue
                    pass
                except ValueError:
                    pass
                logger.warn('Removed address ' + address + '.')

    # Unban peers that are probably not malicious (TODO: improve)
    blacklist.deleteExpired(dataType=1)
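peer_cleanup passes dataType=1 (address entries) together with expire so address bans lapse, while block bans default to permanent. A short sketch of a temporary ban, assuming expire is a duration in seconds as the deleteExpired call above suggests:

# Sketch of a one-day address ban; assumes expire is a duration in seconds.
from onionrblocks import onionrblacklist

blacklist = onionrblacklist.OnionrBlackList()
blacklist.addToDB('exampleaddress.onion', dataType=1, expire=86400)
# Later, e.g. on a cleanup timer:
blacklist.deleteExpired(dataType=1)  # drop address bans past their expiry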
Example #4
def __purge_block(shared_state, block_hash, add_to_blacklist=True):
    blacklist = None

    removeblock.remove_block(block_hash)
    onionrstorage.deleteBlock(block_hash)
    __remove_from_upload(shared_state, block_hash)

    if add_to_blacklist:
        blacklist = onionrblacklist.OnionrBlackList()
        blacklist.addToDB(block_hash)
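Because __purge_block centralizes removal (block metadata, storage, and the upload queue), callers only toggle whether the hash should also be banned. A sketch of a space-reclaiming deletion that skips the blacklist so the block can be re-synced later:

# Sketch: purge to free space without banning the hash, so peers
# can still offer the block again later.
__purge_block(shared_state, block_hash, add_to_blacklist=False)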
Example #5
    def __init__(self, address):
        if not stringvalidators.validate_transport(address):
            raise onionrexceptions.InvalidAddress
        self.address = address # node address
        self.score = None
        self.friendSigCount = 0
        self.success = 0
        self.failure = 0
        self.connectTime = None

        self.loadScore()
        self.getConnectTime()

        # Last time a given value was updated
        self.last_updated = {'connect_time': UPDATE_DELAY,
                             'score': UPDATE_DELAY}

        if address not in keydb.listkeys.list_adders() and \
                not onionrblacklist.OnionrBlackList().inBlacklist(address):
            keydb.addkeys.add_address(address)
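Constructing a PeerProfiles object loads the peer's score and connect time and, as a side effect, stores a previously unseen, non-blacklisted address in the key database. A hedged sketch of typical use (the address is a placeholder; addScore is the method the download examples below use to punish peers):

# Sketch; InvalidAddress is raised for malformed transport strings.
try:
    profile = PeerProfiles('exampleaddress.onion')  # placeholder address
except onionrexceptions.InvalidAddress:
    pass
else:
    profile.addScore(-50)  # e.g. punish a peer that served a bad block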
Example #6
def clean_old_blocks(shared_state):
    """Delete expired blocks + old blocks if disk allocation is near full"""
    blacklist = onionrblacklist.OnionrBlackList()
    # Delete expired blocks
    for bHash in blockmetadb.expiredblocks.get_expired_blocks():
        __purge_block(shared_state, bHash, add_to_blacklist=True)
        logger.info('Deleted expired block: %s' % (bHash,))

    while storage_counter.is_full():
        try:
            oldest = blockmetadb.get_block_list()[0]
        except IndexError:
            break
        else:
            __purge_block(shared_state, oldest, add_to_blacklist=True)
            logger.info('Deleted block because of full storage: %s' % (oldest,))
Example #7
def should_download(comm_inst, block_hash) -> bool:
    """Return bool for if a (assumed to exist) block should be downloaded."""
    blacklist = onionrblacklist.OnionrBlackList()
    should = True
    if block_hash in blockmetadb.get_block_list():
        # Don't download block we have
        should = False
    else:
        if blacklist.inBlacklist(block_hash):
            # Don't download blacklisted block
            should = False
    if should is False:
        # Remove block from communicator queue if it shouldn't be downloaded
        try:
            del comm_inst.blockQueue[block_hash]
        except KeyError:
            pass
    return should
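The check is cheap (two local lookups), so the downloader runs it immediately before every fetch; anything already stored or blacklisted is silently dropped from the queue. A sketch of the calling pattern:

# Sketch of the gate used by the downloader loop.
for block_hash in list(comm_inst.blockQueue):
    if not should_download(comm_inst, block_hash):
        continue  # already stored or blacklisted; queue entry was removed
    # ...otherwise proceed to request the block from a peer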
Example #8
def should_download(shared_state, block_hash) -> bool:
    """Return bool for if a (assumed to exist) block should be downloaded."""
    blacklist = onionrblacklist.OnionrBlackList()
    should = True
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    if block_hash in blockmetadb.get_block_list():
        # Don't download block we have
        should = False
    else:
        if blacklist.inBlacklist(block_hash):
            # Don't download blacklisted block
            should = False
    if should is False:
        # Remove block from communicator queue if it shouldn't be downloaded
        try:
            del kv.get('blockQueue')[block_hash]
        except KeyError:
            pass
    return should
Example #9
def clean_old_blocks(comm_inst):
    '''Delete expired blocks, plus old blocks if our disk allocation is full or near full'''
    blacklist = onionrblacklist.OnionrBlackList()
    # Delete expired blocks
    for bHash in blockmetadb.expiredblocks.get_expired_blocks():
        blacklist.addToDB(bHash)
        removeblock.remove_block(bHash)
        onionrstorage.deleteBlock(bHash)
        __remove_from_upload(comm_inst, bHash)
        logger.info('Deleted block: %s' % (bHash, ))

    while comm_inst.storage_counter.is_full():
        oldest = blockmetadb.get_block_list()[0]
        blacklist.addToDB(oldest)
        removeblock.remove_block(oldest)
        onionrstorage.deleteBlock(oldest)
        __remove_from_upload(comm_inst, oldest)
        logger.info('Deleted block: %s' % (oldest, ))

    comm_inst.decrementThreadCount('clean_old_blocks')
Example #10
def clean_old_blocks(comm_inst):
    """Delete expired blocks + old blocks if disk allocation is near full"""
    blacklist = onionrblacklist.OnionrBlackList()
    # Delete expired blocks
    for bHash in blockmetadb.expiredblocks.get_expired_blocks():
        blacklist.addToDB(bHash)
        removeblock.remove_block(bHash)
        onionrstorage.deleteBlock(bHash)
        __remove_from_upload(comm_inst, bHash)
        logger.info('Deleted block: %s' % (bHash, ))

    while storage_counter.is_full():
        try:
            oldest = blockmetadb.get_block_list()[0]
        except IndexError:
            break
        else:
            blacklist.addToDB(oldest)
            removeblock.remove_block(oldest)
            onionrstorage.deleteBlock(oldest)
            __remove_from_upload(comm_inst, oldest)
            logger.info('Deleted block: %s' % (oldest, ))

    comm_inst.decrementThreadCount('clean_old_blocks')
Example #11
def download_blocks_from_communicator(shared_state: "TooMany"):
    """Use communicator instance to download blocks in the comms's queue"""
    blacklist = onionrblacklist.OnionrBlackList()
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    LOG_SKIP_COUNT = 50  # log the queue size only once per this many iterations
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(kv.get('blockQueue')):
        count += 1

        try:
            blockPeers = list(kv.get('blockQueue')[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(shared_state, blockHash):
            continue

        if kv.get('shutdown') or not kv.get('isOnline') or \
                storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break
        # Do not download blocks being downloaded
        if blockHash in kv.get('currentDownloading'):
            continue

        if len(kv.get('onlinePeers')) == 0:
            break

        # So we can avoid concurrent downloading of the same block in other threads
        kv.get('currentDownloading').append(blockHash)
        if len(blockPeers) == 0:
            try:
                peerUsed = onlinepeers.pick_online_peer(kv)
            except onionrexceptions.OnlinePeerNeeded:
                continue
        else:
            SystemRandom().shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not kv.get('shutdown') and peerUsed.strip() != '':
            logger.info(
                f"Attempting to download {blockHash[:12]} from {peerUsed}...")
        content = peeraction.peer_action(
            shared_state,
            peerUsed,
            'getdata/' + blockHash,
            max_resp_size=3000000)  # block content from random peer

        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                # sha3_hash returns bytes on some versions for some reason
                realHash = realHash.decode()
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode() # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(
                    content
                )  # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = \
                        validatemetadata.validate_metadata(metadata, metas[2])
                except onionrexceptions.PlaintextNotSupported:
                    logger.debug(
                        f"Not saving {blockHash} due to plaintext not enabled")
                    removeFromQueue = True
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result:  # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(
                            content):  # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' %
                                        (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error(
                                'Reached disk allocation allowance, cannot save block %s.'
                                % (blockHash, ))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(
                                blockHash,
                                dataSaved=True)  # add block to meta db
                            blockmetadata.process_block_metadata(
                                blockHash
                            )  # caches block metadata values to block database
                            spawn(local_command,
                                  '/daemon-event/upload_event',
                                  post=True,
                                  is_json=True,
                                  post_data={'block': blockHash})
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.' %
                                    (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # Block didn't meet the expected hash; reuse the hash computed
                # above rather than hashing the content a second time
                tempHash = realHash
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if 404 since it's likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
            if removeFromQueue:
                try:
                    del kv.get(
                        'blockQueue'
                    )[blockHash]  # remove from block queue both if success or false
                    if count == LOG_SKIP_COUNT:
                        logger.info('%s blocks remaining in queue' %
                                    (len(kv.get('blockQueue')),),
                                    terminal=True)
                        count = 0
                except KeyError:
                    pass
        kv.get('currentDownloading').remove(blockHash)
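The integrity check in the loop above reduces to: does the SHA3-256 of the returned bytes equal the hash that was requested? A self-contained sketch with hashlib, assuming onionrcrypto.hashers.sha3_hash is plain SHA3-256 over the raw block content:

# Standalone sketch of the integrity check; assumes sha3_hash is
# hashlib-style SHA3-256 over the raw block bytes.
import hashlib

def block_content_matches(requested_hash: str, content: bytes) -> bool:
    """Return True if content hashes to the hash we asked the peer for."""
    return hashlib.sha3_256(content).hexdigest() == requested_hash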
Example #12
import logger, onionrproofs
from onionrutils import stringvalidators, epoch
from communicator import peeraction, onlinepeers
from coredb import blockmetadb
from utils import reconstructhash
from onionrblocks import onionrblacklist
blacklist = onionrblacklist.OnionrBlackList()
def lookup_blocks_from_communicator(comm_inst):
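    """Ask connected peers for new block hashes to add to the download queue."""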
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    existingBlocks = blockmetadb.get_block_list() # List of existing saved blocks
    triedPeers = [] # list of peers we've tried this time around
    maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
    lastLookupTime = 0 # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        listLookupCommand = 'getblocklist' # This is defined here to reset it each time
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
Example #13
def show_stats():
    """Print/log statistic info about our Onionr install."""
    try:
        # define stats messages here
        totalBlocks = len(blockmetadb.get_block_list())
        home = identifyhome.identify_home()
        totalBanned = len(onionrblacklist.OnionrBlackList().getList())

        messages = {
            # info about local client
            'Onionr Daemon Status':
            ((logger.colors.fg.green + 'Online') if check_communicator(
                timeout=9) else logger.colors.fg.red + 'Offline'),

            # file and folder size stats
            'div1':
            True,  # this creates a solid line across the screen, a div
            'Total Block Size':
            sizeutils.human_size(sizeutils.size(home + 'blocks/')),
            'Total Plugin Size':
            sizeutils.human_size(sizeutils.size(home + 'plugins/')),
            'Log File Size':
            sizeutils.human_size(sizeutils.size(home + 'output.log')),

            # count stats
            'div2':
            True,
            'Known Peers (nodes)':
            str(max(len(keydb.listkeys.list_adders()) - 1, 0)),
            'Enabled Plugins':
            str(len(config.get('plugins.enabled', list()))) + ' / ' +
            str(len(os.listdir(home + 'plugins/'))),
            'Stored Blocks':
            str(totalBlocks),
            'Deleted Blocks':
            str(totalBanned)
        }

        # color configuration
        colors = {
            'title': logger.colors.bold,
            'key': logger.colors.fg.lightgreen,
            'val': logger.colors.fg.green,
            'border': logger.colors.fg.lightblue,
            'reset': logger.colors.reset
        }

        # pre-processing
        maxlength = 0
        width = getconsolewidth.get_console_width()
        for key, val in messages.items():
            if not (type(val) is bool and val is True):
                maxlength = max(len(key), maxlength)
        prewidth = maxlength + len(' | ')
        groupsize = width - prewidth - len('[+] ')

        # generate stats table
        logger.info(colors['title'] +
                    'Onionr v%s Statistics' % onionrvalues.ONIONR_VERSION +
                    colors['reset'],
                    terminal=True)
        logger.info(colors['border'] + '-' * (maxlength + 1) + '+' +
                    colors['reset'],
                    terminal=True)
        for key, val in messages.items():
            if not (type(val) is bool and val is True):
                val = [
                    str(val)[i:i + groupsize]
                    for i in range(0, len(str(val)), groupsize)
                ]

                logger.info(colors['key'] + str(key).rjust(maxlength) +
                            colors['reset'] + colors['border'] + ' | ' +
                            colors['reset'] + colors['val'] + str(val.pop(0)) +
                            colors['reset'],
                            terminal=True)

                for value in val:
                    logger.info(' ' * maxlength + colors['border'] + ' | ' +
                                colors['reset'] + colors['val'] + str(value) +
                                colors['reset'],
                                terminal=True)
            else:
                logger.info(colors['border'] + '-' * (maxlength + 1) + '+' +
                            colors['reset'],
                            terminal=True)
        logger.info(colors['border'] + '-' * (maxlength + 1) + '+' +
                    colors['reset'],
                    terminal=True)
    except Exception as e:  # pylint: disable=W0703
        logger.error('Failed to generate statistics table. ' + str(e),
                     error=e,
                     timestamp=False,
                     terminal=True)
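The table code wraps long values by slicing each string into groupsize-sized chunks; the same idiom in isolation:

# The chunking idiom used above, in isolation.
val = 'abcdefghijklmnopqrstuvwxy'
groupsize = 10
chunks = [val[i:i + groupsize] for i in range(0, len(val), groupsize)]
# chunks == ['abcdefghij', 'klmnopqrst', 'uvwxy']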
Example #14
def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
    '''Use the communicator instance to download blocks in its queue'''
    blacklist = onionrblacklist.OnionrBlackList()
    storage_counter = storagecounter.StorageCounter()
    LOG_SKIP_COUNT = 50  # log the queue size only once per this many iterations
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(comm_inst.blockQueue):
        count += 1
        if len(comm_inst.onlinePeers) == 0:
            break
        triedQueuePeers = []  # List of peers we've tried for a block
        try:
            blockPeers = list(comm_inst.blockQueue[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(comm_inst, blockHash):
            continue

        if comm_inst.shutdown or not comm_inst.isOnline or \
                storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break
        # Do not download blocks being downloaded
        if blockHash in comm_inst.currentDownloading:
            continue

        # So we can avoid concurrent downloading of the same block in other threads
        comm_inst.currentDownloading.append(blockHash)
        if len(blockPeers) == 0:
            peerUsed = onlinepeers.pick_online_peer(comm_inst)
        else:
            blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not comm_inst.shutdown and peerUsed.strip() != '':
            logger.info("Attempting to download %s from %s..." %
                        (blockHash[:12], peerUsed))
        content = peeraction.peer_action(
            comm_inst, peerUsed, 'getdata/' + blockHash, max_resp_size=3000000
        )  # block content from random peer (includes metadata)

        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                # sha3_hash returns bytes on some versions for some reason
                realHash = realHash.decode()
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode() # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(
                    content
                )  # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = validatemetadata.validate_metadata(
                        metadata, metas[2])
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result:  # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(
                            content):  # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' %
                                        (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error(
                                'Reached disk allocation allowance, cannot save block %s.'
                                % (blockHash, ))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(
                                blockHash,
                                dataSaved=True)  # add block to meta db
                            blockmetadata.process_block_metadata(
                                blockHash
                            )  # caches block metadata values to block database
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.' %
                                    (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # Block didn't meet the expected hash; reuse the hash computed
                # above rather than hashing the content a second time
                tempHash = realHash
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if 404 since it's likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
            if removeFromQueue:
                try:
                    del comm_inst.blockQueue[
                        blockHash]  # remove from block queue both if success or false
                    if count == LOG_SKIP_COUNT:
                        logger.info('%s blocks remaining in queue' %
                                    (len(comm_inst.blockQueue),),
                                    terminal=True)
                        count = 0
                except KeyError:
                    pass
        comm_inst.currentDownloading.remove(blockHash)
    comm_inst.decrementThreadCount('getBlocks')