Example #1
def import_block_from_data(content):
    """Import a block from raw data, saving it if its metadata and POW are valid."""
    blacklist = onionrblacklist.OnionrBlackList()
    ret_data = False

    try:
        content = content.encode()
    except AttributeError:
        pass

    data_hash = crypto.hashers.sha3_hash(content)

    if blacklist.inBlacklist(data_hash):
        raise BlacklistedBlock(f'{data_hash} is a blacklisted block')

    # returns tuple(metadata, meta, data); meta is also contained in metadata
    metas = blockmetadata.get_block_metadata_from_data(content)
    metadata = metas[0]

    # check if metadata is valid
    if validatemetadata.validate_metadata(metadata, metas[2]):
        # check if POW is enough/correct
        if crypto.cryptoutils.verify_POW(content):
            logger.info(f'Imported block passed proof, saving: {data_hash}.',
                        terminal=True)
            try:
                blockHash = onionrstorage.set_data(content)
            except DiskAllocationReached:
                logger.warn('Failed to save block due to full disk allocation')
                raise
            else:
                blockmetadb.add_to_block_DB(blockHash, dataSaved=True)
                # caches block metadata values to block database
                blockmetadata.process_block_metadata(blockHash)
                ret_data = blockHash
        else:
            raise InvalidProof
    else:
        raise InvalidMetadata
    return ret_data
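
A caller-side sketch of how import_block_from_data might be wrapped. The import paths (onionrblocks.blockimporter and onionrexceptions) are assumptions about the package layout rather than something shown above; the exception names and the hash-or-False return value are taken directly from the function itself.

import logging

# Assumed module paths, not confirmed by the example above
from onionrblocks.blockimporter import import_block_from_data
import onionrexceptions


def try_import(raw_block: bytes) -> str:
    """Return the stored block hash, or '' if the block was rejected."""
    try:
        block_hash = import_block_from_data(raw_block)
    except onionrexceptions.BlacklistedBlock:
        logging.warning('Block is blacklisted, ignoring it')
        return ''
    except onionrexceptions.InvalidProof:
        logging.warning('Block proof of work was insufficient')
        return ''
    except onionrexceptions.InvalidMetadata:
        logging.warning('Block metadata failed validation')
        return ''
    except onionrexceptions.DiskAllocationReached:
        logging.error('Disk allocation reached, cannot store the block')
        return ''
    # import_block_from_data returns False when nothing was saved
    return block_hash if block_hash else ''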
Example #2
def download_blocks_from_communicator(shared_state: "TooMany"):
    """Use communicator instance to download blocks in the comms's queue"""
    blacklist = onionrblacklist.OnionrBlackList()
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    LOG_SKIP_COUNT = 50  # log the queue size only once every LOG_SKIP_COUNT iterations
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(kv.get('blockQueue')):
        count += 1

        try:
            blockPeers = list(kv.get('blockQueue')[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(shared_state, blockHash):
            continue

        if kv.get('shutdown') or not kv.get('isOnline') or \
                storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break
        # Do not download blocks being downloaded
        if blockHash in kv.get('currentDownloading'):
            continue

        if len(kv.get('onlinePeers')) == 0:
            break

        # So we can avoid concurrent downloading in other threads of same block
        kv.get('currentDownloading').append(blockHash)
        if len(blockPeers) == 0:
            try:
                peerUsed = onlinepeers.pick_online_peer(kv)
            except onionrexceptions.OnlinePeerNeeded:
                # don't leave the hash stuck in currentDownloading if no peer is available
                kv.get('currentDownloading').remove(blockHash)
                continue
        else:
            SystemRandom().shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not kv.get('shutdown') and peerUsed.strip() != '':
            logger.info(f"Attempting to download %s from {peerUsed}..." %
                        (blockHash[:12], ))
        content = peeraction.peer_action(
            shared_state,
            peerUsed,
            'getdata/' + blockHash,
            max_resp_size=3000000)  # block content from random peer

        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                realHash = realHash.decode(
                )  # bytes on some versions for some reason
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode() # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(
                    content
                )  # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = \
                        validatemetadata.validate_metadata(metadata, metas[2])
                except onionrexceptions.PlaintextNotSupported:
                    logger.debug(
                        f"Not saving {blockHash} due to plaintext not enabled")
                    removeFromQueue = True
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result:  # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(
                            content):  # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' %
                                        (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error(
                                'Reached disk allocation allowance, cannot save block %s.'
                                % (blockHash, ))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(
                                blockHash,
                                dataSaved=True)  # add block to meta db
                            blockmetadata.process_block_metadata(
                                blockHash
                            )  # caches block metadata values to block database
                            spawn(local_command,
                                  '/daemon-event/upload_event',
                                  post=True,
                                  is_json=True,
                                  post_data={'block': blockHash})
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.' %
                                    (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = onionrcrypto.hashers.sha3_hash(
                    content)  # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if 404
                    # since it's likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
            if removeFromQueue:
                try:
                    del kv.get(
                        'blockQueue'
                    )[blockHash]  # remove from block queue both if success or false
                    if count == LOG_SKIP_COUNT:
                        logger.info(
                            f"{len(kv.get('blockQueue'))} blocks remaining in queue",
                            terminal=True)
                        count = 0
                except KeyError:
                    pass
        kv.get('currentDownloading').remove(blockHash)
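
The loop above only accepts a block when the hash of the downloaded bytes matches the hash it requested, and penalizes the peer otherwise. A standalone sketch of that check, under the assumption that onionrcrypto.hashers.sha3_hash is a hex-encoded SHA3-256 digest:

import hashlib


def matches_requested_hash(content: bytes, requested_hash: str) -> bool:
    """Return True if the downloaded bytes hash to the block hash we asked for."""
    # Assumption: sha3_hash above is equivalent to a hex SHA3-256 digest
    real_hash = hashlib.sha3_256(content).hexdigest()
    return real_hash == requested_hash


# Usage mirroring the loop above: reject the block and penalize the peer on mismatch
# if not matches_requested_hash(content, blockHash):
#     onionrpeers.PeerProfiles(peerUsed).addScore(-50)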
Example #3
def insert_block(data: Union[str, bytes],
                 header: str = 'txt',
                 sign: bool = False,
                 encryptType: str = '',
                 symKey: str = '',
                 asymPeer: str = '',
                 meta: Union[dict, None] = None,
                 expire: Union[int, None] = None,
                 disableForward: bool = False,
                 signing_key: UserIDSecretKey = '') -> Union[str, bool]:
    """
    Create and insert a block into the network.

    encryptType must be specified to encrypt a block.
    If expire is less than the current epoch time, it is treated as
    seconds into the future; otherwise it is used as an exact epoch.
    """
    if meta is None:
        meta = {}

    our_private_key = crypto.priv_key
    our_pub_key = crypto.pub_key

    storage_counter = storagecounter.StorageCounter()

    allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
    if storage_counter.is_full():
        logger.error(allocationReachedMessage)
        raise onionrexceptions.DiskAllocationReached

    if signing_key != '':
        # if it was specified to use an alternative private key
        our_private_key = signing_key
        our_pub_key = bytesconverter.bytes_to_str(
            crypto.cryptoutils.get_pub_key_from_priv(our_private_key))

    retData = False

    if data is None:
        raise ValueError('Data cannot be None')

    createTime = epoch.get_epoch()

    dataNonce = bytesconverter.bytes_to_str(crypto.hashers.sha3_hash(data))
    try:
        with open(filepaths.data_nonce_file, 'r') as nonces:
            # reject duplicate blocks by their content nonce
            if dataNonce in nonces.read().splitlines():
                return retData
    except FileNotFoundError:
        pass
    # record nonce
    with open(filepaths.data_nonce_file, 'a') as nonceFile:
        nonceFile.write(dataNonce + '\n')

    plaintext = data
    plaintextMeta = {}
    plaintextPeer = asymPeer

    retData = ''
    signature = ''
    signer = ''
    metadata = {}

    # metadata is full block metadata
    # meta is internal, user specified metadata

    # the block type always comes from the header argument
    # (overwriting any 'type' key already present in meta)
    meta['type'] = str(header)

    if encryptType in ('asym', 'sym'):
        metadata['encryptType'] = encryptType
    else:
        if not config.get('general.store_plaintext_blocks', True):
            raise onionrexceptions.InvalidMetadata(
                "Plaintext blocks are disabled, " +
                "yet a plaintext block was being inserted")
        if encryptType not in ('', None):
            raise onionrexceptions.InvalidMetadata(
                'encryptType must be asym or sym, or blank')

    try:
        data = data.encode()
    except AttributeError:
        pass

    if encryptType == 'asym':
        # Duplicate the time in encrypted messages to help prevent replays
        meta['rply'] = createTime
        if sign and asymPeer != our_pub_key:
            try:
                forwardEncrypted = onionrusers.OnionrUser(
                    asymPeer).forwardEncrypt(data)
                data = forwardEncrypted[0]
                meta['forwardEnc'] = True
                # Expire time of key. no sense keeping block after that
                expire = forwardEncrypted[2]
            except onionrexceptions.InvalidPubkey:
                pass
            if not disableForward:
                fsKey = onionrusers.OnionrUser(asymPeer).generateForwardKey()
                meta['newFSKey'] = fsKey
    jsonMeta = json.dumps(meta)
    plaintextMeta = jsonMeta
    if sign:
        signature = crypto.signing.ed_sign(jsonMeta.encode() + data,
                                           key=our_private_key,
                                           encodeResult=True)
        signer = our_pub_key

    if len(jsonMeta) > 1000:
        raise onionrexceptions.InvalidMetadata(
            'meta in json encoded form must not exceed 1000 bytes')

    # encrypt block metadata/sig/content
    if encryptType == 'sym':
        raise NotImplementedError("not yet implemented")
    elif encryptType == 'asym':
        if stringvalidators.validate_pub_key(asymPeer):
            # Encrypt block data with forward secrecy key first, but not meta
            jsonMeta = json.dumps(meta)
            jsonMeta = crypto.encryption.pub_key_encrypt(
                jsonMeta, asymPeer, encodedData=True).decode()
            data = crypto.encryption.pub_key_encrypt(data,
                                                     asymPeer,
                                                     encodedData=False)
            signature = crypto.encryption.pub_key_encrypt(
                signature, asymPeer, encodedData=True).decode()
            signer = crypto.encryption.pub_key_encrypt(
                signer, asymPeer, encodedData=True).decode()
            try:
                onionrusers.OnionrUser(asymPeer, saveUser=True)
            except ValueError:
                # if peer is already known
                pass
        else:
            raise onionrexceptions.InvalidPubkey(
                asymPeer + ' is not a valid base32 encoded ed25519 key')

    # compile metadata
    metadata['meta'] = jsonMeta
    if len(signature) > 0:  # I don't like not pattern
        metadata['sig'] = signature
        metadata['signer'] = signer
    metadata['time'] = createTime

    # ensure expire is integer and of sane length
    if expire is not None:
        if len(str(int(expire))) >= 20:
            raise ValueError(
                'expire must be valid int less than 20 digits in length')
        # if expire is less than date, assume seconds into future
        if expire < epoch.get_epoch():
            expire = epoch.get_epoch() + expire
        metadata['expire'] = expire

    # send block data (and metadata) to POW module to get tokenized block data
    payload = subprocesspow.SubprocessPOW(data, metadata).start()
    if payload != False:  # noqa
        try:
            retData = onionrstorage.set_data(payload)
        except onionrexceptions.DiskAllocationReached:
            logger.error(allocationReachedMessage)
            retData = False
        else:
            if disableForward:
                logger.warn(
                    f'{retData} asym encrypted block created w/o ephemerality')
            """
            Tell the api server through localCommand to wait for the daemon to
            upload this block to make statistical analysis more difficult
            """
            spawn(localcommand.local_command,
                  '/daemon-event/upload_event',
                  post=True,
                  is_json=True,
                  post_data={
                      'block': retData
                  }).get(timeout=5)
            coredb.blockmetadb.add.add_to_block_DB(retData,
                                                   selfInsert=True,
                                                   dataSaved=True)

            if expire is None:
                coredb.blockmetadb.update_block_info(
                    retData, 'expire', createTime + min(
                        onionrvalues.DEFAULT_EXPIRE,
                        config.get('general.max_block_age',
                                   onionrvalues.DEFAULT_EXPIRE)))
            else:
                coredb.blockmetadb.update_block_info(retData, 'expire', expire)

            blockmetadata.process_block_metadata(retData)

    if retData != False:  # noqa
        if plaintextPeer == onionrvalues.DENIABLE_PEER_ADDRESS:
            events.event('insertdeniable', {
                'content': plaintext,
                'meta': plaintextMeta,
                'hash': retData,
                'peer': bytesconverter.bytes_to_str(asymPeer)
            },
                         threaded=True)
        else:
            events.event('insertblock', {
                'content': plaintext,
                'meta': plaintextMeta,
                'hash': retData,
                'peer': bytesconverter.bytes_to_str(asymPeer)
            },
                         threaded=True)

    spawn(localcommand.local_command,
          '/daemon-event/remove_from_insert_queue_wrapper',
          post=True,
          post_data={
              'block_hash': retData
          },
          is_json=True).get(timeout=5)
    return retData
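
A hypothetical caller for insert_block. The module paths in the imports are assumptions; the parameter names, the DiskAllocationReached exception, and the hash-or-False return value come from the function above.

# Assumed module paths, not confirmed by the example above
from onionrblocks import insert_block
import onionrexceptions


def publish_text(message: str, recipient_pub_key: str = '') -> str:
    """Insert a plaintext or asymmetrically encrypted text block, returning its hash."""
    try:
        if recipient_pub_key:
            # Signed block, asymmetrically encrypted to a single recipient
            block_hash = insert_block(message, header='txt', sign=True,
                                      encryptType='asym',
                                      asymPeer=recipient_pub_key)
        else:
            # Plaintext block expiring in one hour (expire < current epoch,
            # so it is treated as seconds into the future)
            block_hash = insert_block(message, header='txt', expire=3600)
    except onionrexceptions.DiskAllocationReached:
        return ''
    # insert_block returns False when the content nonce was already recorded
    return block_hash or ''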
Example #4
def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
    '''Use the communicator instance to download blocks in the communicator's queue.'''
    blacklist = onionrblacklist.OnionrBlackList()
    storage_counter = storagecounter.StorageCounter()
    LOG_SKIP_COUNT = 50  # log the queue size only once every LOG_SKIP_COUNT iterations
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(comm_inst.blockQueue):
        count += 1
        if len(comm_inst.onlinePeers) == 0:
            break
        triedQueuePeers = []  # List of peers we've tried for a block
        try:
            blockPeers = list(comm_inst.blockQueue[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(comm_inst, blockHash):
            continue

        if comm_inst.shutdown or not comm_inst.isOnline \
                or storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break
        # Do not download blocks being downloaded
        if blockHash in comm_inst.currentDownloading:
            continue

        # So we can avoid concurrent downloading of the same block in other threads
        comm_inst.currentDownloading.append(blockHash)
        if len(blockPeers) == 0:
            peerUsed = onlinepeers.pick_online_peer(comm_inst)
        else:
            blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not comm_inst.shutdown and peerUsed.strip() != '':
            logger.info("Attempting to download %s from %s..." %
                        (blockHash[:12], peerUsed))
        content = peeraction.peer_action(
            comm_inst, peerUsed, 'getdata/' + blockHash, max_resp_size=3000000
        )  # block content from random peer (includes metadata)

        if content != False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                realHash = realHash.decode(
                )  # bytes on some versions for some reason
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode() # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(
                    content
                )  # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = validatemetadata.validate_metadata(
                        metadata, metas[2])
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result:  # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(
                            content):  # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' %
                                    blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' %
                                        (blockHash, ))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error(
                                'Reached disk allocation allowance, cannot save block %s.'
                                % (blockHash, ))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(
                                blockHash,
                                dataSaved=True)  # add block to meta db
                            blockmetadata.process_block_metadata(
                                blockHash
                            )  # caches block metadata values to block database
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash, ))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash, ))
                    else:
                        logger.warn('Metadata for block %s is invalid.' %
                                    (blockHash, ))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = onionrcrypto.hashers.sha3_hash(
                    content)  # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if 404
                    # since it's likely not malicious or a critical error.
                    logger.warn('Block hash validation failed for ' +
                                blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False  # Don't remove from queue if 404
            if removeFromQueue:
                try:
                    del comm_inst.blockQueue[
                        blockHash]  # remove from block queue both if success or false
                    if count == LOG_SKIP_COUNT:
                        logger.info(
                            f'{len(comm_inst.blockQueue)} blocks remaining in queue',
                            terminal=True)
                        count = 0
                except KeyError:
                    pass
        comm_inst.currentDownloading.remove(blockHash)
    comm_inst.decrementThreadCount('getBlocks')
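
Both versions of the download loop iterate over a list(...) snapshot of the block queue so that entries can be deleted from the underlying mapping while iterating. A minimal illustration of that pattern, with a plain dict standing in for the queue:

def drain_queue(block_queue: dict) -> None:
    """Process and remove queue entries without mutating the dict mid-iteration."""
    # Snapshot the keys first; deleting from block_queue inside the loop is then safe
    for block_hash in list(block_queue):
        peers = block_queue.get(block_hash, [])
        print(f'would download {block_hash} from {len(peers)} known peer(s)')
        # Mirrors `del comm_inst.blockQueue[blockHash]` in the loop above
        block_queue.pop(block_hash, None)


drain_queue({'abc123': ['peer1.onion'], 'def456': []})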