Code example #1
        def send_headers(resp):
            """Send api, access control headers"""
            resp = httpheaders.set_default_onionr_http_headers(resp)
            # Network API version
            resp.headers['X-API'] = public_api.API_VERSION
            resp.headers['Access-Control-Allow-Origin'] = "*"
            resp.headers['Access-Control-Allow-Headers'] = "*"
            resp.headers['Access-Control-Allow-Methods'] = "POST, GET, OPTIONS"
            # Delete some HTTP headers for Onionr user agents
            NON_NETWORK_HEADERS = ('Content-Security-Policy',
                                   'X-Frame-Options', 'X-Content-Type-Options',
                                   'Feature-Policy', 'Clear-Site-Data',
                                   'Referrer-Policy',
                                   'Access-Control-Allow-Origin',
                                   'Access-Control-Allow-Headers',
                                   'Access-Control-Allow-Methods')

            # For other nodes, we don't need to waste bits on the above headers
            try:
                if g.is_onionr_client:
                    for header in NON_NETWORK_HEADERS:
                        del resp.headers[header]
                else:
                    del resp.headers['X-API']
            except AttributeError:
                abort(403)

            public_api.lastRequest = epoch.get_rounded_epoch(roundS=5)
            return resp
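
A response filter like send_headers is typically registered as a Flask after_request hook (g and abort in these snippets are Flask imports). A minimal sketch of the wiring, assuming a Flask app object named app (the name is illustrative, not taken from the snippet above):

from flask import Flask

app = Flask(__name__)

@app.after_request
def apply_onionr_headers(resp):
    # Pass every outgoing response through the filter so the API
    # version and access control headers are always set
    return send_headers(resp)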
Code example #2
File: process.py Project: infoabcd/inti
def process_block_metadata(blockHash: str):
    """
    Read metadata from a block and cache it to the block database.

    blockHash -> sha3_256 hex formatted hash of Onionr block
    """
    curTime = epoch.get_rounded_epoch(roundS=60)
    myBlock = onionrblockapi.Block(blockHash)
    if myBlock.isEncrypted:
        myBlock.decrypt()
    if (myBlock.isEncrypted
            and myBlock.decrypted) or (not myBlock.isEncrypted):
        blockType = myBlock.getMetadata(
            'type'
        )  # we would use myBlock.getType() here, but it is bugged with encrypted blocks

        signer = bytesconverter.bytes_to_str(myBlock.signer)
        valid = myBlock.verifySig()
        if valid:
            if myBlock.getMetadata('newFSKey') is not None:
                try:
                    onionrusers.OnionrUser(signer).addForwardKey(
                        myBlock.getMetadata('newFSKey'))
                except onionrexceptions.InvalidPubkey:
                    logger.warn(
                        '%s has invalid forward secrecy key to add: %s' %
                        (signer, myBlock.getMetadata('newFSKey')))

        try:
            if len(blockType) <= onionrvalues.MAX_BLOCK_TYPE_LENGTH:
                blockmetadb.update_block_info(blockHash, 'dataType', blockType)
        except TypeError:
            logger.warn("Missing block information")
        # Set block expire time if specified
        try:
            expireTime = int(myBlock.getHeader('expire'))
            # test that expire time is an integer of sane length (for epoch)
            # doesn't matter if it's too large because of the min() func below
            if len(str(expireTime)) >= 20:
                raise ValueError('timestamp invalid')
        except (ValueError, TypeError):
            expireTime = onionrvalues.DEFAULT_EXPIRE + curTime
        finally:
            expireTime = min(expireTime, curTime + onionrvalues.DEFAULT_EXPIRE)
            blockmetadb.update_block_info(blockHash, 'expire', expireTime)

        if blockType == 'update':
            updater.update_event(myBlock)
        onionrevents.event('processblocks',
                           data={
                               'block': myBlock,
                               'type': blockType,
                               'signer': signer,
                               'validSig': valid
                           })
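
The expire handling above caps whatever expire time a block claims at DEFAULT_EXPIRE seconds from the current rounded epoch, so a block cannot pin itself in the database indefinitely. A self-contained illustration of the clamp (the constant's value here is an assumption for demonstration; the real one lives in onionrvalues):

DEFAULT_EXPIRE = 2592000  # assumed value: 30 days in seconds
cur_time = 1600000000     # rounded epoch at processing time
claimed_expire = cur_time + 999999999  # block requests a far-future expiry
# min() caps the effective expiry at cur_time + DEFAULT_EXPIRE
effective_expire = min(claimed_expire, cur_time + DEFAULT_EXPIRE)
assert effective_expire == cur_time + DEFAULT_EXPIRE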
Code example #3
def send_headers(resp):
    """Send API and access control headers"""
    resp = httpheaders.set_default_onionr_http_headers(resp)
    # Network API version
    resp.headers['X-API'] = public_api.API_VERSION
    # Delete some HTTP headers for Onionr user agents
    NON_NETWORK_HEADERS = ('Content-Security-Policy',
                           'X-Frame-Options', 'X-Content-Type-Options',
                           'Feature-Policy', 'Clear-Site-Data',
                           'Referrer-Policy')
    if g.is_onionr_client:
        for header in NON_NETWORK_HEADERS:
            del resp.headers[header]
    public_api.lastRequest = epoch.get_rounded_epoch(roundS=5)
    return resp
Code example #4
def send_headers(resp):
    """Send API and access control headers"""
    resp = httpheaders.set_default_onionr_http_headers(resp)
    # Network API version
    resp.headers['X-API'] = lan_client.API_VERSION
    # Delete some HTTP headers for Onionr user agents
    NON_NETWORK_HEADERS = ('Content-Security-Policy',
                           'X-Frame-Options', 'X-Content-Type-Options',
                           'Feature-Policy', 'Clear-Site-Data',
                           'Referrer-Policy')
    try:
        if g.is_onionr_client:
            for header in NON_NETWORK_HEADERS:
                del resp.headers[header]
    except AttributeError:
        abort(403)
    lan_client.lastRequest = epoch.get_rounded_epoch(roundS=5)
    return resp
Code example #5
File: lookupblocks.py Project: x0rzkov/onionr
def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    existingBlocks = blockmetadb.get_block_list() # List of existing saved blocks
    triedPeers = [] # list of peers we've tried this time around
    maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
    lastLookupTime = 0 # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        listLookupCommand = 'getblocklist' # This is defined here to reset it each time
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
            break
        peer = onlinepeers.pick_online_peer(comm_inst) # select random online peer
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = 0
        else:
            listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(comm_inst, peer, listLookupCommand) # get list of new block hashes
        except Exception as error:
            logger.warn('Could not get new blocks from %s.' % peer, error=error)
            newBlocks = False
        else:
            comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
        if newBlocks != False:
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline separated string is valid hash
                    if i not in existingBlocks:
                        # if block does not exist on disk and is not already in block queue
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    comm_inst.blockQueue[i] = [peer] # add blocks to download queue
                                    new_block_count += 1
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info('Discovered %s new block%s' % (new_block_count, block_string), terminal=True)
        comm_inst.download_blocks_timer.count = int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
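
comm_inst.blockQueue above maps each block hash to the list of peers known to advertise it, capped at ten sources per hash. A minimal sketch of that queue discipline in isolation (the names are illustrative, not part of the Onionr API):

block_queue = {}  # block hash -> list of peers that advertised it

def enqueue(block_hash: str, peer: str, max_sources: int = 10) -> None:
    if block_hash not in block_queue:
        # First sighting: queue the block with this peer as its only source
        block_queue[block_hash] = [peer]
    elif peer not in block_queue[block_hash] \
            and len(block_queue[block_hash]) < max_sources:
        # Known block: record an extra download source, up to the cap
        block_queue[block_hash].append(peer)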
Code example #6
def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    # List of existing saved blocks
    existingBlocks = get_block_list()
    triedPeers = []  # list of peers we've tried this time around
    # Max amount of *new* block hashes to have in queue
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # Defined here to reset it each time, time offset is added later
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of disk used')
            break
        try:
            # select random online peer
            peer = onlinepeers.pick_online_peer(comm_inst)
        except onionrexceptions.OnlinePeerNeeded:
            time.sleep(1)
            continue
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = epoch.get_epoch() - \
                config.get("general.max_block_age",
                           onionrvalues.DEFAULT_EXPIRE)
        listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(
                comm_inst,
                peer, listLookupCommand)  # get list of new block hashes
        except Exception as error:
            logger.warn(
                f'Could not get new blocks from {peer}.',
                error=error)
            newBlocks = False

        if newBlocks != False:  # noqa
            # if request was a success
            for i in newBlocks.split('\n'):
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)
                    # if newline separated string is valid hash

                    # if block does not exist on disk + is not already in queue
                    if i not in existingBlocks:
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                 not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # add blocks to download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                                    comm_inst.dbTimestamps[peer] = \
                                        epoch.get_rounded_epoch(roundS=60)
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info(
            f'Discovered {new_block_count} new block{block_string}',
            terminal=True)
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
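
All six examples coarsen timestamps with epoch.get_rounded_epoch (5-second rounding for request times, 60-second rounding for lookup stamps), which limits how precisely an observer can correlate events. A plausible sketch of the helper itself, assuming the implementation in Onionr's epoch module behaves like this:

import math
import time

def get_epoch() -> int:
    """Return the current Unix time as an integer."""
    return math.floor(time.time())

def get_rounded_epoch(roundS: int = 60) -> int:
    """Round the current epoch down to the nearest roundS seconds."""
    epoch = get_epoch()
    return epoch - (epoch % roundS)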