Example 1
def test_count(self):
    _test_setup()
    counter = storagecounter.StorageCounter()
    start_value = counter.get_amount()
    b_hash = onionrblocks.insert("test")
    # Inserting a block should increase the tracked storage amount...
    self.assertGreater(counter.get_amount(), start_value)
    onionrstorage.removeblock.remove_block(b_hash)
    # ...and removing it should restore the starting amount
    self.assertEqual(counter.get_amount(), start_value)
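
Taken together, these examples exercise a small StorageCounter surface: get_amount, get_percent, is_full, add_bytes, and remove_bytes. A minimal sketch of that interface, inferred from those call sites (the byte accounting and the default limit here are illustrative assumptions, not Onionr's actual implementation):

class StorageCounter:
    """Sketch of the interface used in these examples; persistence assumed."""

    def __init__(self, limit_bytes: int = 2_000_000_000):
        self._amount = 0              # bytes currently accounted for
        self._limit = limit_bytes     # allocation ceiling (assumed default)

    def get_amount(self) -> int:
        return self._amount

    def get_percent(self) -> float:
        return self._amount / self._limit

    def is_full(self) -> bool:
        return self._amount >= self._limit

    def add_bytes(self, amount: int):
        # Returning False instead of raising lets callers like set_data
        # (Example 4) translate a full disk into DiskAllocationReached
        if self._amount + amount > self._limit:
            return False
        self._amount += amount
        return self._amount

    def remove_bytes(self, amount: int) -> int:
        self._amount -= amount
        return self._amount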
Example 2
def getDifficultyModifier():
    '''Return the difficulty modifier for block storage,
    based on a variety of factors (currently only disk use).
    '''
    percentUse = storagecounter.StorageCounter().get_percent()
    difficultyIncrease = math.floor(
        4 * percentUse)  # difficulty increase is a step function

    return difficultyIncrease
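
Assuming get_percent() returns a fraction between 0 and 1, floor(4 * percentUse) is a five-step function of disk use; a quick illustration:

import math

# the modifier steps 0 -> 4 as disk use crosses each 25% boundary
for p in (0.0, 0.24, 0.25, 0.5, 0.75, 1.0):
    print(p, math.floor(4 * p))
# prints: 0.0 0, 0.24 0, 0.25 1, 0.5 2, 0.75 3, 1.0 4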
Example 3
def remove_block(block):
    """Remove a block from this node.

    (Does not automatically blacklist;
    you may want blacklist.addToDB(blockHash).)
    """
    if stringvalidators.validate_hash(block):
        conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
        c = conn.cursor()
        t = (block,)
        c.execute('DELETE FROM hashes WHERE hash=?;', t)
        conn.commit()
        conn.close()
        dataSize = sys.getsizeof(onionrstorage.getData(block))
        storagecounter.StorageCounter().remove_bytes(dataSize)
    else:
        raise onionrexceptions.InvalidHexHash
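
Because remove_block deliberately does not blacklist, a node that merely removes a block may download it again from peers. A hypothetical helper (not part of Onionr) combining both steps, using the OnionrBlackList API seen in Example 7:

def remove_and_blacklist(block_hash: str):
    # assumes onionrblacklist is imported as in Example 7
    remove_block(block_hash)  # drop from the meta DB and storage counter
    onionrblacklist.OnionrBlackList().addToDB(block_hash)  # refuse future offers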
Example 4
def set_data(data) -> str:
    '''
        Set the data associated with a hash
    '''
    storage_counter = storagecounter.StorageCounter()
    dataSize = sys.getsizeof(data)
    nonce_hash = crypto.hashers.sha3_hash(
        bytesconverter.str_to_bytes(
            blockmetadata.fromdata.get_block_metadata_from_data(data)[2]))
    nonce_hash = bytesconverter.bytes_to_str(nonce_hash)

    if not isinstance(data, bytes):
        data = data.encode()

    dataHash = crypto.hashers.sha3_hash(data)

    if isinstance(dataHash, bytes):
        dataHash = dataHash.decode()
    blockFileName = filepaths.block_data_location + dataHash + '.dat'
    try:
        onionrstorage.getData(dataHash)
    except onionrexceptions.NoDataAvailable:
        if storage_counter.add_bytes(dataSize) is not False:
            onionrstorage.store(data, blockHash=dataHash)
            conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
            c = conn.cursor()
            c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = ?;",
                      (dataHash, ))
            conn.commit()
            conn.close()
            with open(filepaths.data_nonce_file, 'a') as nonceFile:
                nonceFile.write(nonce_hash + '\n')
        else:
            raise onionrexceptions.DiskAllocationReached
    else:
        raise onionrexceptions.DataExists("Data is already set for " +
                                          dataHash)

    return dataHash
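
Callers are expected to handle both failure modes set_data raises; condensed from the handling in Example 7:

try:
    onionrstorage.set_data(content)
except onionrexceptions.DataExists:
    pass  # block already stored, nothing to do
except onionrexceptions.DiskAllocationReached:
    removeFromQueue = False  # keep it queued; retry when space frees up
else:
    blockmetadb.add_to_block_DB(blockHash, dataSaved=True)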
Example 5
"""
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.
"""

storage_counter = storagecounter.StorageCounter()


def download_blocks_from_communicator(shared_state: "TooMany"):
    """Use communicator instance to download blocks in the comms's queue"""
    blacklist = onionrblacklist.OnionrBlackList()
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    LOG_SKIP_COUNT = 50  # for how many iterations we skip logging the counter
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(kv.get('blockQueue')):
        count += 1

        try:
            blockPeers = list(kv.get('blockQueue')[blockHash])
        except KeyError:
            blockPeers = []
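
This version reads its queue from a shared DeadSimpleKV store obtained through shared_state, rather than from communicator attributes as in Example 7. A minimal illustration of that pattern, assuming DeadSimpleKV exposes put/get semantics:

kv = shared_state.get_by_string("DeadSimpleKV")
kv.put('blockQueue', {})          # state formerly held on the daemon instance
queue = kv.get('blockQueue')      # readable anywhere that has shared_state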
Example 6
    def __init__(self, shared_state, developmentMode=None):
        if developmentMode is None:
            developmentMode = config.get('general.dev_mode', False)

        # configure logger and stuff
        self.config = config
        self.storage_counter = storagecounter.StorageCounter()
        self.isOnline = True  # Assume we're connected to the internet
        self.shared_state = shared_state  # TooManyObjects module

        # list of timer instances
        self.timers = []

        # initialize core with Tor socks port being 3rd argument
        self.proxyPort = shared_state.get(NetController).socksPort

        # Upload information, list of blocks to upload
        self.blocksToUpload = []
        self.upload_session_manager = self.shared_state.get(uploadblocks.sessionmanager.BlockUploadSessionManager)
        self.shared_state.share_object()

        # loop time.sleep delay in seconds
        self.delay = 1

        # lists of connected peers and peers we know we can't reach currently
        self.onlinePeers = []
        self.offlinePeers = []
        self.cooldownPeer = {}
        self.connectTimes = {}
        # list of peer's profiles (onionrpeers.PeerProfile instances)
        self.peerProfiles = []
        # Peers merged to us. Don't add to db until we know they're reachable
        self.newPeers = []
        self.announceProgress = {}
        self.announceCache = {}

        self.generating_blocks = []

        # amount of threads running by name, used to prevent too many
        self.threadCounts = {}

        # set true when shutdown command received
        self.shutdown = False

        # list of new blocks to download, added to when new block lists are fetched from peers
        self.blockQueue = {}

        # list of blocks currently downloading, to avoid downloading the same block in multiple threads
        self.currentDownloading = []

        # timestamp when the last online node was seen
        self.lastNodeSeen = None

        # Dict of time stamps for peer's block list lookup times, to avoid downloading full lists all the time
        self.dbTimestamps = {}

        # Clear the daemon queue for any dead messages
        if os.path.exists(dbfiles.daemon_queue_db):
            daemonqueue.clear_daemon_queue()

        # Loads in and starts the enabled plugins
        plugins.reload()

        # time app started running for info/statistics purposes
        self.startTime = epoch.get_epoch()

        uploadqueue.UploadQueue(self) # extends our upload list and saves our list when Onionr exits

        if developmentMode:
            OnionrCommunicatorTimers(self, self.heartbeat, 30)

        # Set timers, function reference, seconds
        # requires_peer True means the timer function won't fire if we have no connected peers
        peerPoolTimer = OnionrCommunicatorTimers(self, onlinepeers.get_online_peers, 60, max_threads=1, my_args=[self])
        OnionrCommunicatorTimers(self, self.runCheck, 2, max_threads=1)

        # Timers to periodically lookup new blocks and download them
        lookup_blocks_timer = OnionrCommunicatorTimers(self, lookupblocks.lookup_blocks_from_communicator, config.get('timers.lookupBlocks', 25), my_args=[self], requires_peer=True, max_threads=1)
        # The block download timer is accessed by the block lookup function to trigger faster download starts
        self.download_blocks_timer = OnionrCommunicatorTimers(self, self.getBlocks, config.get('timers.getBlocks', 10), requires_peer=True, max_threads=5)

        # Timer to reset the longest offline peer so contact can be attempted again
        OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58, my_args=[self])

        # Timer to clean up old blocks
        blockCleanupTimer = OnionrCommunicatorTimers(self, housekeeping.clean_old_blocks, 20, my_args=[self])

        # Timer to discover new peers
        OnionrCommunicatorTimers(self, lookupadders.lookup_new_peer_transports_with_communicator, 60, requires_peer=True, my_args=[self], max_threads=2)

        # Timer for adjusting which peers we actively communicate to at any given time, to avoid over-using peers
        OnionrCommunicatorTimers(self, cooldownpeer.cooldown_peer, 30, my_args=[self], requires_peer=True)

        # Timer to read the upload queue and upload the entries to peers
        OnionrCommunicatorTimers(self, uploadblocks.upload_blocks_from_communicator, 5, my_args=[self], requires_peer=True, max_threads=1)

        # Timer to process the daemon command queue
        OnionrCommunicatorTimers(self, daemonqueuehandler.handle_daemon_commands, 6, my_args=[self], max_threads=3)

        # Setup direct connections
        if config.get('general.socket_servers', False):
            self.services = onionrservices.OnionrServices()
            self.active_services = []
            self.service_greenlets = []
            OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, max_threads=50, my_args=[self])
        else:
            self.services = None
        
        # {peer_pubkey: ephemeral_address}, the address to reach them
        self.direct_connection_clients = {}
        
        # This timer creates deniable blocks, in an attempt to further obfuscate block insertion metadata
        if config.get('general.insert_deniable_blocks', True):
            deniableBlockTimer = OnionrCommunicatorTimers(self, deniableinserts.insert_deniable_block, 180, my_args=[self], requires_peer=True, max_threads=1)
            deniableBlockTimer.count = (deniableBlockTimer.frequency - 175)

        # Timer to check for connectivity, through Tor to various high-profile onion services
        netCheckTimer = OnionrCommunicatorTimers(self, netcheck.net_check, 500, my_args=[self], max_threads=1)

        # Announce the public API server transport address to other nodes if security level allows
        if config.get('general.security_level', 1) == 0 and config.get('general.announce_node', True):
            # Default to high security level in case config breaks
            announceTimer = OnionrCommunicatorTimers(self, announcenode.announce_node, 3600, my_args=[self], requires_peer=True, max_threads=1)
            announceTimer.count = (announceTimer.frequency - 120)
        else:
            logger.debug('Will not announce node.')
        
        # Timer to delete malfunctioning or long-dead peers
        cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requires_peer=True)

        # Timer to clean up dead ephemeral forward secrecy keys
        forwardSecrecyTimer = OnionrCommunicatorTimers(self, housekeeping.clean_keys, 15, my_args=[self], max_threads=1)

        # Adjust initial timer triggers
        peerPoolTimer.count = (peerPoolTimer.frequency - 1)
        cleanupTimer.count = (cleanupTimer.frequency - 60)
        blockCleanupTimer.count = (blockCleanupTimer.frequency - 2)
        lookup_blocks_timer.count = (lookup_blocks_timer.frequency - 2)

        shared_state.add(self)
        
        if config.get('general.use_bootstrap', True):
            bootstrappeers.add_bootstrap_list_to_peer_list(self, [], db_only=True)

        if not config.get('onboarding.done', True):
            logger.info('First run detected. Run openhome to get setup.', terminal=True)

            while not config.get('onboarding.done', True):
                time.sleep(5)

        # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
        try:
            while not self.shutdown:
                for i in self.timers:
                    if self.shutdown:
                        break
                    i.processTimer()
                time.sleep(self.delay)
                # Debug to print out used FDs (regular and net)
                #proc = psutil.Process()
                #print(proc.open_files(), len(psutil.net_connections()))
        except KeyboardInterrupt:
            self.shutdown = True

        logger.info('Goodbye. (Onionr is cleaning up, and will exit)', terminal=True)
        try:
            self.service_greenlets
        except AttributeError:
            pass
        else:
            # Stop onionr direct connection services
            for server in self.service_greenlets:
                server.stop()
        localcommand.local_command('shutdown') # shutdown the api
        try:
            time.sleep(0.5)
        except KeyboardInterrupt:
            pass
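
This __init__ leans entirely on OnionrCommunicatorTimers: the constructor arguments, the count/frequency fields preset to stagger first runs, and processTimer() driven by the main loop all appear above. A rough sketch of that contract, with the internals assumed rather than taken from Onionr's implementation:

class OnionrCommunicatorTimers:
    """Sketch of the contract inferred from usage above; internals assumed."""

    def __init__(self, daemon, timer_function, frequency,
                 requires_peer=False, max_threads=5, my_args=None):
        self.timer_function = timer_function
        self.frequency = frequency      # seconds between runs
        self.count = 0                  # elapsed time; presetting this
                                        # (e.g. frequency - 2) makes the
                                        # first run fire soon after startup
        self.requires_peer = requires_peer
        self.max_threads = max_threads  # cap on concurrent runs, tracked
                                        # via daemon.threadCounts
        self.my_args = my_args if my_args is not None else []
        self.daemon = daemon
        daemon.timers.append(self)      # main loop iterates daemon.timers

    def processTimer(self):
        if self.requires_peer and not self.daemon.onlinePeers:
            return                      # idle until a peer is connected
        self.count += self.daemon.delay
        if self.count >= self.frequency:
            self.count = 0
            self.timer_function(*self.my_args)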
Example 7
def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
    """Use communicator instance to download blocks in the comms's queue"""
    blacklist = onionrblacklist.OnionrBlackList()
    storage_counter = storagecounter.StorageCounter()
    LOG_SKIP_COUNT = 50 # for how many iterations we skip logging the counter
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(comm_inst.blockQueue):
        count += 1

        triedQueuePeers = [] # List of peers we've tried for a block
        try:
            blockPeers = list(comm_inst.blockQueue[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(comm_inst, blockHash):
            continue

        if comm_inst.shutdown or not comm_inst.isOnline or storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break
        # Do not download blocks being downloaded
        if blockHash in comm_inst.currentDownloading:
            continue

        if len(comm_inst.onlinePeers) == 0:
            break

        comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
        if len(blockPeers) == 0:
            try:
                peerUsed = onlinepeers.pick_online_peer(comm_inst)
            except onionrexceptions.OnlinePeerNeeded:
                continue
        else:
            blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not comm_inst.shutdown and peerUsed.strip() != '':
            logger.info("Attempting to download %s from %s..." % (blockHash[:12], peerUsed))
        content = peeraction.peer_action(comm_inst, peerUsed, 'getdata/' + blockHash, max_resp_size=3000000) # block content from random peer (includes metadata)

        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                realHash = realHash.decode() # bytes on some versions for some reason
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode() # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = \
                        validatemetadata.validate_metadata(metadata, metas[2])
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result: # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' % blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' % (blockHash,))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash,))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
                            blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
                            spawn(
                                local_command,
                                '/daemon-event/upload_event',
                                post=True,
                                is_json=True,
                                post_data={'block': blockHash}
                            )
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash,))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash,))
                    else:
                        logger.warn('Metadata for block %s is invalid.' % (blockHash,))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = realHash  # already computed and decoded above
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
                    logger.warn(
                        'Block hash validation failed for ' +
                        blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False # Don't remove from queue if 404
            if removeFromQueue:
                try:
                    del comm_inst.blockQueue[blockHash]  # remove whether download succeeded or failed
                    if count == LOG_SKIP_COUNT:
                        logger.info('%s blocks remaining in queue' %
                                    (len(comm_inst.blockQueue),),
                                    terminal=True)
                        count = 0
                except KeyError:
                    pass
        comm_inst.currentDownloading.remove(blockHash)
    comm_inst.decrementThreadCount('getBlocks')
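
The closing decrementThreadCount call pairs with the threadCounts bookkeeping from Example 6 ("amount of threads running by name, used to prevent too many"). A minimal sketch of what that counterpart plausibly looks like, assumed rather than copied from Onionr:

def decrementThreadCount(self, name: str):
    # each timer-spawned thread decrements its named counter when it
    # finishes, so max_threads can cap concurrent runs per timer
    if self.threadCounts.get(name, 0) > 0:
        self.threadCounts[name] -= 1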