Example #1
def better_sleep(wait: int):
    """Sleep catching ctrl c for wait seconds."""
    start = get_epoch()
    try:
        sleep(wait)
    except KeyboardInterrupt:
        better_sleep(wait - (get_epoch() - start))
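Every example on this page ultimately calls the same helper, get_epoch() (imported either directly or as epoch.get_epoch()), which returns the current Unix time in whole seconds. A minimal sketch of such a helper, assuming it simply truncates time.time() (an assumption, not necessarily the project's exact implementation), together with the import Example #1 would need to run standalone:

from time import sleep, time


def get_epoch() -> int:
    """Return the current Unix time, truncated to whole seconds."""
    return int(time())

With those definitions in place, a call such as better_sleep(10) keeps sleeping out the remaining seconds even if the user presses Ctrl+C partway through.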
Example #2
def cooldown_peer(comm_inst):
    """Randomly add an online peer to cooldown, so we can connect a new one."""
    config = comm_inst.config
    online_peer_amount = len(comm_inst.onlinePeers)
    minTime = 300
    cooldown_time = 600
    to_cool = ''
    tempConnectTimes = dict(comm_inst.connectTimes)

    # Remove peers from cooldown that have been there long enough
    tempCooldown = dict(comm_inst.cooldownPeer)
    for peer in tempCooldown:
        if (epoch.get_epoch() - tempCooldown[peer]) >= cooldown_time:
            del comm_inst.cooldownPeer[peer]

    # Cool down a peer, if we have max connections alive for long enough
    if online_peer_amount >= config.get('peers.max_connect', 10, save=True):
        finding = True

        while finding:
            try:
                to_cool = min(tempConnectTimes, key=tempConnectTimes.get)
                if (epoch.get_epoch() - tempConnectTimes[to_cool]) < minTime:
                    del tempConnectTimes[to_cool]
                else:
                    finding = False
            except ValueError:
                break
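        # Note: this `else` belongs to the `while` loop above; it runs only
        # when the loop ends without `break`, i.e. a peer old enough to cool down was found.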
        else:
            onlinepeers.remove_online_peer(comm_inst, to_cool)
            comm_inst.cooldownPeer[to_cool] = epoch.get_epoch()

    comm_inst.decrementThreadCount('cooldown_peer')
Example #3
def cooldown_peer(shared_state):
    """Randomly add an online peer to cooldown, so we can connect a new one."""
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    config = shared_state.get_by_string("OnionrCommunicatorDaemon").config
    online_peer_amount = len(kv.get('onlinePeers'))
    minTime = 300
    cooldown_time = 600
    to_cool = ''
    tempConnectTimes = dict(kv.get('connectTimes'))

    # Remove peers from cooldown that have been there long enough
    tempCooldown = dict(kv.get('cooldownPeer'))
    for peer in tempCooldown:
        if (epoch.get_epoch() - tempCooldown[peer]) >= cooldown_time:
            del kv.get('cooldownPeer')[peer]

    # Cool down a peer, if we have max connections alive for long enough
    if online_peer_amount >= config.get('peers.max_connect', 10, save=True):
        finding = True

        while finding:
            try:
                to_cool = min(tempConnectTimes, key=tempConnectTimes.get)
                if (epoch.get_epoch() - tempConnectTimes[to_cool]) < minTime:
                    del tempConnectTimes[to_cool]
                else:
                    finding = False
            except ValueError:
                break
        else:
            onlinepeers.remove_online_peer(kv, to_cool)
            kv.get('cooldownPeer')[to_cool] = epoch.get_epoch()
Example #4
def stress_test_block_insert(testmanager):
    start = epoch.get_epoch()
    count = 100
    max_insert_speed = 120
    for x in range(count):
        onionrblocks.insert(os.urandom(32))
    speed = epoch.get_epoch() - start
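    # 'speed' is really elapsed seconds; finishing all inserts in fewer than
    # max_insert_speed seconds is treated as suspiciously fast and fails the test.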
    if speed < max_insert_speed:
        raise ValueError(
            f'{count} blocks inserted too fast, {max_insert_speed}, got {speed}'
        )
    logger.info(
        f'runtest stress block insertion: {count} blocks inserted in {speed}s')
Example #5
    def _loadData(self):
        self.lastRead = epoch.get_epoch()
        retData = {}
        if os.path.exists(self.dataFile):
            with open(self.dataFile, 'r') as dataFile:
                retData = json.loads(dataFile.read())
        return retData
Example #6
    def addToDB(self, data, dataType=0, expire=0):
        '''Add to the blacklist. Intended to be block hash, block data, peers, or transport addresses
        0=block
        1=peer
        2=pubkey
        '''

        # we hash the data so we can remove data entirely from our node's disk
        hashed = bytesconverter.bytes_to_str(onionrcrypto.hashers.sha3_hash(data))
        if len(hashed) > 64:
            raise Exception("Hashed data is too large")

        if not hashed.isalnum():
            raise Exception("Hashed data is not alpha numeric")
        try:
            int(dataType)
        except ValueError:
            raise Exception("dataType is not int")
        try:
            int(expire)
        except ValueError:
            raise Exception("expire is not int")
        if self.inBlacklist(hashed):
            return
        insert = (hashed,)
        blacklistDate = epoch.get_epoch()
        try:
            self._dbExecute("INSERT INTO blacklist (hash, dataType, blacklistDate, expire) VALUES(?, ?, ?, ?);", (str(hashed), dataType, blacklistDate, expire))
        except sqlite3.IntegrityError:
            pass
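Example #9 below calls blacklist.deleteExpired(dataType=1) against rows created by this method, but that cleanup code is not included on this page. A rough sketch of what it could look like, assuming the column layout implied by the INSERT above (hash, dataType, blacklistDate, expire) and treating expire == 0 as "never expires":

    def deleteExpired(self, dataType=0):
        # Sketch only: remove rows of the given type whose expiry window has passed.
        # Column names are assumptions inferred from the INSERT statement above.
        now = epoch.get_epoch()
        self._dbExecute(
            "DELETE FROM blacklist WHERE dataType = ? AND expire > 0 "
            "AND (blacklistDate + expire) <= ?;",
            (dataType, now))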
Example #7
def net_check(shared_state):
    """Check if we are connected to the internet.

    or not when we can't connect to any peers
    """
    # for detecting if we have received incoming connections recently
    rec = False
    kv: "DeadSimpleKV" = shared_state.get_by_string("DeadSimpleKV")
    proxy_port = shared_state.get_by_string("NetController").socksPort

    if len(kv.get('onlinePeers')) == 0:
        try:
            if (epoch.get_epoch() -
                    int(localcommand.local_command('/lastconnect'))) <= 60:
                kv.put('isOnline', True)
                rec = True
        except ValueError:
            pass
        if not rec and not netutils.check_network(torPort=proxy_port):
            if not kv.get('shutdown'):
                if not shared_state.get_by_string(
                        "OnionrCommunicatorDaemon").config.get(
                            'general.offline_mode', False):
                    logger.warn(
                        'Network check failed, are you connected to ' +
                        'the Internet, and is Tor working? ' +
                        'This is usually temporary, but bugs and censorship can cause this to persist, in which case you should report it to beardog [at] mailbox.org',  # noqa
                        terminal=True)
                    restarttor.restart(shared_state)
                    kv.put('offlinePeers', [])
            kv.put('isOnline', False)
        else:
            kv.put('isOnline', True)
Example #8
File: main.py Project: x0rzkov/onionr
    def start(self):
        logger.warn("Please note: everything said here is public, even if a random channel name is used.", terminal=True)
        message = ""
        self.flowRunning = True
        try:
            self.channel = logger.readline("Enter a channel name or none for default:").strip()
        except (KeyboardInterrupt, EOFError) as e:
            self.flowRunning = False
        newThread = threading.Thread(target=self.showOutput, daemon=True)
        newThread.start()
        while self.flowRunning:
            if self.channel == "":
                self.channel = "global"
            try:
                message = logger.readline('\nInsert message into flow:').strip().replace('\n', '\\n').replace('\r', '\\r')
            except EOFError:
                pass
            except KeyboardInterrupt:
                self.flowRunning = False
            else:
                if message == "q":
                    self.flowRunning = False
                expireTime = epoch.get_epoch() + 43200
                if len(message) > 0:
                    logger.info('Inserting message as block...', terminal=True)
                    onionrblocks.insert(message, header='brd', expire=expireTime, meta={'ch': self.channel})

        logger.info("Flow is exiting, goodbye", terminal=True)
        return
Example #9
def peer_cleanup():
    '''Removes peers who have been offline too long or score too low'''
    logger.info('Cleaning peers...')
    blacklist = onionrblacklist.OnionrBlackList()
    adders = scoresortedpeerlist.get_score_sorted_peer_list()
    adders.reverse()

    if len(adders) > 1:

        min_score = int(config.get('peers.minimum_score', -100))
        max_peers = int(config.get('peers.max_stored', 5000))

        for address in adders:
            # Remove peers that go below the negative score
            if peerprofiles.PeerProfiles(address).score < min_score:
                keydb.removekeys.remove_address(address)
                try:
                    lastConnect = int(
                        keydb.transportinfo.get_address_info(
                            address, 'lastConnect'))
                    expireTime = 86400 - int(epoch.get_epoch()) - lastConnect
                    blacklist.addToDB(address, dataType=1, expire=expireTime)
                except sqlite3.IntegrityError:  #TODO just make sure its not a unique constraint issue
                    pass
                except ValueError:
                    pass
                logger.warn('Removed address ' + address + '.')

    # Unban probably not malicious peers TODO improve
    blacklist.deleteExpired(dataType=1)
Example #10
def net_check(comm_inst):
    """Check if we are connected to the internet.

    or not when we can't connect to any peers
    """
    # for detecting if we have received incoming connections recently
    rec = False
    if len(comm_inst.onlinePeers) == 0:
        try:
            if (epoch.get_epoch() -
                    int(localcommand.local_command('/lastconnect'))) <= 60:
                comm_inst.isOnline = True
                rec = True
        except ValueError:
            pass
        if not rec and not netutils.checkNetwork(torPort=comm_inst.proxyPort):
            if not comm_inst.shutdown:
                if not comm_inst.config.get('general.offline_mode', False):
                    logger.warn(
                        'Network check failed, are you connected to ' +
                        'the Internet, and is Tor working? ' +
                        'This is usually temporary, but bugs and censorship can cause this to persist, in which case you should report it to beardog [at] mailbox.org',  # noqa
                        terminal=True)
                    restarttor.restart(comm_inst)
                    comm_inst.offlinePeers = []
            comm_inst.isOnline = False
        else:
            comm_inst.isOnline = True
    comm_inst.decrementThreadCount('net_check')
Example #11
def peer_action(comm_inst, peer, action, returnHeaders=False, max_resp_size=5242880):
    '''Perform a get request to a peer'''
    penalty_score = -10
    if len(peer) == 0:
        return False
    url = 'http://%s/%s' % (peer, action)

    try:
        ret_data = basicrequests.do_get_request(url, port=comm_inst.proxyPort,
                                                max_size=max_resp_size)
    except streamedrequests.exceptions.ResponseLimitReached:
        logger.warn('Request failed due to max response size being overflowed', terminal=True)
        ret_data = False
        penalty_score = -100
    # if request failed, (error), mark peer offline
    if ret_data == False: # For some reason "if not" breaks this. Prob has to do with empty string.
        try:
            comm_inst.getPeerProfileInstance(peer).addScore(penalty_score)
            onlinepeers.remove_online_peer(comm_inst, peer)
            keydb.transportinfo.set_address_info(peer, 'lastConnectAttempt', epoch.get_epoch())
            if action != 'ping' and not comm_inst.shutdown:
                logger.warn(f'Lost connection to {peer}', terminal=True)
                onlinepeers.get_online_peers(comm_inst) # Will only add a new peer to pool if needed
        except ValueError:
            pass
    else:
        peer_profile = comm_inst.getPeerProfileInstance(peer)
        peer_profile.update_connect_time()
        peer_profile.addScore(1)
    return ret_data # If returnHeaders, returns tuple of data, headers. if not, just data string
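For context, a caller would typically treat the boolean False return as "peer is gone" and anything else as response data. The snippet below is only a hedged usage sketch; the 'ping' action name is taken from the function body above, everything else is assumed:

# Hedged usage sketch, not actual daemon code.
ret = peer_action(comm_inst, peer, 'ping')
if ret is False:
    # peer_action has already penalized the peer and dropped it from the pool
    logger.warn(f'{peer} is unreachable', terminal=True)
else:
    logger.info(f'{peer} responded: {ret}')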
Example #12
File: __init__.py Project: x0rzkov/onionr
    def __init__(self):
        '''
            Initialize the API server, prepping variables for later use.

            This initialization defines all of the API entry points
            and handlers for the endpoints and errors.
            It also saves the used host (a random localhost IP address) to the data folder in host.txt.
        '''
        self.config = config

        self.startTime = epoch.get_epoch()
        app = flask.Flask(__name__)
        bind_port = int(config.get('client.client.port', 59496))
        self.bindPort = bind_port

        self.clientToken = config.get('client.webpassword')

        self.host = httpapi.apiutils.setbindip.set_bind_IP(
            private_API_host_file)
        logger.info('Running api on %s:%s' % (self.host, self.bindPort))
        self.httpServer = ''

        self.queueResponse = {}
        self.get_block_data = httpapi.apiutils.GetBlockData(self)
        register_private_blueprints.register_private_blueprints(self, app)
        httpapi.load_plugin_blueprints(app)
        self.app = app
Example #13
File: __init__.py Project: x0rzkov/onionr
    def getUptime(self) -> int:
        while True:
            try:
                return epoch.get_epoch() - self.startTime
            except (AttributeError, NameError):
                # Don't error on race condition with startup
                pass
Example #14
    def addForwardKey(self, newKey, expire=DEFAULT_KEY_EXPIRE):
        newKey = bytesconverter.bytes_to_str(
            unpaddedbase32.repad(bytesconverter.str_to_bytes(newKey)))
        if not stringvalidators.validate_pub_key(newKey):
            # Do not add if something went wrong with the key
            raise onionrexceptions.InvalidPubkey(newKey)

        conn = sqlite3.connect(dbfiles.user_id_info_db,
                               timeout=DATABASE_LOCK_TIMEOUT)
        c = conn.cursor()

        # Get the time we're inserting the key at
        timeInsert = epoch.get_epoch()

        # Look at our current keys for duplicate key data or time
        for entry in self._getForwardKeys():
            if entry[0] == newKey:
                return False
            if entry[1] == timeInsert:
                timeInsert += 1
                # Sleep if our time is the same to prevent dupe time records
                time.sleep(1)

        # Add a forward secrecy key for the peer
        # Prepare the insert
        command = (self.publicKey, newKey, timeInsert, timeInsert + expire)

        c.execute("INSERT INTO forwardKeys VALUES(?, ?, ?, ?);", command)

        conn.commit()
        conn.close()
        return True
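The _getForwardKeys() helper iterated above is not shown on this page. A hedged sketch of what it might look like follows; the column names forwardKey and date are guesses inferred from how entry[0] and entry[1] are used above and from the DELETE in Example #27, not the project's confirmed schema:

    def _getForwardKeys(self):
        # Sketch only: column names are assumptions, not confirmed Onionr schema.
        conn = sqlite3.connect(dbfiles.user_id_info_db,
                               timeout=DATABASE_LOCK_TIMEOUT)
        c = conn.cursor()
        keys = []
        for row in c.execute(
                "SELECT forwardKey, date FROM forwardKeys WHERE peerKey = ?;",
                (self.publicKey,)):
            keys.append((row[0], row[1]))
        conn.close()
        return keys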
Example #15
    def compile_data():
        return {
            'time': epoch.get_epoch(),
            'adders': get_transports(),
            'peers':
            shared_state.get_by_string('DeadSimpleKV').get('onlinePeers')
        }
Example #16
    def __init__(self, auto_refresh=True, block_type=''):
        self.block_type = block_type
        self.refresh_db()
        self.check_time = get_epoch()

        class Refresher(FileSystemEventHandler):
            @staticmethod
            def on_modified(event):
                if event.src_path != block_meta_db:
                    return
                self.refresh_db()

        if auto_refresh:

            def auto_refresher():
                observer = Observer()
                observer.schedule(Refresher(),
                                  identify_home(),
                                  recursive=False)
                observer.start()
                while observer.is_alive():
                    # call import func with timeout
                    observer.join(120)

            Thread(target=auto_refresher, daemon=True).start()
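This constructor leans on the watchdog package for filesystem notifications. The excerpt omits its imports; under the standard watchdog layout they would be roughly:

from threading import Thread

from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

Observer is itself a thread, so is_alive() and join(120) behave like the usual threading.Thread calls; get_epoch, identify_home, and block_meta_db come from the project's own modules.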
Example #17
File: listkeys.py Project: infoabcd/inti
def list_adders(randomOrder=True, i2p=True, recent=0):
    '''
        Return a list of transport addresses
    '''
    conn = sqlite3.connect(dbfiles.address_info_db,
                           timeout=onionrvalues.DATABASE_LOCK_TIMEOUT)
    c = conn.cursor()
    if randomOrder:
        addresses = c.execute('SELECT * FROM adders ORDER BY RANDOM();')
    else:
        addresses = c.execute('SELECT * FROM adders;')
    addressList = []
    for i in addresses:
        if len(i[0].strip()) == 0:
            continue
        addressList.append(i[0])
    conn.close()
    testList = list(addressList)  # create new list to iterate
    for address in testList:
        try:
            if recent > 0 and (epoch.get_epoch() -
                               transportinfo.get_address_info(
                                   address, 'lastConnect')) > recent:
                raise TypeError  # If there is no last-connected date or it was too long ago, don't add peer to list if recent is not 0
        except TypeError:
            addressList.remove(address)
    return addressList
Example #18
    def get_stats(self):
        """Return statistics about our node"""
        stats = {}
        proc = Process()

        def get_open_files():
            if WINDOWS:
                return proc.num_handles()
            return proc.num_fds()

        try:
            self._too_many
        except AttributeError:
            sleep(1)
        kv: "DeadSimpleKV" = self._too_many.get_by_string("DeadSimpleKV")
        # Deduplicate the online peer list while preserving order
        connected = []
        for x in kv.get('onlinePeers'):
            if x not in connected:
                connected.append(x)
        stats['uptime'] = get_epoch() - kv.get('startTime')
        stats['connectedNodes'] = '\n'.join(connected)
        stats['blockCount'] = len(blockmetadb.get_block_list())
        stats['blockQueueCount'] = len(kv.get('blockQueue'))
        stats['threads'] = proc.num_threads()
        stats['ramPercent'] = proc.memory_percent()
        stats['fd'] = get_open_files()
        stats['diskUsage'] = human_size(size(identify_home()))
        return json.dumps(stats)
Example #19
    def test_inc_score_with_db(self):
        p = peerprofiles.PeerProfiles(test_peers.pop())
        s = 0
        for x in range(2):
            p.last_updated['score'] = epoch.get_epoch() - peerprofiles.UPDATE_DELAY
            s += 1
            p.addScore(1)
            self.assertEqual(p.score, keydb.transportinfo.get_address_info(p.address, 'success'))
Example #20
File: __init__.py Project: threeape/onionr
    def getUptime(self) -> int:
        """Safely wait for uptime to be set and return it."""
        while True:
            try:
                return epoch.get_epoch() - self.startTime
            except (AttributeError, NameError):
                # Don't error on race condition with startup
                pass
Example #21
    def addToSent(self, blockID, peer, message, subject=''):
        blockID = reconstructhash.deconstruct_hash(blockID)
        self.connect()
        args = (blockID, peer, message, subject, epoch.get_epoch())
        self.cursor.execute('INSERT INTO sent VALUES(?, ?, ?, ?, ?)', args)
        self.conn.commit()
        self.close()
        return
Example #22
    def compile_data():
        return {
            'time': epoch.get_epoch(),
            'adders': get_transports(),
            'peers':
            shared_state.get_by_string('OnionrCommunicatorDaemon').onlinePeers
        }
Example #23
File: __init__.py Project: x0rzkov/onionr
    def run_tests(self):
        cur_time = epoch.get_epoch()
        logger.info(f"Doing runtime tests at {cur_time}")
        try:
            for i in RUN_TESTS:
                last = i
                i(self)
                logger.info(last.__name__ + " passed")
        except ValueError:
            logger.error(last.__name__ + ' failed')
Example #24
    def get_info(self, key, forceReload=False):
        if self.deleted:
            raise onionrexceptions.ContactDeleted

        if (epoch.get_epoch() - self.lastRead >=
                self.recordExpire) or forceReload:
            self.data = self._loadData()
        try:
            return self.data[key]
        except KeyError:
            return None
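Examples #5 and #24 are two halves of the same pattern: _loadData() stamps self.lastRead, and get_info() only re-reads the JSON file once recordExpire seconds have passed (or when forced). A self-contained sketch of that time-gated cache, using illustrative names rather than the Onionr class:

import json
import os
from time import time


class TimedJSONCache:
    """Re-read a JSON file at most once per cache_seconds (illustrative sketch)."""

    def __init__(self, path: str, cache_seconds: int = 60):
        self.path = path
        self.cache_seconds = cache_seconds
        self.last_read = 0
        self.data = {}

    def get(self, key, force_reload=False):
        now = int(time())
        if force_reload or now - self.last_read >= self.cache_seconds:
            self.last_read = now
            if os.path.exists(self.path):
                with open(self.path, 'r') as f:
                    self.data = json.loads(f.read())
        return self.data.get(key)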
Example #25
File: session.py Project: x0rzkov/onionr
    def __init__(self, block_hash: Union[str, bytes]):
        block_hash = bytesconverter.bytes_to_str(block_hash)
        block_hash = reconstructhash.reconstruct_hash(block_hash)
        if not stringvalidators.validate_hash(block_hash): raise ValueError

        self.start_time = epoch.get_epoch()
        self.block_hash = reconstructhash.deconstruct_hash(block_hash)
        self.total_fail_count: int = 0
        self.total_success_count: int = 0
        self.peer_fails = {}
        self.peer_exists = {}
Example #26
def deleteExpiredKeys():
    # Fetch the keys we generated for the peer, that are still around
    conn = sqlite3.connect(dbfiles.forward_keys_db, timeout=10)
    c = conn.cursor()

    curTime = epoch.get_epoch()
    c.execute("DELETE from myForwardKeys where expire <= ?", (curTime, ))
    conn.commit()
    conn.execute("VACUUM")
    conn.close()
    return
Example #27
def deleteTheirExpiredKeys(pubkey):
    conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
    c = conn.cursor()

    # Prepare the delete parameters
    command = (pubkey, epoch.get_epoch())

    c.execute("DELETE from forwardKeys where peerKey = ? and expire <= ?",
              command)

    conn.commit()
    conn.close()
Example #28
File: __init__.py Project: infoabcd/inti
    def run_tests(self):
        tests = list(RUN_TESTS)
        SystemRandom().shuffle(tests)
        cur_time = epoch.get_epoch()
        logger.info(f"Doing runtime tests at {cur_time}")

        try:
            os.remove(SUCCESS_FILE)
        except FileNotFoundError:
            pass

        done_count: int = 0
        total_to_do: int = len(tests)

        try:
            for i in tests:
                last = i
                logger.info("[RUNTIME TEST] " + last.__name__ + " started",
                            terminal=True,
                            timestamp=True)
                i(self)
                done_count += 1
                logger.info("[RUNTIME TEST] " + last.__name__ +
                            f" passed {done_count}/{total_to_do}",
                            terminal=True,
                            timestamp=True)
        except (ValueError, AttributeError):
            logger.error(last.__name__ + ' failed assertions', terminal=True)
        except Exception as e:
            logger.error(last.__name__ +
                         ' failed with non-asserting exception')
            logger.error(str(e))
        else:
            ep = str(epoch.get_epoch())
            logger.info(f'All runtime tests passed at {ep}', terminal=True)
            with open(SUCCESS_FILE, 'w') as f:
                f.write(ep)
Example #29
def on_circlesend_cmd(api, data=None):
    err_msg = "Second arg is board name, third is quoted message"
    try:
        sys.argv[2]
    except IndexError:
        logger.error(err_msg, terminal=True)
        return
    try:
        sys.argv[3]
    except IndexError:
        logger.error(err_msg, terminal=True)
        return

    bl = onionrblocks.insert(sys.argv[3], header='brd',
            expire=(EXPIRE_TIME + epoch.get_epoch()),
            meta={'ch': sys.argv[2]})
    print(bl)
Example #30
def get_expired_blocks():
    '''Returns a list of expired blocks'''
    conn = sqlite3.connect(dbfiles.block_meta_db,
                           timeout=onionrvalues.DATABASE_LOCK_TIMEOUT)
    c = conn.cursor()
    date = int(epoch.get_epoch())

    compiled = (date, )
    execute = 'SELECT hash FROM hashes WHERE expire <= ? ORDER BY dateReceived;'

    rows = list()
    for row in c.execute(execute, compiled):
        for i in row:
            rows.append(i)
    conn.close()
    return rows