Example #1
0
def test_formatted_time():
    '''Exercise util.formatted_time() across unit boundaries and separators.'''
    expectations = [
        (0, '00s'),
        (59, '59s'),
        (60, '01m 00s'),
        (3599, '59m 59s'),
        (3600, '01h 00m 00s'),
        (3600 * 24, '1d 00h 00m'),
        (3600 * 24 * 367, '367d 00h 00m'),
    ]
    for seconds, expected in expectations:
        assert util.formatted_time(seconds) == expected
    # A custom separator replaces the default single space.
    assert util.formatted_time(3600 * 24, ':') == '1d:00h:00m'
Example #2
0
 def server_status(self):
     '''A one-line summary of server state.

     Returns a dict with the current DB height, the count of
     transactions sent, and a human-readable uptime string.
     '''
     # NOTE(review): the original computed `group_map = self._group_map()`
     # here and never used it; dropped as it has no visible effect on the
     # returned summary -- confirm _group_map() has no required side effects.
     return {
         'height': self.bp.db_height,
         'txs_sent': self.txs_sent,
         'uptime': util.formatted_time(time.time() - self.start_time),
     }
Example #3
0
    def open_dbs(self):
        '''Open the databases.  If already open they are closed and re-opened.

        When syncing we want to reserve a lot of open files for the
        synchronization.  When serving clients we want the open files for
        serving network connections.
        '''
        def log_reason(message, is_for_sync):
            # Helper: log why the DBs are being (re)opened.
            reason = 'sync' if is_for_sync else 'serving'
            self.logger.info('{} for {}'.format(message, reason))

        # Assume we're serving until we find out otherwise
        for for_sync in [False, True]:
            if self.utxo_db:
                # Already open in the desired mode -- nothing to do.
                if self.utxo_db.for_sync == for_sync:
                    return
                # Wrong mode: close all four DBs before re-opening below.
                log_reason('closing DB to re-open', for_sync)
                self.utxo_db.close()
                self.hist_db.close()
                self.eventlog_db.close()
                self.hashY_db.close()

            # Open DB and metadata files.  Record some of its state.
            self.utxo_db = self.db_class('utxo', for_sync)
            self.hist_db = self.db_class('hist', for_sync)
            self.eventlog_db = self.db_class('eventlog', for_sync)
            self.hashY_db = self.db_class('hashY', for_sync)

            if self.utxo_db.is_new:
                # Fresh database: create the metadata directory and a COIN
                # marker file identifying the coin/network this DB serves.
                self.logger.info('created new database')
                self.logger.info('creating metadata directory')
                os.mkdir('meta')
                with util.open_file('COIN', create=True) as f:
                    f.write(
                        'ElectrumX databases and metadata for {} {}'.format(
                            self.coin.NAME, self.coin.NET).encode())
            else:
                log_reason('opened DB', self.utxo_db.for_sync)

            self.read_utxo_state()
            # The persisted first_sync flag decides the final mode: stop if
            # it matches the mode we opened with, otherwise loop once more
            # with for_sync=True.
            if self.first_sync == self.utxo_db.for_sync:
                break

        self.read_history_state()
        self.read_eventlog_state()

        # Log a summary of the state just read back from the DBs.
        self.logger.info('DB version: {:d}'.format(self.db_version))
        self.logger.info('coin: {}'.format(self.coin.NAME))
        self.logger.info('network: {}'.format(self.coin.NET))
        self.logger.info('height: {:,d}'.format(self.db_height))
        self.logger.info('tip: {}'.format(hash_to_str(self.db_tip)))
        self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
        self.logger.info('flush count: {:,d}'.format(self.flush_count))
        self.logger.info('eventlog flush count: {:,d}'.format(
            self.eventlog_flush_count))
        if self.first_sync:
            self.logger.info('sync time so far: {}'.format(
                util.formatted_time(self.wall_time)))
Example #4
0
    def sessions_text_lines(data):
        '''A generator returning lines for a list of sessions.

        data is the return value of rpc_sessions().'''
        fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} '
               '{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
        yield fmt.format('ID', 'Flags', 'Client', 'Reqs', 'Txs', 'Subs',
                         'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer')
        # The last tuple element was previously named `time`, shadowing the
        # time module used elsewhere in this file; renamed to session_time.
        for (id_, flags, peer, client, reqs, txs_sent, subs, recv_count,
             recv_size, send_count, send_size, session_time) in data:
            yield fmt.format(id_, flags, client, '{:,d}'.format(reqs),
                             '{:,d}'.format(txs_sent), '{:,d}'.format(subs),
                             '{:,d}'.format(recv_count),
                             '{:,d}'.format(recv_size // 1024),
                             '{:,d}'.format(send_count),
                             '{:,d}'.format(send_size // 1024),
                             util.formatted_time(session_time, sep=''), peer)
 def getinfo(self):
     '''Return a one-line summary of server state as a dict.'''
     sessions = self.sessions
     return {
         'daemon': self.daemon.logged_url(),
         'daemon_height': self.daemon.cached_height(),
         'db_height': self.bp.db_height,
         'closing': sum(1 for s in sessions if s.is_closing()),
         'errors': sum(s.error_count for s in sessions),
         'groups': len(self.groups),
         'logged': sum(1 for s in sessions if s.log_me),
         'paused': sum(s.pause for s in sessions),
         'pid': os.getpid(),
         'peers': self.peer_mgr.info(),
         'requests': sum(s.count_pending_items() for s in sessions),
         'sessions': self.session_count(),
         'subs': self.sub_count(),
         'txs_sent': self.txs_sent,
         'uptime': util.formatted_time(time.time() - self.start_time),
     }
Example #6
0
 def time_fmt(t):
     '''Format timestamp t as an age relative to `now`; 'Never' if unset.'''
     # A falsy timestamp (0 / None) means the event never happened.
     return util.formatted_time(now - t) if t else 'Never'
Example #7
0
    def flush(self, flush_utxos=False):
        '''Flush out cached state.

        History is always flushed.  UTXOs are flushed if flush_utxos.'''
        # Nothing new since the last flush: just sanity-check and return.
        if self.height == self.db_height:
            self.assert_flushed()
            return

        flush_start = time.time()
        last_flush = self.last_flush
        tx_diff = self.tx_count - self.last_flush_tx_count

        # Flush to file system
        self.fs_flush()
        fs_end = time.time()
        if self.utxo_db.for_sync:
            self.logger.info('flushed to FS in {:.1f}s'.format(fs_end -
                                                               flush_start))

        # History next - it's fast and frees memory
        self.flush_history(self.history)
        if self.utxo_db.for_sync:
            self.logger.info(
                'flushed history in {:.1f}s for {:,d} addrs'.format(
                    time.time() - fs_end, len(self.history)))
        # Reset the in-memory history cache ('I' = unsigned 32-bit entries).
        self.history = defaultdict(partial(array.array, 'I'))
        self.history_size = 0

        # Flush state last as it reads the wall time.
        with self.utxo_db.write_batch() as batch:
            if flush_utxos:
                self.flush_utxos(batch)
            self.flush_state(batch)

        # Update and put the wall time again - otherwise we drop the
        # time it took to commit the batch
        self.flush_state(self.utxo_db)

        # self.last_flush is presumably refreshed by flush_state() above --
        # the duration below relies on that; verify against flush_state.
        self.logger.info(
            'flush #{:,d} took {:.1f}s.  Height {:,d} txs: {:,d}'.format(
                self.flush_count, self.last_flush - flush_start, self.height,
                self.tx_count))

        # Catch-up stats
        if self.utxo_db.for_sync:
            tx_per_sec = int(self.tx_count / self.wall_time)
            # The +1 guards against a zero rate (and hence a zero divisor
            # in the ETA computation below).
            this_tx_per_sec = 1 + int(tx_diff / (self.last_flush - last_flush))
            self.logger.info('tx/sec since genesis: {:,d}, '
                             'since last flush: {:,d}'.format(
                                 tx_per_sec, this_tx_per_sec))

            daemon_height = self.daemon.cached_height()
            # Estimate remaining transactions: past TX_COUNT_HEIGHT use the
            # per-block average; before it, extrapolate from the coin's
            # known totals.
            if self.height > self.coin.TX_COUNT_HEIGHT:
                tx_est = (daemon_height - self.height) * self.coin.TX_PER_BLOCK
            else:
                tx_est = ((daemon_height - self.coin.TX_COUNT_HEIGHT) *
                          self.coin.TX_PER_BLOCK +
                          (self.coin.TX_COUNT - self.tx_count))

            # Damp the enthusiasm
            realism = 2.0 - 0.9 * self.height / self.coin.TX_COUNT_HEIGHT
            tx_est *= max(realism, 1.0)

            # NOTE(review): bare `formatted_time` here vs `util.formatted_time`
            # elsewhere in this file -- confirm it is imported directly.
            self.logger.info('sync time: {}  ETA: {}'.format(
                formatted_time(self.wall_time),
                formatted_time(tx_est / this_tx_per_sec)))