def test_formatted_time():
    """Check util.formatted_time() across unit boundaries and with a
    custom separator."""
    # (seconds, expected) pairs using the default separator
    default_sep_cases = [
        (0, '00s'),
        (59, '59s'),
        (60, '01m 00s'),
        (3599, '59m 59s'),
        (3600, '01h 00m 00s'),
        (3600 * 24, '1d 00h 00m'),
        (3600 * 24 * 367, '367d 00h 00m'),
    ]
    for seconds, expected in default_sep_cases:
        assert util.formatted_time(seconds) == expected
    # Explicit separator replaces the default space
    assert util.formatted_time(3600 * 24, ':') == '1d:00h:00m'
def read_utxo_state(self):
    '''Load UTXO DB state into attributes, validating version and genesis.

    A missing b'state' key means a fresh database, in which case default
    "empty chain" values are installed.  Raises self.DBError when the
    stored state is malformed, the DB version is unsupported, or the
    genesis hash does not match the configured coin.
    '''
    raw_state = self.utxo_db.get(b'state')
    if raw_state:
        parsed = ast.literal_eval(raw_state.decode())
        if not isinstance(parsed, dict):
            raise self.DBError('failed reading state from DB')
        self.db_version = parsed['db_version']
        if self.db_version not in self.DB_VERSIONS:
            raise self.DBError('your UTXO DB version is {} but this '
                               'software only handles versions {}'
                               .format(self.db_version, self.DB_VERSIONS))
        # backwards compat: older DBs stored the genesis hash as bytes
        genesis_hash = parsed['genesis']
        if isinstance(genesis_hash, bytes):
            genesis_hash = genesis_hash.decode()
        if genesis_hash != self.coin.GENESIS_HASH:
            raise self.DBError('DB genesis hash {} does not match coin {}'
                               .format(genesis_hash, self.coin.GENESIS_HASH))
        self.db_height = parsed['height']
        self.db_tx_count = parsed['tx_count']
        self.db_tip = parsed['tip']
        self.utxo_flush_count = parsed['utxo_flush_count']
        self.wall_time = parsed['wall_time']
        self.first_sync = parsed['first_sync']
    else:
        # Fresh database: synthesize an empty-chain state
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = b'\0' * 32
        self.db_version = max(self.DB_VERSIONS)
        self.utxo_flush_count = 0
        self.wall_time = 0
        self.first_sync = True

    # These are our state as we move ahead of DB state
    self.fs_height = self.db_height
    self.fs_tx_count = self.db_tx_count
    self.last_flush_tx_count = self.fs_tx_count

    # Upgrade DB in place if it is an older (but supported) version
    if self.db_version != max(self.DB_VERSIONS):
        self.upgrade_db()

    # Log some stats
    self.logger.info('UTXO DB version: {:d}'.format(self.db_version))
    self.logger.info('coin: {}'.format(self.coin.NAME))
    self.logger.info('network: {}'.format(self.coin.NET))
    self.logger.info('height: {:,d}'.format(self.db_height))
    self.logger.info('tip: {}'.format(hash_to_hex_str(self.db_tip)))
    self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
    if self.utxo_db.for_sync:
        self.logger.info(f'flushing DB cache at {self.env.cache_MB:,d} MB')
    if self.first_sync:
        self.logger.info('sync time so far: {}'
                         .format(util.formatted_time(self.wall_time)))
def open_dbs(self):
    '''Open the databases.  If already open they are closed and re-opened.

    When syncing we want to reserve a lot of open files for the
    synchronization.  When serving clients we want the open files for
    serving network connections.
    '''
    def log_reason(message, is_for_sync):
        # Helper: annotate the log line with the mode the DB is opened for.
        reason = 'sync' if is_for_sync else 'serving'
        self.logger.info('{} for {}'.format(message, reason))

    # Assume we're serving until we find out otherwise.  The loop runs at
    # most twice: first open in serving mode; if the recorded state says we
    # are still in the initial sync, close and re-open in sync mode.
    for for_sync in [False, True]:
        if self.utxo_db:
            # Already open in the right mode: nothing to do.
            if self.utxo_db.for_sync == for_sync:
                return
            log_reason('closing DB to re-open', for_sync)
            self.utxo_db.close()
            self.history.close_db()

        # Open DB and metadata files.  Record some of its state.
        self.utxo_db = self.db_class('utxo', for_sync)
        if self.utxo_db.is_new:
            self.logger.info('created new database')
            self.logger.info('creating metadata directory')
            os.mkdir('meta')
            # Stamp the metadata directory with the coin/network so a later
            # start-up against a different coin can be detected.
            with util.open_file('COIN', create=True) as f:
                f.write(
                    'ElectrumX databases and metadata for {} {}'.format(
                        self.coin.NAME, self.coin.NET).encode())
        else:
            log_reason('opened DB', self.utxo_db.for_sync)

        self.read_utxo_state()
        # The mode matches what the state requires: stop re-opening.
        if self.first_sync == self.utxo_db.for_sync:
            break

    # Open history DB, clear excess history.  `for_sync` retains its value
    # from the final loop iteration.
    self.utxo_flush_count = self.history.open_db(self.db_class, for_sync,
                                                 self.utxo_flush_count)
    self.clear_excess_undo_info()

    self.logger.info('DB version: {:d}'.format(self.db_version))
    self.logger.info('coin: {}'.format(self.coin.NAME))
    self.logger.info('network: {}'.format(self.coin.NET))
    self.logger.info('height: {:,d}'.format(self.db_height))
    self.logger.info('tip: {}'.format(hash_to_str(self.db_tip)))
    self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
    if self.first_sync:
        self.logger.info('sync time so far: {}'.format(
            util.formatted_time(self.wall_time)))
def sessions_text_lines(data):
    '''A generator returning lines for a list of sessions.

    data is the return value of rpc_sessions().'''
    fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} '
           '{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
    # Header row first
    yield fmt.format('ID', 'Flags', 'Client', 'Proto',
                     'Reqs', 'Txs', 'Subs',
                     'Recv', 'Recv KB', 'Sent',
                     'Sent KB', 'Time', 'Peer')
    # Bind the thousands-separator formatter once for the loop
    comma = '{:,d}'.format
    for row in data:
        (id_, flags, peer, client, proto, reqs, txs_sent, subs,
         recv_count, recv_size, send_count, send_size, time) = row
        yield fmt.format(id_, flags, client, proto,
                         comma(reqs), comma(txs_sent), comma(subs),
                         comma(recv_count), comma(recv_size // 1024),
                         comma(send_count), comma(send_size // 1024),
                         util.formatted_time(time, sep=''), peer)
def sessions_lines(data):
    '''A generator returning lines for a list of sessions.

    data is the return value of rpc_sessions().'''
    fmt = ('{:<6} {:<5} {:>17} {:>5} '
           '{:>7} {:>7} {:>5} {:>5} {:>7} '
           '{:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
    # Header row first
    yield fmt.format('ID', 'Flags', 'Client', 'Proto', 'Cost', 'XCost',
                     'Reqs', 'Txs', 'Subs', 'Recv', 'Recv KB', 'Sent',
                     'Sent KB', 'Time', 'Peer')
    for (id_, flags, peer, client, proto, cost, extra_cost, reqs,
         txs_sent, subs, recv_count, recv_size, send_count, send_size,
         conn_time) in data:
        # Build the row's columns, then format in one shot
        columns = [
            id_, flags, client, proto,
            f'{int(cost):,d}', f'{int(extra_cost):,d}',
            f'{reqs:,d}', f'{txs_sent:,d}', f'{subs:,d}',
            f'{recv_count:,d}', f'{recv_size // 1024:,d}',
            f'{send_count:,d}', f'{send_size // 1024:,d}',
            util.formatted_time(conn_time, sep=''), peer,
        ]
        yield fmt.format(*columns)
def _get_info(self):
    '''A summary of server state.'''
    sessions = self.sessions
    group_map = self._group_map()
    # Pre-compute the per-session counts
    closing_count = sum(1 for s in sessions if s.is_closing())
    logged_count = sum(1 for s in sessions if s.log_me)
    paused_count = sum(not s._can_send.is_set() for s in sessions)
    return {
        'closing': closing_count,
        'daemon': self.daemon.logged_url(),
        'daemon_height': self.daemon.cached_height(),
        'db_height': self.db.db_height,
        'errors': sum(s.errors for s in sessions),
        'groups': len(group_map),
        'logged': logged_count,
        'paused': paused_count,
        'pid': os.getpid(),
        'peers': self.peer_mgr.info(),
        'requests': sum(s.count_pending_items() for s in sessions),
        'sessions': self.session_count(),
        'subs': self._sub_count(),
        'txs_sent': self.txs_sent,
        'uptime': util.formatted_time(time.time() - self.start_time),
        'version': electrumx.version,
    }
def getinfo(self):
    '''A one-line summary of server state.'''
    sessions = self.sessions
    group_map = self._group_map()
    # Pre-compute the per-session counts
    closing_count = sum(1 for s in sessions if s.is_closing())
    logged_count = sum(1 for s in sessions if s.log_me)
    paused_count = sum(s.paused for s in sessions)
    return {
        'version': VERSION,
        'daemon': self.daemon.logged_url(),
        'daemon_height': self.daemon.cached_height(),
        'db_height': self.bp.db_height,
        'closing': closing_count,
        'errors': sum(s.rpc.errors for s in sessions),
        'groups': len(group_map),
        'logged': logged_count,
        'paused': paused_count,
        'pid': os.getpid(),
        'peers': self.peer_mgr.info(),
        'requests': sum(s.count_pending_items() for s in sessions),
        'sessions': self.session_count(),
        'subs': self.sub_count(),
        'txs_sent': self.txs_sent,
        'uptime': util.formatted_time(time.time() - self.start_time),
    }
def flush(self, flush_utxos=False):
    '''Flush out cached state.

    History is always flushed.  UTXOs are flushed if flush_utxos.
    Order matters: file system first, then history, then the state
    record last, because the state reads the wall time.
    '''
    # Nothing new since the last flush: just sanity-check and return.
    if self.height == self.db_height:
        self.assert_flushed()
        return

    flush_start = time.time()
    last_flush = self.last_flush
    tx_diff = self.tx_count - self.last_flush_tx_count

    # Flush to file system
    self.fs_flush()
    fs_end = time.time()
    if self.utxo_db.for_sync:
        self.logger.info('flushed to FS in {:.1f}s'.format(fs_end - flush_start))

    # History next - it's fast and frees memory
    hashX_count = self.history.flush()
    if self.utxo_db.for_sync:
        self.logger.info(
            'flushed history in {:.1f}s for {:,d} addrs'.format(
                time.time() - fs_end, hashX_count))

    # Flush state last as it reads the wall time.
    with self.utxo_db.write_batch() as batch:
        if flush_utxos:
            self.flush_utxos(batch)
        self.flush_state(batch)

    # Update and put the wall time again - otherwise we drop the
    # time it took to commit the batch
    self.flush_state(self.utxo_db)

    self.logger.info(
        'flush #{:,d} took {:.1f}s. Height {:,d} txs: {:,d}'.format(
            self.history.flush_count, self.last_flush - flush_start,
            self.height, self.tx_count))

    # Catch-up stats: only meaningful while syncing.
    # NOTE(review): tx_per_sec divides by self.wall_time and
    # this_tx_per_sec by the inter-flush interval - presumably both are
    # non-zero by the time a sync flush happens; verify against callers.
    if self.utxo_db.for_sync:
        tx_per_sec = int(self.tx_count / self.wall_time)
        this_tx_per_sec = 1 + int(tx_diff / (self.last_flush - last_flush))
        self.logger.info('tx/sec since genesis: {:,d}, '
                         'since last flush: {:,d}'.format(
                             tx_per_sec, this_tx_per_sec))

        daemon_height = self.daemon.cached_height()
        # Estimate remaining transactions: past TX_COUNT_HEIGHT use the
        # per-block average; before it, interpolate from the coin's known
        # TX_COUNT checkpoint.
        if self.height > self.coin.TX_COUNT_HEIGHT:
            tx_est = (daemon_height - self.height) * self.coin.TX_PER_BLOCK
        else:
            tx_est = ((daemon_height - self.coin.TX_COUNT_HEIGHT)
                      * self.coin.TX_PER_BLOCK
                      + (self.coin.TX_COUNT - self.tx_count))

        # Damp the enthusiasm: inflate the estimate early in the sync,
        # tapering to no adjustment as height approaches TX_COUNT_HEIGHT.
        realism = 2.0 - 0.9 * self.height / self.coin.TX_COUNT_HEIGHT
        tx_est *= max(realism, 1.0)

        self.logger.info('sync time: {} ETA: {}'.format(
            formatted_time(self.wall_time),
            formatted_time(tx_est / this_tx_per_sec)))
def time_fmt(t):
    # Render t as elapsed time relative to the enclosing scope's `now`;
    # a falsy timestamp means the event never happened.
    return util.formatted_time(now - t) if t else 'Never'