async def query(args):
    env = Env()
    db = DB(env)
    coin = env.coin

    await db._open_dbs(False)

    if not args.scripts:
        await print_stats(db.hist_db, db.utxo_db)
        return
    limit = args.limit
    for arg in args.scripts:
        hashX = arg_to_hashX(coin, arg)
        if not hashX:
            continue
        n = None
        for n, (tx_hash, height) in enumerate(
                db.get_history(hashX, limit), start=1):
            print(f'History #{n:,d}: height {height:,d} '
                  f'tx_hash {hash_to_hex_str(tx_hash)}')
        if n is None:
            print('No history found')
        n = None
        for n, utxo in enumerate(db.get_utxos(hashX, limit), start=1):
            print(f'UTXO #{n:,d}: tx_hash {hash_to_hex_str(utxo.tx_hash)} '
                  f'tx_pos {utxo.tx_pos:,d} height {utxo.height:,d} '
                  f'value {utxo.value:,d}')
        if n is None:
            print('No UTXOs found')
        balance = db.get_balance(hashX)
        print(f'Balance: {coin.decimal_value(balance):,f} '
              f'{coin.SHORTNAME}')
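# A hypothetical driver for query() above.  The '--limit' flag and the
# 'scripts' positional are assumptions about what 'args' carries, not
# necessarily the project's actual command-line interface.
import argparse
import asyncio

def main():
    parser = argparse.ArgumentParser(
        'query', description='Report history, UTXOs and balance')
    parser.add_argument('-l', '--limit', type=int, default=10,
                        help='maximum entries to print per script')
    parser.add_argument('scripts', nargs='*', default=[],
                        help='addresses or scripts to look up')
    asyncio.run(query(parser.parse_args()))

if __name__ == '__main__':
    main()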
def main():
    env = Env()
    bp = DB(env)
    coin = env.coin
    if len(sys.argv) == 1:
        count_entries(bp.hist_db, bp.utxo_db)
        return
    argc = 1
    try:
        limit = int(sys.argv[argc])
        argc += 1
    except Exception:
        limit = 10
    for addr in sys.argv[argc:]:
        print('Address: ', addr)
        hashX = coin.address_to_hashX(addr)
        for n, (tx_hash, height) in enumerate(bp.get_history(hashX, limit)):
            print('History #{:d}: hash: {} height: {:d}'.format(
                n + 1, hash_to_hex_str(tx_hash), height))
        n = None
        for n, utxo in enumerate(bp.get_utxos(hashX, limit)):
            print('UTXOs #{:d}: hash: {} pos: {:d} height: {:d} value: {:d}'
                  .format(n + 1, hash_to_hex_str(utxo.tx_hash),
                          utxo.tx_pos, utxo.height, utxo.value))
        if n is None:
            print('No UTXOs')
        balance = bp.get_balance(hashX)
        print('Balance: {} {}'.format(coin.decimal_value(balance),
                                      coin.SHORTNAME))
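# Hypothetical entry point for the sys.argv-based variant above.  With
# no arguments it counts DB entries; an optional leading integer caps
# the number of rows printed, e.g.:
#
#   python query.py 5 <address>
#
if __name__ == '__main__':
    main()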
async def serve(self, shutdown_event):
    '''Start the RPC server and wait for the mempool to synchronize.

    Then start serving external clients.
    '''
    if not (0, 7, 1) <= aiorpcx_version < (0, 8):
        raise RuntimeError('aiorpcX version 0.7.x required with x >= 1')

    env = self.env
    min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
    self.logger.info(f'software version: {electrumx.version}')
    self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
    self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
    self.logger.info(f'event loop policy: {env.loop_policy}')
    self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

    notifications = Notifications()
    Daemon = env.coin.DAEMON
    BlockProcessor = env.coin.BLOCK_PROCESSOR

    daemon = Daemon(env.coin, env.daemon_url)
    db = DB(env)
    bp = BlockProcessor(env, db, daemon, notifications)

    # Set ourselves up to implement the MemPoolAPI
    self.height = daemon.height
    self.cached_height = daemon.cached_height
    self.mempool_hashes = daemon.mempool_hashes
    self.raw_transactions = daemon.getrawtransactions
    self.lookup_utxos = db.lookup_utxos
    self.on_mempool = notifications.on_mempool
    MemPoolAPI.register(Controller)
    mempool = MemPool(env.coin, self)

    session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                 notifications, shutdown_event)

    # Test daemon authentication, and also ensure it has a cached
    # height.  Do this before entering the task group.
    await daemon.height()

    caught_up_event = Event()
    serve_externally_event = Event()
    synchronized_event = Event()
    async with TaskGroup() as group:
        await group.spawn(session_mgr.serve(serve_externally_event))
        await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
        await caught_up_event.wait()
        await group.spawn(db.populate_header_merkle_cache())
        await group.spawn(mempool.keep_synchronized(synchronized_event))
        await synchronized_event.wait()
        serve_externally_event.set()
async def serve(self, shutdown_event):
    '''Start the RPC server and wait for the mempool to synchronize.

    Then start serving external clients.
    '''
    if not (0, 22) <= aiorpcx_version < (0, 23):
        raise RuntimeError('aiorpcX version 0.22.x is required')

    env = self.env
    min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
    self.logger.info(f'software version: {electrumx.version}')
    self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
    self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
    self.logger.info(f'event loop policy: {env.loop_policy}')
    self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

    notifications = Notifications()
    Daemon = env.coin.DAEMON
    BlockProcessor = env.coin.BLOCK_PROCESSOR

    async with Daemon(env.coin, env.daemon_url) as daemon:
        db = DB(env)
        bp = BlockProcessor(env, db, daemon, notifications)

        # Set notifications up to implement the MemPoolAPI
        def get_db_height():
            return db.db_height
        notifications.height = daemon.height
        notifications.db_height = get_db_height
        notifications.cached_height = daemon.cached_height
        notifications.mempool_hashes = daemon.mempool_hashes
        notifications.raw_transactions = daemon.getrawtransactions
        notifications.lookup_utxos = db.lookup_utxos
        MemPoolAPI.register(Notifications)
        mempool = MemPool(env.coin, notifications)

        session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                     shutdown_event)

        # Test daemon authentication, and also ensure it has a cached
        # height.  Do this before entering the task group.
        await daemon.height()

        caught_up_event = Event()
        mempool_event = Event()

        async def wait_for_catchup():
            await caught_up_event.wait()
            await group.spawn(db.populate_header_merkle_cache())
            await group.spawn(mempool.keep_synchronized(mempool_event))

        async with TaskGroup() as group:
            await group.spawn(
                session_mgr.serve(notifications, mempool_event))
            await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
            await group.spawn(wait_for_catchup())
            async for task in group:
                if not task.cancelled():
                    task.result()
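# A minimal sketch of how serve() might be driven at startup, assuming
# a Controller class that exposes it.  The construction below and the
# shutdown wiring are illustrative only; a real entry point would set
# the event from SIGINT/SIGTERM handlers.
import asyncio
from asyncio import Event

async def run_server(env):
    shutdown_event = Event()
    controller = Controller(env)    # hypothetical construction
    await controller.serve(shutdown_event)

# e.g. asyncio.run(run_server(Env()))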
async def query(args):
    env = Env()
    db = DB(env)
    coin = env.coin

    await db.open_for_serving()

    if not args.scripts:
        await print_stats(db.hist_db, db.utxo_db)
        return
    limit = args.limit
    for arg in args.scripts:
        hashX = arg_to_hashX(coin, arg)
        if not hashX:
            continue
        n = None
        history = await db.limited_history(hashX, limit=limit)
        for n, (tx_hash, height) in enumerate(history, start=1):
            print(f'History #{n:,d}: height {height:,d} '
                  f'tx_hash {hash_to_hex_str(tx_hash)}')
        if n is None:
            print('No history found')
        n = None
        utxos = await db.all_utxos(hashX)
        for n, utxo in enumerate(utxos, start=1):
            print(f'UTXO #{n:,d}: tx_hash {hash_to_hex_str(utxo.tx_hash)} '
                  f'tx_pos {utxo.tx_pos:,d} height {utxo.height:,d} '
                  f'value {utxo.value:,d}')
            if n == limit:
                break
        if n is None:
            print('No UTXOs found')
        balance = sum(utxo.value for utxo in utxos)
        print(f'Balance: {coin.decimal_value(balance):,f} '
              f'{coin.SHORTNAME}')
def compact_history():
    if sys.version_info < (3, 6):
        raise RuntimeError('Python >= 3.6 is required to run ElectrumX')

    environ['DAEMON_URL'] = ''    # Avoid Env erroring out
    env = Env()
    db = DB(env)
    assert not db.first_sync
    history = db.history
    # Continue where we left off, if interrupted
    if history.comp_cursor == -1:
        history.comp_cursor = 0

    history.comp_flush_count = max(history.comp_flush_count, 1)
    limit = 8 * 1000 * 1000

    while history.comp_cursor != -1:
        history._compact_history(limit)

    # When completed also update the UTXO flush count
    db.set_flush_count(history.flush_count)
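# A sketch of a command-line wrapper for compact_history(); the logging
# setup and messages are illustrative, not the project's actual script.
import logging
import sys
import traceback

def main():
    logging.basicConfig(level=logging.INFO)
    logging.info('Starting history compaction...')
    try:
        compact_history()
    except Exception:
        traceback.print_exc()
        logging.critical('History compaction terminated abnormally')
        sys.exit(1)
    logging.info('History compaction complete')

if __name__ == '__main__':
    main()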
def run_test(db_dir):
    environ.clear()
    environ['DB_DIRECTORY'] = db_dir
    environ['DAEMON_URL'] = ''
    environ['COIN'] = 'BitcoinCash'
    env = Env()
    history = DB(env).history

    # Test abstract compaction
    check_hashX_compaction(history)
    # Now test with random data
    histories = create_histories(history)
    check_written(history, histories)
    compact_history(history)
    check_written(history, histories)
async def run_test(db_dir):
    environ.clear()
    environ['DB_DIRECTORY'] = db_dir
    environ['DAEMON_URL'] = ''
    environ['COIN'] = 'Bitcoin'
    db = DB(Env())
    await db.open_for_serving()
    history = db.history

    # Test abstract compaction
    check_hashX_compaction(history)
    # Now test with random data
    histories = create_histories(history)
    check_written(history, histories)
    compact_history(history)
    check_written(history, histories)
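# One way the async variant of run_test() might be exercised, assuming
# pytest with the pytest-asyncio plugin; the fixture wiring here is an
# assumption about the surrounding test harness.
import pytest

@pytest.mark.asyncio
async def test_history_compaction(tmp_path):
    await run_test(str(tmp_path))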