def reparse(db, block_index=None, quiet=False):
    """Reparse all transactions (atomically). If block_index is set, rollback
    to the end of that block.

    :param db: open database connection (APSW-style; used both as a cursor
        factory and as a transaction context manager).
    :param block_index: if given, reinitialise only back to this block
        instead of wiping the whole state.
    :param quiet: if True, temporarily raise the root logger's level to
        WARNING while blocks are being re-parsed, restoring it afterwards.
    """
    logger.info('Reparsing all transactions.')
    check.software_version()
    cursor = db.cursor()

    if quiet:
        root_logger = logging.getLogger()
        # BUGFIX: save the *root* logger's level, since that is the logger
        # whose level is restored below. The previous code saved
        # `logger.getEffectiveLevel()` (the module logger), which clobbers
        # the root level whenever the module logger has its own level set.
        root_level = root_logger.getEffectiveLevel()

    # Everything below runs in a single transaction so a failed reparse
    # leaves the database untouched.
    with db:
        reinitialise(db, block_index)

        # Reparse all blocks, transactions.
        if quiet:
            root_logger.setLevel(logging.WARNING)
        previous_ledger_hash, previous_txlist_hash = None, None
        cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
        for block in cursor.fetchall():
            logger.info('Block (re‐parse): {}'.format(str(block['block_index'])))
            util.CURRENT_BLOCK_INDEX = block['block_index']
            # Hashes chain from block to block, so they are threaded through
            # the loop.
            previous_ledger_hash, previous_txlist_hash = parse_block(
                db, block['block_index'], block['block_time'],
                previous_ledger_hash, previous_txlist_hash)
        if quiet:
            root_logger.setLevel(root_level)

    # Check for conservation of assets.
    check.asset_conservation(db)

    # Update minor version number. PRAGMA statements cannot take bound
    # parameters, so the value is interpolated; int() keeps that safe.
    cursor.execute('PRAGMA user_version = {}'.format(int(config.VERSION_MINOR)))
    logger.info('Database minor version number updated.')

    cursor.close()
    return
def follow(db):
    """Main event loop: follow the backend blockchain indefinitely.

    Polls the backend for new blocks, detects and rolls back chain
    reorganisations (via ``reparse``), lists and parses every transaction
    in each new block inside a single DB transaction, and — once caught up
    with the chain tip — refreshes the mempool table between polls.

    NOTE: the ``while True`` loop below never exits under normal operation,
    so this function does not return; the trailing ``cursor.close()`` is
    unreachable.
    """
    # Check software version.
    check.software_version()

    # Initialise.
    initialise(db)

    # Get index of last block.
    if util.CURRENT_BLOCK_INDEX == 0:
        logger.warning('New database.')
        block_index = config.BLOCK_FIRST
    else:
        block_index = util.CURRENT_BLOCK_INDEX + 1

        # Check database version (only for a pre-existing database).
        try:
            check.database_version(db)
        except check.DatabaseVersionError as e:
            logger.info(str(e))
            # No need to reparse or rollback a new database.
            if block_index != config.BLOCK_FIRST:
                reparse(db, block_index=e.reparse_block_index, quiet=False)
            database.update_version(db)

    logger.info('Resuming parsing.')

    # Get index of last transaction.
    tx_index = get_next_tx_index(db)

    not_supported = {}   # No false positives. Use a dict to allow for O(1) lookups.
    not_supported_sorted = collections.deque()
    # ^ Entries in form of (block_index, tx_hash), oldest first. Allows for
    # easy removal of past, unnecessary entries.
    mempool_initialised = False

    backend.init_mempool_cache()

    cursor = db.cursor()

    # A reorg can happen without the block count increasing, or even, for
    # that matter, with the block count decreasing. This should only delay
    # processing of the new blocks a bit.
    while True:
        starttime = time.time()

        # Get block count.
        # If the backend is unreachable and `config.FORCE` is set, just sleep
        # and try again repeatedly.
        try:
            block_count = backend.getblockcount()
        except (ConnectionRefusedError, http.client.CannotSendRequest, backend.addrindex.BackendRPCError) as e:
            if config.FORCE:
                time.sleep(config.BACKEND_POLL_INTERVAL)
                continue
            else:
                raise e

        # Get new blocks.
        if block_index <= block_count:

            # Backwards check for incorrect blocks due to chain
            # reorganisation, and stop when a common parent is found.
            current_index = block_index
            requires_rollback = False
            while True:
                if current_index == config.BLOCK_FIRST:
                    break

                logger.debug('Checking that block {} is not an orphan.'.format(current_index))

                # Backend parent hash.
                current_hash = backend.getblockhash(current_index)
                current_cblock = backend.getblock(current_hash)
                backend_parent = SatoshiChainlib.core.b2lx(current_cblock.hashPrevBlock)

                # DB parent hash.
                blocks = list(cursor.execute('''SELECT * FROM blocks
                                                WHERE block_index = ?''', (current_index - 1,)))
                if len(blocks) != 1:  # For empty DB.
                    break
                db_parent = blocks[0]['block_hash']

                # Compare. If the parents disagree, the DB's block is an
                # orphan: step back one block and keep checking.
                assert type(db_parent) == str
                assert type(backend_parent) == str
                if db_parent == backend_parent:
                    break
                else:
                    current_index -= 1
                    requires_rollback = True

            # Rollback for reorganisation.
            if requires_rollback:
                # Record reorganisation.
                logger.warning('Blockchain reorganisation at block {}.'.format(current_index))
                log.message(db, block_index, 'reorg', None, {'block_index': current_index})

                # Rollback the DB to the last common parent, then restart
                # the outer loop from there.
                reparse(db, block_index=current_index - 1, quiet=True)
                block_index = current_index
                tx_index = get_next_tx_index(db)
                continue

            # Check version. (Don't add any blocks to the database while
            # running an out-of-date client!)
            check.software_version()

            # Get and parse transactions in this block (atomically).
            block_hash = backend.getblockhash(current_index)
            block = backend.getblock(block_hash)
            previous_block_hash = SatoshiChainlib.core.b2lx(block.hashPrevBlock)
            block_time = block.nTime
            txhash_list = backend.get_txhash_list(block)
            raw_transactions = backend.getrawtransaction_batch(txhash_list)
            with db:
                util.CURRENT_BLOCK_INDEX = block_index

                # List the block.
                cursor.execute('''INSERT INTO blocks(
                                    block_index,
                                    block_hash,
                                    block_time,
                                    previous_block_hash,
                                    difficulty) VALUES(?,?,?,?,?)''',
                               (block_index,
                                block_hash,
                                block_time,
                                previous_block_hash,
                                block.difficulty))

                # List the transactions in the block.
                for tx_hash in txhash_list:
                    tx_hex = raw_transactions[tx_hash]
                    tx_index = list_tx(db, block_hash, block_index, block_time, tx_hash, tx_index, tx_hex)

                # Parse the transactions in the block.
                parse_block(db, block_index, block_time)

            # When newly caught up, check for conservation of assets.
            if block_index == block_count:
                if config.CHECK_ASSET_CONSERVATION:
                    check.asset_conservation(db)

            # Remove any non-supported transactions older than ten blocks.
            while len(not_supported_sorted) and not_supported_sorted[0][0] <= block_index - 10:
                tx_h = not_supported_sorted.popleft()[1]
                del not_supported[tx_h]

            # NOTE(review): the extra `3` argument to .format() is unused by
            # the "{:.2f}" format string and is silently ignored.
            logger.info('Block: %s (%ss)' % (str(block_index), "{:.2f}".format(time.time() - starttime, 3)))

            # Increment block index.
            block_count = backend.getblockcount()
            block_index += 1

        else:
            # Caught up with the chain tip: refresh the mempool instead.
            # First mempool fill for session?
            if mempool_initialised:
                logger.debug('Updating mempool.')
            else:
                logger.debug('Initialising mempool.')

            # Get old mempool.
            old_mempool = list(cursor.execute('''SELECT * FROM mempool'''))
            old_mempool_hashes = [message['tx_hash'] for message in old_mempool]

            # Fake values for fake block.
            curr_time = int(time.time())
            mempool_tx_index = tx_index

            # For each transaction in Shellcoin Core mempool, if it's new,
            # create a fake block, a fake transaction, capture the generated
            # messages, and then save those messages.
            # Every transaction in mempool is parsed independently. (DB is
            # rolled back after each one.)
            mempool = []
            for tx_hash in backend.getrawmempool():

                # If already in mempool, copy to new one.
                if tx_hash in old_mempool_hashes:
                    for message in old_mempool:
                        if message['tx_hash'] == tx_hash:
                            mempool.append((tx_hash, message))

                # If already skipped, skip it again.
                elif tx_hash not in not_supported:

                    # Else: list, parse and save it. Raising MempoolError
                    # inside `with db:` aborts the transaction, so every
                    # mempool parse leaves the database unchanged.
                    try:
                        with db:
                            # List the fake block.
                            cursor.execute('''INSERT INTO blocks(
                                                block_index,
                                                block_hash,
                                                block_time) VALUES(?,?,?)''',
                                           (config.MEMPOOL_BLOCK_INDEX,
                                            config.MEMPOOL_BLOCK_HASH,
                                            curr_time))

                            # List transaction.
                            try:    # Sometimes the transactions can't be found: `{'code': -5, 'message': 'No information available about transaction'} Is txindex enabled in Shellcoind?`
                                mempool_tx_index = list_tx(db, None, block_index, curr_time, tx_hash, mempool_tx_index)
                            except backend.addrindex.BackendRPCError:
                                raise MempoolError

                            # Parse transaction.
                            cursor.execute('''SELECT * FROM transactions \
                                              WHERE tx_hash = ?''',
                                           (tx_hash,))
                            transactions = list(cursor)
                            if transactions:
                                assert len(transactions) == 1
                                transaction = transactions[0]
                                supported = parse_tx(db, transaction)
                                if not supported:
                                    not_supported[tx_hash] = ''
                                    not_supported_sorted.append((block_index, tx_hash))
                            else:
                                # If a transaction hasn't been added to the
                                # table `transactions`, then it's not a
                                # Shellparty transaction.
                                not_supported[tx_hash] = ''
                                not_supported_sorted.append((block_index, tx_hash))
                                raise MempoolError

                            # Save transaction and side-effects in memory.
                            cursor.execute('''SELECT * FROM messages WHERE block_index = ?''', (config.MEMPOOL_BLOCK_INDEX,))
                            for message in list(cursor):
                                mempool.append((tx_hash, message))

                            # Rollback (deliberate: undo the fake block and
                            # all side effects captured above).
                            raise MempoolError

                    except MempoolError:
                        pass

            # Re-write mempool messages to database.
            with db:
                cursor.execute('''DELETE FROM mempool''')
                for message in mempool:
                    tx_hash, new_message = message
                    new_message['tx_hash'] = tx_hash
                    cursor.execute('''INSERT INTO mempool VALUES(:tx_hash, :command, :category, :bindings, :timestamp)''', (new_message))

            # Wait
            mempool_initialised = True
            db.wal_checkpoint(mode=apsw.SQLITE_CHECKPOINT_PASSIVE)
            time.sleep(config.BACKEND_POLL_INTERVAL)

    # Unreachable: the loop above never terminates.
    cursor.close()