def continuously_index(self) -> None:
    """
    Runs the indexer to fill in transactions in the past and new ones as they come in.

    Phase 1 (backfill): splits the range [our_chainhead, network_chainhead)
    evenly across ``self.block_threads`` daemon threads, each running
    ``self.index`` on its sub-range.

    Phase 2 (follow): loops forever, sleeping ``self.chainhead_sleep_time_s``
    between polls of the network chainhead. Before indexing the newly arrived
    blocks it compares the locally stored hash of the previously seen
    chainhead block against the network's hash for the same height; on a
    mismatch it delegates to ``self.handle_reorg`` (presumably returning the
    block number to resume indexing from — TODO confirm) and re-indexes from
    there. Each catch-up is dispatched on a fresh daemon thread.

    Never returns under normal operation.
    """
    # Starting point for the backfill. The commented-out call suggests this
    # was meant to resume from the latest locally stored block — TODO confirm.
    our_chainhead = 0  # Block.get_latest_block_number()
    network_chainhead = self.blockchain.get_latest_block_number()
    # Round up as it never hurts to do more than necessary; the last thread's
    # range may extend slightly past the chainhead (index() tolerates gaps).
    blocks_per_thread = math.ceil(
        (network_chainhead - our_chainhead) / self.block_threads)
    for i in range(self.block_threads):
        # Contiguous, non-overlapping sub-ranges: thread i gets
        # [start, end) since index() iterates range(start, end).
        start = our_chainhead + (i * blocks_per_thread)
        end = our_chainhead + ((i + 1) * blocks_per_thread)
        thread = Thread(target=self.index, args=(start, end), daemon=True)
        thread.start()
    while True:
        # wait a lil bit for more blocks to come in
        time.sleep(self.chainhead_sleep_time_s)
        previous = network_chainhead
        network_chainhead = self.blockchain.get_latest_block_number()
        # check if our old chainhead is still correct, if not... fix reorg.
        # Only possible if we have actually stored block `previous` locally.
        if Block.has_block(previous):
            block = Block.get_block(previous)
            our_block_hash = block.get_hash().get()
            network_block = self.blockchain.get_block(previous)
            network_block_hash = network_block.get_hash().get()
            if network_block_hash != our_block_hash:
                # Hash mismatch => chain reorganization; resume from the
                # block number handle_reorg reports.
                previous = self.handle_reorg(previous)
        # Index the new range [previous, network_chainhead). The block at
        # network_chainhead itself is picked up on the next iteration,
        # because index()'s range end is exclusive.
        thread = Thread(target=self.index,
                        args=(previous, network_chainhead),
                        daemon=True)
        thread.start()
def index(self, start_block: int, end_block: int) -> None:
    """
    Gets blockchain data for the range of blocks from start_block to end_block.

    For each block in ``range(start_block, end_block)`` that is not already
    stored locally, fetches the block from the network, saves every
    transaction in it, then saves the block itself. Blocks that fail to fetch
    or save are logged and skipped — a later pass can fill the gap.

    :param start_block: The block to start indexing at (inclusive)
    :param end_block: The block to end indexing at (exclusive)
    """
    logging.info(f"Indexing from block {start_block} to block {end_block}")
    for block_number in range(start_block, end_block):
        # Guard clause: skip work already done (also makes re-runs cheap).
        if Block.has_block(block_number):
            logging.info(f"Already indexed block {block_number}")
            continue
        try:
            block: Block = self.blockchain.get_block(block_number)
            logging.info(
                f"Indexing {len(block.get_transactions())} transactions in block {block_number}"
            )
            # Save transactions before the block so a stored block implies
            # its transactions were stored too.
            for tx in block.get_transactions():
                tx.save()
            block.save()
        except Exception:
            # Best-effort indexing is intentional (other blocks must still be
            # processed), but the failure must not be silent — the original
            # `continue` swallowed the error and left untraceable gaps.
            logging.exception(
                f"Failed to index block {block_number}; skipping")
            continue