def on_get(self, req, resp):
    sequencer_task = Status.get_status(self.session, 'SEQUENCER_TASK_ID')
    integrity_head = Status.get_status(self.session, 'INTEGRITY_HEAD')
    sequencer_head = self.session.query(func.max(BlockTotal.id)).one()[0]

    best_block = Block.query(self.session).filter_by(
        id=self.session.query(func.max(Block.id)).one()[0]).first()

    if best_block:
        best_block_datetime = best_block.datetime.replace(tzinfo=pytz.UTC).timestamp() * 1000
        best_block_nr = best_block.id
    else:
        best_block_datetime = None
        best_block_nr = None

    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)
    chain_head_block_id = substrate.get_block_number(substrate.get_chain_head())
    chain_finalized_block_id = substrate.get_block_number(substrate.get_chain_finalised_head())

    resp.media = {
        'best_block_datetime': best_block_datetime,
        'best_block_nr': best_block_nr,
        'sequencer_task': sequencer_task.value,
        'sequencer_head': sequencer_head,
        'integrity_head': int(integrity_head.value),
        'chain_head_block_id': chain_head_block_id,
        'chain_finalized_block_id': chain_finalized_block_id
    }
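# A minimal sketch, not part of the original code, of how a Falcon resource exposing
# handlers like on_get above is typically wired to a route; the route path and the
# resource class name are hypothetical.
import falcon


class StatusResource:
    def on_get(self, req, resp):
        # The real resource would run the status queries shown above, using a
        # SQLAlchemy session attached to the resource (e.g. self.session).
        resp.media = {'status': 'ok'}


api = falcon.App()  # falcon.API() on the older Falcon releases this project targets
api.add_route('/harvester/status', StatusResource())  # hypothetical route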
def on_post(self, req, resp):
    sequencer_task = Status.get_status(self.session, 'SEQUENCER_TASK_ID')

    if sequencer_task.value is None:
        # 3. IF NOT RUNNING: set task id in status table
        # Placeholder task id: marks the sequencer as busy while it runs synchronously
        sequencer_task.value = "123"
        sequencer_task.save(self.session)

        harvester = PolkascanHarvesterService(
            db_session=self.session,
            type_registry=TYPE_REGISTRY,
            type_registry_file=TYPE_REGISTRY_FILE
        )
        result = harvester.start_sequencer()

        # Release the sequencer lock again
        sequencer_task.value = None
        sequencer_task.save(self.session)

    # 4. IF RUNNING: check if task id is active
    else:
        # task_result = AsyncResult(sequencer_task)
        # task_result = {'status': task_result.status, 'result': task_result.result}
        sequencer_task.value = None
        sequencer_task.save(self.session)
        result = 'Busy'

    self.session.commit()

    resp.media = {
        'result': result
    }
def start_sequencer(self):
    sequencer_task = Status.get_status(self.session, 'SEQUENCER_TASK_ID')

    if sequencer_task.value:
        task_result = AsyncResult(sequencer_task.value)

        if not task_result or task_result.ready():
            sequencer_task.value = None
            sequencer_task.save(self.session)

    if sequencer_task.value is None:
        sequencer_task.value = self.request.id
        sequencer_task.save(self.session)

        harvester = PolkascanHarvesterService(
            db_session=self.session,
            type_registry=TYPE_REGISTRY,
            type_registry_file=TYPE_REGISTRY_FILE)

        try:
            result = harvester.start_sequencer()
        except BlockIntegrityError as e:
            result = {'result': str(e)}

        sequencer_task.value = None
        sequencer_task.save(self.session)

        self.session.commit()

        # Check if analytics data need to be generated
        # start_generate_analytics.delay()

        return result
    else:
        return {'result': 'Sequencer already running'}
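# A minimal sketch, not taken from the original code, of how a bound Celery task such as
# the start_sequencer task above could be declared; the app name, broker URL and task name
# are assumptions. `bind=True` is what makes the task instance available as `self`, so the
# body can read `self.request.id` and store it as the running SEQUENCER_TASK_ID.
from celery import Celery

capp = Celery('harvester', broker='redis://localhost:6379/0')  # hypothetical broker URL


@capp.task(bind=True)
def start_sequencer_task(self):
    # The real task also opens a DB session and delegates to
    # PolkascanHarvesterService.start_sequencer(), as shown above.
    return {'task_id': self.request.id}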
def on_post(self, req, resp):
    self.session.execute('''TRUNCATE TABLE data_block_total''')
    self.session.execute('''TRUNCATE TABLE data_market_history_1m''')
    self.session.execute('''TRUNCATE TABLE data_market_history_5m''')
    self.session.execute('''TRUNCATE TABLE data_market_history_1h''')
    self.session.execute('''TRUNCATE TABLE data_market_history_1d''')
    self.session.execute('''TRUNCATE TABLE data_session''')
    self.session.execute('''TRUNCATE TABLE data_session_total''')

    sequencer_task = Status.get_status(self.session, 'SEQUENCER_TASK_ID')
    sequencer_task.value = None
    sequencer_task.save(self.session)

    self.session.commit()

    resp.media = {'status': 'success', 'data': {'message': ''}}
def start_sequencer(self):
    integrity_status = self.integrity_checks()
    self.db_session.commit()

    block_nr = None

    integrity_head = Status.get_status(self.db_session, 'INTEGRITY_HEAD')

    if not integrity_head.value:
        integrity_head.value = 0

    # 3. Check sequence head
    sequencer_head = self.db_session.query(func.max(BlockTotal.id)).one()[0]

    if sequencer_head is None:
        sequencer_head = -1

    # Start sequencing process
    sequencer_parent_block = BlockTotal.query(self.db_session).filter_by(id=sequencer_head).first()
    parent_block = Block.query(self.db_session).filter_by(id=sequencer_head).first()

    for block_nr in range(sequencer_head + 1, int(integrity_head.value) + 1):

        if block_nr == 0:
            # No block ever sequenced, check if chain is at genesis state
            assert (not sequencer_parent_block)

            block = Block.query(self.db_session).order_by('id').first()

            if not block:
                self.db_session.commit()
                return {'error': 'Chain not at genesis'}

            if block.id == 1:
                # Add genesis block
                block = self.add_block(block.parent_hash)

            if block.id != 0:
                self.db_session.commit()
                return {'error': 'Chain not at genesis'}

            self.process_genesis(block)

            sequencer_parent_block_data = None
            parent_block_data = None
        else:
            block_id = sequencer_parent_block.id + 1

            assert (block_id == block_nr)

            block = Block.query(self.db_session).get(block_nr)

            if not block:
                self.db_session.commit()
                return {'result': 'Finished at #{}'.format(sequencer_parent_block.id)}

            sequencer_parent_block_data = sequencer_parent_block.asdict()
            parent_block_data = parent_block.asdict()

        sequenced_block = self.sequence_block(block, parent_block_data, sequencer_parent_block_data)
        self.db_session.commit()

        parent_block = block
        sequencer_parent_block = sequenced_block

    if block_nr:
        return {'result': 'Finished at #{}'.format(block_nr)}
    else:
        return {'result': 'Nothing to sequence'}
def integrity_checks(self):
    # 1. Check finalized head
    substrate = SubstrateInterface(settings.SUBSTRATE_RPC_URL)

    if settings.FINALIZATION_BY_BLOCK_CONFIRMATIONS > 0:
        finalized_block_hash = substrate.get_chain_head()
        finalized_block_number = max(
            substrate.get_block_number(finalized_block_hash) - settings.FINALIZATION_BY_BLOCK_CONFIRMATIONS, 0
        )
    else:
        finalized_block_hash = substrate.get_chain_finalised_head()
        finalized_block_number = substrate.get_block_number(finalized_block_hash)

    # 2. Check integrity head
    integrity_head = Status.get_status(self.db_session, 'INTEGRITY_HEAD')

    if not integrity_head.value:
        # Only continue if block #1 exists
        if Block.query(self.db_session).filter_by(id=1).count() == 0:
            raise BlockIntegrityError('Chain not at genesis')

        integrity_head.value = 0
    else:
        integrity_head.value = int(integrity_head.value)

    start_block_id = max(integrity_head.value - 1, 0)
    end_block_id = finalized_block_number
    chunk_size = 1000
    parent_block = None

    if start_block_id < end_block_id:
        # Continue integrity check
        # print('== Start integrity checks from {} to {} =='.format(start_block_id, end_block_id))

        for block_nr in range(start_block_id, end_block_id, chunk_size):
            # TODO replace limit with filter_by block range
            block_range = Block.query(self.db_session).order_by('id')[block_nr:block_nr + chunk_size]

            for block in block_range:
                if parent_block:
                    if block.id != parent_block.id + 1:
                        # Save integrity head if block hash of parent matches with hash in node
                        if parent_block.hash == substrate.get_block_hash(integrity_head.value):
                            integrity_head.save(self.db_session)
                            self.db_session.commit()

                        raise BlockIntegrityError('Block #{} is missing.. stopping check '.format(parent_block.id + 1))

                    elif block.parent_hash != parent_block.hash:
                        self.process_reorg_block(parent_block)
                        self.process_reorg_block(block)

                        self.remove_block(block.hash)
                        self.remove_block(parent_block.hash)
                        self.db_session.commit()

                        self.add_block(substrate.get_block_hash(block.id))
                        self.add_block(substrate.get_block_hash(parent_block.id))
                        self.db_session.commit()

                        integrity_head.value = parent_block.id - 1

                        # Save integrity head if block hash of parent matches with hash in node
                        # if parent_block.parent_hash == substrate.get_block_hash(integrity_head.value):
                        integrity_head.save(self.db_session)
                        self.db_session.commit()

                        raise BlockIntegrityError('Block #{} failed integrity checks, Re-adding #{}.. '.format(parent_block.id, block.id))
                    else:
                        integrity_head.value = block.id

                parent_block = block

                if block.id == end_block_id:
                    break

    if parent_block:
        if parent_block.hash == substrate.get_block_hash(int(integrity_head.value)):
            integrity_head.save(self.db_session)
            self.db_session.commit()

    return {'integrity_head': integrity_head.value}