def on_post(self, req, resp):
    """Harvest blocks backwards from a starting block towards the chain start.

    Request media may contain 'start_hash' (hash of the block to start from);
    otherwise the lowest block id currently stored is used. Responds 404 when
    no usable starting block is found.
    """
    # NOTE(review): placeholder message returned verbatim in the success
    # response — looks unfinished; confirm the intended payload.
    msg = "TODO"
    if req.media.get('start_hash'):
        block = Block.query(self.session).filter(Block.hash == req.media.get('start_hash')).first()
    else:
        # No explicit start: begin at the earliest stored block
        block = Block.query(self.session).order_by(Block.id.asc()).first()
    if block and block.id != 1:
        harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
        block_hash = block.parent_hash
        # Walk parent hashes; at most block.id - 1 steps down to the start
        for nr in range(0, block.id - 1):
            try:
                block = harvester.add_block(block_hash)
            except BlockAlreadyAdded as e:
                # Block already in the database: skip it but keep walking
                print('Skipping {}'.format(block_hash))
            block_hash = block.parent_hash
            if block.id == 0:
                # Genesis reached
                break
        self.session.commit()
        resp.media = {
            'status': 'success',
            'data': {
                'message': msg
            }
        }
    else:
        resp.status = falcon.HTTP_404
        resp.media = {'result': 'Block not found'}
def balance_snapshot(self, account_id=None, block_start=1, block_end=None, block_ids=None):
    """Create balance snapshots for accounts over a range of blocks.

    :param account_id: snapshot only this account; otherwise all known accounts
    :param block_start: first block id of the range (ignored when block_ids given)
    :param block_end: last block id; defaults to the finalised chain tip
    :param block_ids: explicit list of block ids, overriding block_start/block_end
    :return: summary dict echoing the effective parameters
    """
    if account_id:
        accounts = [account_id]
    else:
        accounts = [account.id for account in Account.query(self.session)]

    harvester = PolkascanHarvesterService(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE
    )

    if block_ids:
        block_range = block_ids
    else:
        if block_end is None:
            # Set block end to chaintip
            substrate = SubstrateInterface(url=SUBSTRATE_RPC_URL, runtime_config=RuntimeConfiguration())
            block_end = substrate.get_block_number(substrate.get_chain_finalised_head())
        block_range = range(block_start, block_end + 1)

    for block_id in block_range:
        for account in accounts:
            harvester.create_balance_snapshot(block_id, account)
            # Commit per account to keep transactions small
            self.session.commit()

    return {
        # Fixed typo: was 'Snapshop created'
        'message': 'Snapshot created',
        'account_id': account_id,
        'block_start': block_start,
        'block_end': block_end,
        'block_ids': block_ids
    }
def on_post(self, req, resp):
    """Sequence a single block identified by 'block_id' or 'block_hash'.

    Responds 400 when neither identifier is supplied, 404 when the block is
    not in the database, 200 when already sequenced, 201 when newly sequenced.
    """
    block_hash = None
    if 'block_id' in req.media:
        block = Block.query(self.session).filter(
            Block.id == req.media.get('block_id')).first()
    elif req.media.get('block_hash'):
        block_hash = req.media.get('block_hash')
        block = Block.query(
            self.session).filter(Block.hash == block_hash).first()
    else:
        block = None
        resp.status = falcon.HTTP_BAD_REQUEST
        resp.media = {
            'errors': ['Either block_hash or block_id should be supplied']
        }
    if block:
        print('Sequencing #{} ...'.format(block.id))
        harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
        if block.id == 1:
            # Add genesis block
            # NOTE(review): result is discarded and overwritten by the query
            # below — the call is kept for its side effect of storing genesis.
            parent_block = harvester.add_block(block.parent_hash)
        block_total = BlockTotal.query(
            self.session).filter_by(id=block.id).first()
        parent_block = Block.query(
            self.session).filter(Block.id == block.id - 1).first()
        parent_block_total = BlockTotal.query(
            self.session).filter_by(id=block.id - 1).first()
        if block_total:
            # Already sequenced: nothing to do
            resp.status = falcon.HTTP_200
            resp.media = {'result': 'already exists', 'blockId': block.id}
        else:
            # sequence_block expects plain dicts for the parent data
            if parent_block_total:
                parent_block_total = parent_block_total.asdict()
            if parent_block:
                parent_block = parent_block.asdict()
            harvester.sequence_block(block, parent_block, parent_block_total)
            self.session.commit()
            resp.status = falcon.HTTP_201
            resp.media = {
                'result': 'added',
                'parentHash': block.parent_hash
            }
    else:
        resp.status = falcon.HTTP_404
        resp.media = {'result': 'Block not found'}
def on_post(self, req, resp):
    """Run the sequencer synchronously, guarded by a status-table task id.

    Uses the 'SEQUENCER_TASK_ID' status row as a simple lock: set it before
    running, clear it afterwards. Responds with the sequencer result or
    'Busy' when a task id was already set.
    """
    sequencer_task = Status.get_status(self.session, 'SEQUENCER_TASK_ID')

    if sequencer_task.value is None:
        # 3. IF NOT RUNNING: set task id is status table
        # NOTE(review): hard-coded placeholder id, not a real task id —
        # confirm whether a Celery task id was intended here.
        sequencer_task.value = "123"
        sequencer_task.save(self.session)

        harvester = PolkascanHarvesterService(
            db_session=self.session,
            type_registry=TYPE_REGISTRY,
            type_registry_file=TYPE_REGISTRY_FILE
        )
        result = harvester.start_sequencer()

        # Release the lock
        sequencer_task.value = None
        sequencer_task.save(self.session)

    # 4. IF RUNNING: check if task id is active
    else:
        # task_result = AsyncResult(sequencer_task)
        # task_result = {'status': task_result.status, 'result': task_result.result}
        # NOTE(review): the lock is cleared even on the busy path, so a stale
        # id never blocks more than one request — confirm this is intended.
        sequencer_task.value = None
        sequencer_task.save(self.session)
        result = 'Busy'

    self.session.commit()

    resp.media = {
        'result': result
    }
def start_sequencer(self):
    """Celery task: run the sequencer unless another task already holds the lock.

    The 'SEQUENCER_TASK_ID' status row stores the id of the running Celery
    task; a stale id (task finished or unknown) is cleared before deciding.
    """
    sequencer_task = Status.get_status(self.session, 'SEQUENCER_TASK_ID')

    if sequencer_task.value:
        # Check if the recorded task is still running; clear stale locks
        task_result = AsyncResult(sequencer_task.value)

        if not task_result or task_result.ready():
            sequencer_task.value = None
            sequencer_task.save(self.session)

    if sequencer_task.value is None:
        # Claim the lock with this task's own id
        sequencer_task.value = self.request.id
        sequencer_task.save(self.session)

        harvester = PolkascanHarvesterService(
            db_session=self.session,
            type_registry=TYPE_REGISTRY,
            type_registry_file=TYPE_REGISTRY_FILE)
        try:
            result = harvester.start_sequencer()
        except BlockIntegrityError as e:
            result = {'result': str(e)}

        # Release the lock
        sequencer_task.value = None
        sequencer_task.save(self.session)

        self.session.commit()

        # Check if analytics data need to be generated
        #start_generate_analytics.delay()

        return result
    else:
        return {'result': 'Sequencer already running'}
def rebuild_search_index(self):
    """Rebuild the entire search index and report completion."""
    service = PolkascanHarvesterService(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE,
    )
    service.rebuild_search_index()

    return {'result': 'search index rebuilt'}
def on_post(self, req, resp):
    """Harvest one or more blocks, walking backwards by parent hash.

    Accepts 'block_id' (resolved to a hash via the node) or 'block_hash' in
    the request media; optional 'amount' (default 1) controls how many
    ancestor blocks are added.
    """
    block_hash = None

    if req.media.get('block_id'):
        # Resolve a numeric block id to its hash via the node RPC
        substrate = SubstrateInterface(
            url=SUBSTRATE_RPC_URL,
            runtime_config=RuntimeConfiguration(),
            type_registry_preset=settings.TYPE_REGISTRY)
        block_hash = substrate.get_block_hash(req.media.get('block_id'))
    elif req.media.get('block_hash'):
        block_hash = req.media.get('block_hash')
    else:
        resp.status = falcon.HTTP_BAD_REQUEST
        resp.media = {
            'errors': ['Either block_hash or block_id should be supplied']
        }

    if block_hash:
        print('Processing {} ...'.format(block_hash))
        harvester = PolkascanHarvesterService(
            db_session=self.session,
            type_registry=TYPE_REGISTRY,
            type_registry_file=TYPE_REGISTRY_FILE + '-mst')
        block = Block.query(
            self.session).filter(Block.hash == block_hash).first()

        if block:
            # Already harvested: nothing to add
            resp.status = falcon.HTTP_200
            resp.media = {
                'result': 'already exists',
                'parentHash': block.parent_hash
            }
        else:
            amount = req.media.get('amount', 1)

            for nr in range(0, amount):
                try:
                    block = harvester.add_block(block_hash)
                except BlockAlreadyAdded as e:
                    print('Skipping {}'.format(block_hash))
                # NOTE(review): if add_block raises on the first iteration,
                # `block` is still None and the next line raises
                # AttributeError — confirm intended handling.
                block_hash = block.parent_hash

                if block.id == 0:
                    # Genesis reached
                    break

            self.session.commit()

            resp.status = falcon.HTTP_201
            resp.media = {
                'result': 'added',
                'parentHash': block.parent_hash
            }
    else:
        resp.status = falcon.HTTP_404
        resp.media = {'result': 'Block not found'}
def on_post(self, req, resp):
    """Run harvester integrity checks and report the outcome (or error text)."""
    service = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)

    try:
        outcome = service.integrity_checks()
    except BlockIntegrityError as err:
        outcome = str(err)

    resp.media = {'result': outcome}
def rebuilding_search_index(self, search_index_id, truncate=False):
    """Rebuild one search index type.

    :param search_index_id: id of the index type to rebuild
    :param truncate: when True, delete existing rows for that index type first
    :return: result dict
    """
    if truncate:
        # Clear search index table. Use a bound parameter instead of string
        # interpolation so the id cannot be abused for SQL injection.
        self.session.execute(
            'delete from analytics_search_index where index_type_id = :index_type_id',
            {'index_type_id': search_index_id}
        )
        self.session.commit()

    harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
    harvester.rebuild_search_index(search_index_id)

    return {'result': 'index rebuilt'}
def on_post(self, req, resp):
    """Process genesis data; requires block #1 to be present in the database."""
    service = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
    first_block = Block.query(self.session).get(1)

    if first_block:
        outcome = service.process_genesis(block=first_block)
    else:
        outcome = 'Block #1 required to process genesis'

    self.session.commit()
    resp.media = {'result': outcome}
def on_post(self, req, resp):
    """Process runtime metadata for the current chain head.

    Idempotent: returns early (still HTTP 200) when the runtime version at
    the head has already been processed.
    """
    resp.status = falcon.HTTP_200

    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)
    head_hash = substrate.get_chain_head()
    substrate.init_runtime(head_hash)

    # Nothing to do when this runtime version is already stored
    if Runtime.query(self.session).get(substrate.runtime_version):
        return

    service = PolkascanHarvesterService(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE)
    service.process_metadata(substrate.runtime_version, head_hash)
    self.session.commit()

    resp.media = {'status': 'success', 'data': {}}
def update_balances_in_block(self, block_id):
    """Take full balance/asset snapshots at a block, then refresh account balances."""
    service = PolkascanHarvesterService(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE)

    service.create_full_balance_snaphot(block_id)
    service.create_full_asset_balance_snaphot(block_id)
    self.session.commit()

    service.update_account_balances()
    self.session.commit()

    return 'Snapshot created for block {}'.format(block_id)
def sequence_block_recursive(self, parent_block_data, parent_sequenced_block_data=None):
    """Celery task: sequence up to 10 consecutive blocks, then re-queue itself.

    :param parent_block_data: dict of the last sequenced block's Block row
    :param parent_sequenced_block_data: dict of its BlockTotal row; None means
        nothing has been sequenced yet (chain must be at genesis)
    """
    harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
    harvester.metadata_store = self.metadata_store

    # Process at most 10 blocks per task invocation
    for nr in range(0, 10):
        if not parent_sequenced_block_data:
            # No block ever sequenced, check if chain is at genesis state
            block = Block.query(self.session).order_by('id').first()

            if block.id == 1:
                # Add genesis block
                block = harvester.add_block(block.parent_hash)

            if block.id != 0:
                return {'error': 'Chain not at genesis'}

            harvester.process_genesis(block)
            block_id = 0
        else:
            # Continue from the block after the last sequenced one
            block_id = parent_sequenced_block_data['id'] + 1

        block = Block.query(self.session).get(block_id)

        if block:
            try:
                sequenced_block = harvester.sequence_block(
                    block, parent_block_data, parent_sequenced_block_data)
                self.session.commit()

                parent_block_data = block.asdict()
                parent_sequenced_block_data = sequenced_block.asdict()

                if nr == 9 or not sequenced_block:
                    if sequenced_block:
                        if nr == 9:
                            # Batch limit reached: continue in a new task
                            sequence_block_recursive.delay(
                                parent_block_data, parent_sequenced_block_data)
                    return {'processedBlockId': block.id, 'amount': nr + 1}
            except IntegrityError as e:
                # Another sequencer already wrote this BlockTotal row
                return {
                    'error': 'Sequencer already started',
                    'exception': str(e)
                }
        else:
            return {'error': 'Block {} not found'.format(block_id)}
def rebuild_account_info_snapshot(self):
    """Rebuild the account-info snapshot table from the search index.

    Truncates the snapshot table, replays every (account, block) pair found
    in the search index — taking a full snapshot at fixed intervals and a
    per-account snapshot otherwise — then copies each account's most recent
    snapshot balances back onto the Account rows.
    """
    harvester = PolkascanHarvesterService(
        db_session=self.session,
        type_registry=TYPE_REGISTRY,
        type_registry_file=TYPE_REGISTRY_FILE)

    last_full_snapshot_block_nr = 0

    # Start from scratch: drop all existing snapshot rows
    self.session.execute('truncate table {}'.format(AccountInfoSnapshot.__tablename__))

    for account_id, block_id in self.session.query(
            SearchIndex.account_id, SearchIndex.block_id
            ).filter(SearchIndex.block_id >= settings.BALANCE_SYSTEM_ACCOUNT_MIN_BLOCK
            ).order_by('block_id').group_by(
            SearchIndex.account_id, SearchIndex.block_id).yield_per(1000):

        if block_id > last_full_snapshot_block_nr + settings.BALANCE_FULL_SNAPSHOT_INTERVAL:
            # Interval passed: snapshot ALL accounts at the aligned block
            last_full_snapshot_block_nr = block_id - block_id % settings.BALANCE_FULL_SNAPSHOT_INTERVAL
            harvester.create_full_balance_snaphot(last_full_snapshot_block_nr)
            self.session.commit()
        else:
            # Snapshot only the account touched at this block
            harvester.create_balance_snapshot(block_id, account_id)
            self.session.commit()

    # set balances according to most recent snapshot
    account_info = self.session.execute("""
        select
           a.account_id, a.balance_total, a.balance_free, a.balance_reserved, a.nonce
    from
         data_account_info_snapshot as a
    inner join (
        select account_id, max(block_id) as max_block_id
        from data_account_info_snapshot
        group by account_id
    ) as b
    on a.account_id = b.account_id and a.block_id = b.max_block_id
    """)

    for account_id, balance_total, balance_free, balance_reserved, nonce in account_info:
        Account.query(self.session).filter_by(id=account_id).update(
            {
                Account.balance_total: balance_total,
                Account.balance_free: balance_free,
                Account.balance_reserved: balance_reserved,
                Account.nonce: nonce,
            }, synchronize_session='fetch')

    self.session.commit()

    return {'result': 'account info snapshots rebuilt'}
def dealWithForks(self, shard_num, bid=None, substrate_url=None):
    """Celery task: repair a forked range of blocks for one shard.

    Finds the highest block (min_bid) still agreeing with the node, deletes
    all local rows between min_bid and bid, then re-harvests that range from
    the node.

    :param shard_num: shard number (may arrive as a 2-char string, e.g. '01')
    :param bid: upper block id of the suspect range
    :param substrate_url: RPC endpoint of the shard's node
    """
    # Normalize shard_num: a 2-character value keeps only its second digit
    shard_num = str(shard_num)
    if len(shard_num) == 2:
        shard_num = int(shard_num[1:2])
        print(shard_num)
    else:
        shard_num = int(shard_num)

    print(
        '== dealWithForks substrate_url* {} *shardnum=*{} *==start_block_num=*{}*'
        .format(substrate_url, shard_num, bid))

    substrate = SubstrateInterface(substrate_url)
    # self.session.execute('delete from data_block where shard_num = %(shard_num)s ',shard_num=shard_num)
    harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
    harvester.metadata_store = self.metadata_store

    try:
        nr = 0
        # Highest block id that still matches the node's chain
        min_bid = find(bid, shard_num, substrate)
        # min_bid = 5
        # bid = 7
        if (bid - min_bid) <= 1:
            # Range too small to contain a fork: nothing to repair
            return {
                'result':
                'dealWithForks from {} to {} blocks check by shardnum of {}'.
                format(min_bid, bid, shard_num),
                'status': '(bid - min_bid) <= 1,do nothing!'
            }

        # Delete all local data strictly inside (min_bid, bid)
        self.session.query(Block).filter(Block.shard_num == shard_num,
                                         Block.bid > min_bid,
                                         Block.bid < bid).delete()
        self.session.query(Extrinsic).filter(
            Extrinsic.shard_num == shard_num,
            Extrinsic.block_id > min_bid,
            Extrinsic.block_id < bid).delete()
        self.session.query(Log).filter(Log.shard_num == shard_num,
                                       Log.block_id > min_bid,
                                       Log.block_id < bid).delete()
        self.session.query(Event).filter(Event.shard_num == shard_num,
                                         Event.block_id > min_bid,
                                         Event.block_id < bid).delete()

        # Re-harvest the deleted range from the node
        for nr in range(min_bid + 1, bid):
            blocka = harvester.add_block(substrate.get_block_hash(nr), substrate_url)
            if blocka:
                print(
                    '== Added sucess dealWithForks substrate_url* {} *shardnum=*{} *==start_block_num=*{}*'
                    .format(substrate_url, shard_num, nr))
                self.session.commit()
    except BlockAlreadyAdded as e:
        print('. dealWithForks Skipped {} '.format(nr))
    except IntegrityError as e:
        print('.dealWithForks Skipped duplicate {}=={} '.format(nr, e))
    except Exception as exc:
        print('!dealWithForks ERROR adding {}'.format(nr))
        # Retry the whole task later rather than failing permanently
        raise self.retry(exc=exc, countdown=60, max_retries=5)

    return {
        'result':
        'dealWithForks from {} to {} blocks check by shardnum of {}'.format(
            min_bid, bid, shard_num),
        'dealWithForks_status': 'true'
    }
def start_harvester(self, check_gaps=False, shard=None):
    """Celery task: harvest up to 10 new blocks for one shard.

    The shard name is taken from the task's positional args (overriding the
    `shard` parameter), resolved to an RPC url via SHARDS_TABLE, and blocks
    are added forward from the highest block stored for that shard.

    :param check_gaps: unused here; kept for interface compatibility
    :param shard: shard name; overwritten by self.request.args[0]
    :raises HarvesterNotshardParamsError: when no shard is supplied
    """
    shard = self.request.args[0]
    if shard is None:
        raise HarvesterNotshardParamsError(
            'params shard is missing.. stopping harvester ')

    print("start_harvester")

    substrate_url = SHARDS_TABLE[shard]
    print('== start_harvester substrate_url {} =='.format(substrate_url))
    substrate = SubstrateInterface(substrate_url)

    # Wait until the init task has stored block #1 for (at least) 4 shards
    n = Block.query(self.session).filter_by(bid=1).count()
    if n < 4:
        print('waiting init task completed! count().n: {} '.format(n))
        return {'result': 'waiting init task completed! '}

    block_sets = []
    harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
    harvester.metadata_store = self.metadata_store

    start_block_hash = substrate.get_chain_head()
    end_block_hash = None
    r = 10
    block_nr = substrate.get_block_number(start_block_hash)

    max_block = Block.query(self.session).filter_by(
        shard_num=shard.split(".")[1]).order_by(Block.bid.desc()).first()
    print('start block_nr {} =='.format(block_nr))
    print('start max_block {} =='.format(max_block.bid))

    # Shrink the batch when fewer than 10 new blocks exist
    if block_nr - max_block.bid < 10:
        r = block_nr - max_block.bid
    print('current range r: {} =='.format(max_block.bid))

    # Initialize before the try so the except handlers cannot hit a
    # NameError when the very first RPC call raises.
    block_hash = None
    try:
        for nr in range(1, r + 1):
            block_hash = substrate.get_block_hash(max_block.bid + nr)
            if harvester.add_block(block_hash, substrate_url):
                print('start_harvester+ Added {} '.format(block_hash))
                self.session.commit()
                # Update persistent metadata store in Celery task
                self.metadata_store = harvester.metadata_store
    except BlockAlreadyAdded as e:
        print('. Skipped {} '.format(block_hash))
    except IntegrityError as e:
        print('. Skipped duplicate {}=={} '.format(block_hash, e))
    except Exception as exc:
        print('! ERROR adding {}'.format(block_hash))
        raise HarvesterCouldNotAddBlock(block_hash) from exc

    block_sets.append({
        'start_block_hash': start_block_hash,
        'end_block_hash': end_block_hash
    })

    # The original dict literal had two 'result' keys; the first
    # ('Yee data Synchronization job SUCCESS') was dead — Python keeps only
    # the last duplicate key — so it is removed here.
    return {
        'block_sets': block_sets,
        'result':
        'Synch data from {} to {} blocks check by shardnum of {}'.format(
            max_block.bid + 1, r + max_block.bid + 1, shard)
    }
def accumulate_block_recursive(self, block_hash, end_block_hash=None, substrate_url=None):
    """Celery task: harvest a shard's blocks backwards from block_hash.

    Walks parent hashes from block_hash down to end_block_hash (or genesis),
    adding each block, then processes shard genesis data for the final block.

    :param block_hash: hash to start from (walked backwards)
    :param end_block_hash: optional hash at which to stop
    :param substrate_url: shard node RPC endpoint; task is a no-op without it
    """
    print(
        'start accumulate_block_recursive block_hash {} =='.format(block_hash))
    print('start accumulate_block_recursive substrate_url {} =='.format(
        substrate_url))

    harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
    harvester.metadata_store = self.metadata_store

    # If metadata store isn't initialized yet, perform some tests
    if not substrate_url:
        # No node endpoint given: nothing can be harvested
        return

    # shard_num = NUM[substrate_url]
    # print('start accumulate_block_recursive shard_num {} =='.format(shard_num))
    substrate = SubstrateInterface(substrate_url)
    block_nr = substrate.get_block_number(block_hash)

    block = None
    max_sequenced_block_id = False
    block_one = None
    add_count = 0

    try:
        # Walk at most block_nr + 1 steps back towards genesis
        for nr in range(0, block_nr + 1):
            if not block or block.bid > 0:
                # Process block
                blocka = harvester.add_block(block_hash, substrate_url)
                if blocka:
                    print('+ Added {} '.format(block_hash))
                    add_count += 1
                self.session.commit()
                block = blocka

                # Break loop if targeted end block hash is reached
                if block_hash == end_block_hash or block.bid == 0:
                    block_one = block
                    break

                # Continue with parent block hash
                block_hash = block.parent_hash

        # Update persistent metadata store in Celery task
        self.metadata_store = harvester.metadata_store
        harvester.process_shard_genesis(block_one, substrate_url)

        # if block_hash != end_block_hash and block and block.bid > 0:
        #     accumulate_block_recursive.delay(block.parent_hash, end_block_hash)
    except BlockAlreadyAdded as e:
        print('. Skipped {} '.format(block_hash))
    except IntegrityError as e:
        print('. Skipped duplicate {}=={} '.format(block_hash, e))
    except Exception as exc:
        print('! ERROR adding {}'.format(block_hash))
        raise HarvesterCouldNotAddBlock(block_hash) from exc

    return {
        'result': '{} blocks added'.format(add_count),
        'lastAddedBlockHash': block_hash,
        'sequencerStartedFrom': max_sequenced_block_id
    }
def accumulate_block_recursive(self, block_hash, end_block_hash=None):
    """Celery task: harvest up to 10 blocks backwards, then re-queue itself.

    On a cold start (empty metadata store and no stored blocks) it fans out
    extra entry-point tasks spread across the chain to parallelize the
    initial accumulation.

    :param block_hash: hash to start from (walked backwards via parent hashes)
    :param end_block_hash: optional hash at which to stop
    """
    harvester = PolkascanHarvesterService(self.session, type_registry=TYPE_REGISTRY)
    harvester.metadata_store = self.metadata_store

    # If metadata store isn't initialized yet, perform some tests
    if not harvester.metadata_store:
        print('Init: create entrypoints')
        # Check if blocks exists
        max_block_id = self.session.query(func.max(Block.id)).one()[0]

        if not max_block_id:
            # Speed up accumulating by creating several entry points
            substrate = SubstrateInterface(SUBSTRATE_RPC_URL)
            block_nr = substrate.get_block_number(block_hash)
            if block_nr > 100:
                # Spawn tasks at quarter points of the chain (ends excluded)
                for entry_point in range(0, block_nr, block_nr // 4)[1:-1]:
                    entry_point_hash = substrate.get_block_hash(entry_point)
                    accumulate_block_recursive.delay(entry_point_hash)

    block = None
    max_sequenced_block_id = False
    add_count = 0

    try:
        # Process at most 10 blocks per task invocation
        for nr in range(0, 10):
            if not block or block.id > 0:
                # Process block
                block = harvester.add_block(block_hash)
                print('+ Added {} '.format(block_hash))
                add_count += 1
                self.session.commit()

                # Break loop if targeted end block hash is reached
                if block_hash == end_block_hash or block.id == 0:
                    break

                # Continue with parent block hash
                block_hash = block.parent_hash

        # Update persistent metadata store in Celery task
        self.metadata_store = harvester.metadata_store

        if block_hash != end_block_hash and block and block.id > 0:
            # Batch limit reached but not done: continue in a new task
            accumulate_block_recursive.delay(block.parent_hash, end_block_hash)
    except BlockAlreadyAdded as e:
        # Chain segment already harvested: hand over to the sequencer
        print('. Skipped {} '.format(block_hash))
        start_sequencer.delay()
    except IntegrityError as e:
        print('. Skipped duplicate {} '.format(block_hash))
    except Exception as exc:
        print('! ERROR adding {}'.format(block_hash))
        raise HarvesterCouldNotAddBlock(block_hash) from exc

    return {
        'result': '{} blocks added'.format(add_count),
        'lastAddedBlockHash': block_hash,
        'sequencerStartedFrom': max_sequenced_block_id
    }