def get_item(self, item_id):
    """Return the RuntimeStorage record identified by a composite id.

    Args:
        item_id: String of the form ``"<spec_version>-<module_id>-<name>"``.

    Returns:
        The first matching ``RuntimeStorage`` row, or ``None`` when no
        record matches.
    """
    # maxsplit=2 keeps storage names that themselves contain '-' intact
    # instead of raising ValueError on unpacking.
    spec_version, module_id, name = item_id.split('-', 2)

    return RuntimeStorage.query(self.session).filter_by(
        spec_version=spec_version,
        module_id=module_id,
        name=name
    ).first()
def get_relationships(self, include_list, item):
    """Build the requested relationship querysets for a runtime module.

    For every relationship name present in ``include_list``, a query
    scoped to the item's (spec_version, module_id) is added to the
    returned dict; queries are not executed here.
    """
    rels = {}
    scope = dict(spec_version=item.spec_version, module_id=item.module_id)

    if 'calls' in include_list:
        rels['calls'] = RuntimeCall.query(self.session) \
            .filter_by(**scope).order_by('lookup', 'id')

    if 'events' in include_list:
        rels['events'] = RuntimeEvent.query(self.session) \
            .filter_by(**scope).order_by('lookup', 'id')

    if 'storage' in include_list:
        rels['storage'] = RuntimeStorage.query(self.session) \
            .filter_by(**scope).order_by('name')

    if 'constants' in include_list:
        rels['constants'] = RuntimeConstant.query(self.session) \
            .filter_by(**scope).order_by('name')

    if 'errors' in include_list:
        rels['errors'] = RuntimeErrorMessage.query(self.session) \
            .filter_by(**scope) \
            .order_by('name').order_by(RuntimeErrorMessage.index)

    return rels
def serialize_item(self, item):
    """Serialize an account item enriched with on-chain balance data.

    Reads the System.Account storage entry for the account at the
    current chain head (``block_hash=None``) and merges free/reserved
    balances and the nonce into the serialized attributes.
    """
    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)
    data = item.serialize()

    # Use the storage metadata of the most recent runtime to decode the value.
    # NOTE(review): storage_call may be None when the storage function is not
    # known for any runtime, which would raise AttributeError below — confirm
    # whether a guard/skip is wanted here.
    storage_call = RuntimeStorage.query(self.session).filter_by(
        module_id='system',
        name='Account',
    ).order_by(RuntimeStorage.spec_version.desc()).first()

    account = substrate.get_storage(
        block_hash=None,
        module='System',
        function='Account',
        params=item.id,
        return_scale_type=storage_call.type_value,
        hasher=storage_call.type_hasher,
        metadata_version=SUBSTRATE_METADATA_VERSION
    )

    data['attributes']['free_balance'] = account['data']['free']
    data['attributes']['reserved_balance'] = account['data']['reserved']
    data['attributes']['nonce'] = account['nonce']

    return data
def accumulation_hook(self, db_session):
    """Record a DemocracyVoteAudit row for a successful vote extrinsic.

    Looks up the voter's free balance ("stash") at the processed block
    and derives vote direction, conviction and weighted vote totals from
    the raw vote parameter.
    """
    if self.extrinsic.success:
        # Vote and stash accounts are both taken from the signing address.
        vote_account_id = self.extrinsic.address
        stash_account_id = self.extrinsic.address

        # TODO refactor when new runtime aware substrateinterface
        # TODO make substrateinterface part of processor over websockets

        # Get balance of stash_account
        substrate = SubstrateInterface(SUBSTRATE_RPC_URL)

        # Decode with the storage metadata of the most recent runtime.
        storage_call = RuntimeStorage.query(db_session).filter_by(
            module_id='balances',
            name='FreeBalance',
        ).order_by(RuntimeStorage.spec_version.desc()).first()

        stash = substrate.get_storage(
            block_hash=self.block.hash,
            module='Balances',
            function='FreeBalance',
            params=stash_account_id,
            return_scale_type=storage_call.type_value,
            hasher=storage_call.type_hasher)

        vote_audit = DemocracyVoteAudit(
            block_id=self.extrinsic.block_id,
            extrinsic_idx=self.extrinsic.extrinsic_idx,
            type_id=DEMOCRACY_VOTE_AUDIT_TYPE_NORMAL,
            data={
                'vote_account_id': vote_account_id,
                'stash_account_id': stash_account_id,
                'stash': stash
            })

        # Process parameters
        for param in self.extrinsic.params:
            if param.get('name') == 'ref_index':
                vote_audit.democracy_referendum_id = param.get('value')
            if param.get('name') == 'vote':
                vote_audit.data['vote_raw'] = param.get('value')
                # NOTE(review): any non-zero raw value counts as "aye" here;
                # if the raw vote also carries conviction bits this may
                # misclassify a nay-with-conviction vote — confirm against
                # the runtime's vote encoding.
                vote_audit.data['vote_yes'] = bool(
                    vote_audit.data['vote_raw'])
                vote_audit.data['vote_no'] = not bool(
                    vote_audit.data['vote_raw'])

                # Determine conviction and weight of vote
                vote_audit.data['conviction'] = vote_audit.data[
                    'vote_raw'] & Conviction.CONVICTION_MASK
                # Weight = direction (0/1) times the stash balance.
                vote_audit.data['vote_yes_weighted'] = int(
                    vote_audit.data['vote_yes']) * vote_audit.data['stash']
                vote_audit.data['vote_no_weighted'] = int(
                    vote_audit.data['vote_no']) * vote_audit.data['stash']

        vote_audit.save(db_session)
def accumulation_hook(self, db_session):
    """Store a 'referendum started' audit record for a matching event.

    Only events shaped as (ReferendumIndex, VoteThreshold) are handled;
    the referendum's proposal is fetched from Democracy.ReferendumInfoOf
    at the processed block and embedded in the audit data.
    """
    attributes = self.event.attributes

    # Check event requirements — bail out early on any mismatch.
    if len(attributes) != 2:
        return
    if attributes[0]['type'] != 'ReferendumIndex':
        return
    if attributes[1]['type'] != 'VoteThreshold':
        return

    # Retrieve proposal from storage
    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)

    storage_call = RuntimeStorage.query(db_session).filter_by(
        module_id='democracy',
        name='ReferendumInfoOf',
    ).order_by(RuntimeStorage.spec_version.desc()).first()

    proposal = substrate.get_storage(
        block_hash=self.block.hash,
        module='Democracy',
        function='ReferendumInfoOf',
        params=attributes[0]['valueRaw'],
        return_scale_type=storage_call.type_value,
        hasher=storage_call.type_hasher,
        metadata=self.metadata
    )

    audit = DemocracyReferendumAudit(
        democracy_referendum_id=attributes[0]['value'],
        block_id=self.event.block_id,
        extrinsic_idx=self.event.extrinsic_idx,
        event_idx=self.event.event_idx,
        type_id=DEMOCRACY_REFERENDUM_AUDIT_TYPE_STARTED,
        data={
            'vote_threshold': attributes[1]['value'],
            'ReferendumIndex': attributes[0]['valueRaw'],
            'proposal': proposal
        }
    )

    audit.save(db_session)
def process_metadata(self, spec_version, block_hash):
    """Ensure runtime metadata for ``spec_version`` is cached and persisted.

    On a cache miss: if the runtime is already in the database only the
    in-memory cache is refreshed; otherwise the runtime and all of its
    modules, calls, events, storage functions, constants, error messages
    and type-registry entries are decoded and stored inside one
    (sub)transaction, after which the metadata decoder is cached in
    ``self.metadata_store``.
    """
    # Check if metadata already in store
    if spec_version not in self.metadata_store:
        print('Metadata: CACHE MISS', spec_version)

        runtime_version_data = self.substrate.get_block_runtime_version(block_hash)

        runtime = Runtime.query(self.db_session).get(spec_version)

        if runtime:
            # Already persisted by an earlier run: only refresh the cache.
            self.metadata_store[spec_version] = self.substrate.get_block_metadata(block_hash=block_hash)
        else:
            self.db_session.begin(subtransactions=True)
            try:
                # Store metadata in database
                runtime = Runtime(
                    id=spec_version,
                    impl_name=runtime_version_data["implName"],
                    impl_version=runtime_version_data["implVersion"],
                    spec_name=runtime_version_data["specName"],
                    spec_version=spec_version,
                    json_metadata=str(self.substrate.metadata_decoder.data),
                    json_metadata_decoded=self.substrate.metadata_decoder.value,
                    apis=runtime_version_data["apis"],
                    authoring_version=runtime_version_data["authoringVersion"],
                    # Totals start at 0 and are accumulated per module below.
                    count_call_functions=0,
                    count_events=0,
                    count_modules=len(self.substrate.metadata_decoder.metadata.modules),
                    count_storage_functions=0,
                    count_constants=0,
                    count_errors=0
                )

                runtime.save(self.db_session)

                print('store version to db', self.substrate.metadata_decoder.version)

                for module in self.substrate.metadata_decoder.metadata.modules:

                    # Check if module exists
                    # (a duplicate identifier within this spec_version gets a
                    # '_1' suffix so the stored module_id stays unique)
                    if RuntimeModule.query(self.db_session).filter_by(
                        spec_version=spec_version,
                        module_id=module.get_identifier()
                    ).count() == 0:
                        module_id = module.get_identifier()
                    else:
                        module_id = '{}_1'.format(module.get_identifier())

                    # Storage backwards compt check
                    if module.storage and isinstance(module.storage, list):
                        storage_functions = module.storage
                    elif module.storage and isinstance(getattr(module.storage, 'value'), dict):
                        storage_functions = module.storage.items
                    else:
                        storage_functions = []

                    runtime_module = RuntimeModule(
                        spec_version=spec_version,
                        module_id=module_id,
                        prefix=module.prefix,
                        name=module.name,
                        count_call_functions=len(module.calls or []),
                        count_storage_functions=len(storage_functions),
                        count_events=len(module.events or []),
                        count_constants=len(module.constants or []),
                        count_errors=len(module.errors or []),
                    )
                    runtime_module.save(self.db_session)

                    # Update totals in runtime
                    runtime.count_call_functions += runtime_module.count_call_functions
                    runtime.count_events += runtime_module.count_events
                    runtime.count_storage_functions += runtime_module.count_storage_functions
                    runtime.count_constants += runtime_module.count_constants
                    runtime.count_errors += runtime_module.count_errors

                    if len(module.calls or []) > 0:
                        for idx, call in enumerate(module.calls):
                            runtime_call = RuntimeCall(
                                spec_version=spec_version,
                                module_id=module_id,
                                call_id=call.get_identifier(),
                                index=idx,
                                name=call.name,
                                lookup=call.lookup,
                                documentation='\n'.join(call.docs),
                                count_params=len(call.args)
                            )
                            runtime_call.save(self.db_session)

                            for arg in call.args:
                                runtime_call_param = RuntimeCallParam(
                                    runtime_call_id=runtime_call.id,
                                    name=arg.name,
                                    type=arg.type
                                )
                                runtime_call_param.save(self.db_session)

                    if len(module.events or []) > 0:
                        for event_index, event in enumerate(module.events):
                            runtime_event = RuntimeEvent(
                                spec_version=spec_version,
                                module_id=module_id,
                                event_id=event.name,
                                index=event_index,
                                name=event.name,
                                lookup=event.lookup,
                                documentation='\n'.join(event.docs),
                                count_attributes=len(event.args)
                            )
                            runtime_event.save(self.db_session)

                            for arg_index, arg in enumerate(event.args):
                                runtime_event_attr = RuntimeEventAttribute(
                                    runtime_event_id=runtime_event.id,
                                    index=arg_index,
                                    type=arg
                                )
                                runtime_event_attr.save(self.db_session)

                    if len(storage_functions) > 0:
                        for idx, storage in enumerate(storage_functions):

                            # Determine type
                            # Defaults cover the PlainType case where only
                            # type_value applies.
                            type_hasher = None
                            type_key1 = None
                            type_key2 = None
                            type_value = None
                            type_is_linked = None
                            type_key2hasher = None

                            if storage.type.get('PlainType'):
                                type_value = storage.type.get('PlainType')
                            elif storage.type.get('MapType'):
                                type_hasher = storage.type['MapType'].get('hasher')
                                type_key1 = storage.type['MapType'].get('key')
                                type_value = storage.type['MapType'].get('value')
                                type_is_linked = storage.type['MapType'].get('isLinked', False)
                            elif storage.type.get('DoubleMapType'):
                                type_hasher = storage.type['DoubleMapType'].get('hasher')
                                type_key1 = storage.type['DoubleMapType'].get('key1')
                                type_key2 = storage.type['DoubleMapType'].get('key2')
                                type_value = storage.type['DoubleMapType'].get('value')
                                type_key2hasher = storage.type['DoubleMapType'].get('key2Hasher')

                            runtime_storage = RuntimeStorage(
                                spec_version=spec_version,
                                module_id=module_id,
                                index=idx,
                                name=storage.name,
                                lookup=None,
                                default=storage.fallback,
                                modifier=storage.modifier,
                                type_hasher=type_hasher,
                                # Pre-computed storage key: concatenated
                                # xxhash128 of module prefix and storage name.
                                storage_key=xxh128(module.prefix.encode()) + xxh128(storage.name.encode()),
                                type_key1=type_key1,
                                type_key2=type_key2,
                                type_value=type_value,
                                type_is_linked=type_is_linked,
                                type_key2hasher=type_key2hasher,
                                documentation='\n'.join(storage.docs)
                            )
                            runtime_storage.save(self.db_session)

                    if len(module.constants or []) > 0:
                        for idx, constant in enumerate(module.constants):

                            # Decode value; fall back to the raw constant
                            # value on any known decode failure.
                            try:
                                value_obj = ScaleDecoder.get_decoder_class(
                                    constant.type,
                                    ScaleBytes(constant.constant_value)
                                )
                                value_obj.decode()
                                value = value_obj.serialize()
                            except ValueError:
                                value = constant.constant_value
                            except RemainingScaleBytesNotEmptyException:
                                value = constant.constant_value
                            except NotImplementedError:
                                value = constant.constant_value

                            # Composite values are stored as JSON text.
                            if type(value) is list or type(value) is dict:
                                value = json.dumps(value)

                            runtime_constant = RuntimeConstant(
                                spec_version=spec_version,
                                module_id=module_id,
                                index=idx,
                                name=constant.name,
                                type=constant.type,
                                value=value,
                                documentation='\n'.join(constant.docs)
                            )
                            runtime_constant.save(self.db_session)

                    if len(module.errors or []) > 0:
                        for idx, error in enumerate(module.errors):
                            runtime_error = RuntimeErrorMessage(
                                spec_version=spec_version,
                                module_id=module_id,
                                index=idx,
                                name=error.name,
                                documentation='\n'.join(error.docs)
                            )
                            runtime_error.save(self.db_session)

                # Persist the accumulated totals.
                runtime.save(self.db_session)

                # Process types
                for runtime_type_data in list(self.substrate.get_type_registry(block_hash=block_hash).values()):
                    runtime_type = RuntimeType(
                        spec_version=runtime_type_data["spec_version"],
                        type_string=runtime_type_data["type_string"],
                        decoder_class=runtime_type_data["decoder_class"],
                        is_primitive_core=runtime_type_data["is_primitive_core"],
                        is_primitive_runtime=runtime_type_data["is_primitive_runtime"]
                    )
                    runtime_type.save(self.db_session)

                self.db_session.commit()

                # Put in local store
                self.metadata_store[spec_version] = self.substrate.metadata_decoder
            except SQLAlchemyError as e:
                # NOTE(review): the error is swallowed after rollback and the
                # cache entry is never populated, so callers see no failure —
                # confirm this best-effort behaviour is intended.
                self.db_session.rollback()
def add_session(self, db_session, session_id):
    """Persist a Session together with its validators and nominators.

    Collects the current era, the active validator set and per-validator
    staking details (controller, ledger, preferences, exposure) from
    on-chain storage at the processed block, then stores Session,
    SessionValidator, SessionNominator and SessionTotal records.
    Storage reads that fail SCALE decoding are skipped best-effort.
    """
    current_era = None
    validators = []
    nominators = []
    validation_session_lookup = {}

    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)

    # Retrieve current era
    storage_call = RuntimeStorage.query(db_session).filter_by(
        module_id='staking',
        name='CurrentEra',
        spec_version=self.block.spec_version_id
    ).first()

    if storage_call:
        try:
            current_era = substrate.get_storage(
                block_hash=self.block.hash,
                module="Staking",
                function="CurrentEra",
                return_scale_type=storage_call.get_return_type(),
                hasher=storage_call.type_hasher
            )
        except RemainingScaleBytesNotEmptyException:
            # Leave current_era as None when decoding fails.
            pass

    # Retrieve validators for new session from storage
    storage_call = RuntimeStorage.query(db_session).filter_by(
        module_id='session',
        name='Validators',
        spec_version=self.block.spec_version_id
    ).first()

    if storage_call:
        try:
            validators = substrate.get_storage(
                block_hash=self.block.hash,
                module="Session",
                function="Validators",
                return_scale_type=storage_call.get_return_type(),
                hasher=storage_call.type_hasher
            ) or []
        except RemainingScaleBytesNotEmptyException:
            pass

    # Retrieve all sessions in one call
    if not LEGACY_SESSION_VALIDATOR_LOOKUP:

        # Retrieve session account
        # TODO move to network specific data types
        storage_call = RuntimeStorage.query(db_session).filter_by(
            module_id='session',
            name='QueuedKeys',
            spec_version=self.block.spec_version_id
        ).first()

        if storage_call:
            try:
                validator_session_list = substrate.get_storage(
                    block_hash=self.block.hash,
                    module="Session",
                    function="QueuedKeys",
                    return_scale_type=storage_call.get_return_type(),
                    hasher=storage_call.type_hasher
                ) or []
            except RemainingScaleBytesNotEmptyException:
                # Fallback chain for alternative key formats: first the
                # legacy layout, then the Edgeware-specific one.
                try:
                    validator_session_list = substrate.get_storage(
                        block_hash=self.block.hash,
                        module="Session",
                        function="QueuedKeys",
                        return_scale_type='Vec<(ValidatorId, LegacyKeys)>',
                        hasher=storage_call.type_hasher
                    ) or []
                except RemainingScaleBytesNotEmptyException:
                    validator_session_list = substrate.get_storage(
                        block_hash=self.block.hash,
                        module="Session",
                        function="QueuedKeys",
                        return_scale_type='Vec<(ValidatorId, EdgewareKeys)>',
                        hasher=storage_call.type_hasher
                    ) or []

            # build lookup dict
            # Maps validator account (hex, no 0x) to its session key;
            # an 'ed25519' key takes precedence over 'grandpa'.
            validation_session_lookup = {}
            for validator_session_item in validator_session_list:
                session_key = ''

                if validator_session_item['keys'].get('grandpa'):
                    session_key = validator_session_item['keys'].get('grandpa')

                if validator_session_item['keys'].get('ed25519'):
                    session_key = validator_session_item['keys'].get('ed25519')

                validation_session_lookup[
                    validator_session_item['validator'].replace('0x', '')] = session_key.replace('0x', '')

    for rank_nr, validator_account in enumerate(validators):
        validator_stash = None
        validator_controller = None
        validator_ledger = {}
        validator_prefs = {}
        validator_session = ''
        exposure = {}

        if not LEGACY_SESSION_VALIDATOR_LOOKUP:
            # Modern path: validators list holds stash accounts; resolve
            # the controller via Staking.Bonded.
            validator_stash = validator_account.replace('0x', '')

            # Retrieve stash account
            storage_call = RuntimeStorage.query(db_session).filter_by(
                module_id='staking',
                name='Bonded',
                spec_version=self.block.spec_version_id
            ).first()

            if storage_call:
                try:
                    validator_controller = substrate.get_storage(
                        block_hash=self.block.hash,
                        module="Staking",
                        function="Bonded",
                        params=validator_stash,
                        return_scale_type=storage_call.get_return_type(),
                        hasher=storage_call.type_hasher
                    ) or ''

                    validator_controller = validator_controller.replace('0x', '')
                except RemainingScaleBytesNotEmptyException:
                    pass

            # Retrieve session account
            validator_session = validation_session_lookup.get(validator_stash)
        else:
            # Legacy path: validators list holds controller accounts;
            # resolve the stash via Staking.Ledger.
            validator_controller = validator_account.replace('0x', '')

            # Retrieve stash account
            storage_call = RuntimeStorage.query(db_session).filter_by(
                module_id='staking',
                name='Ledger',
                spec_version=self.block.spec_version_id
            ).first()

            if storage_call:
                try:
                    validator_ledger = substrate.get_storage(
                        block_hash=self.block.hash,
                        module="Staking",
                        function="Ledger",
                        params=validator_controller,
                        return_scale_type=storage_call.get_return_type(),
                        hasher=storage_call.type_hasher
                    ) or {}

                    validator_stash = validator_ledger.get('stash', '').replace('0x', '')
                except RemainingScaleBytesNotEmptyException:
                    pass

            # Retrieve session account
            storage_call = RuntimeStorage.query(db_session).filter_by(
                module_id='session',
                name='NextKeyFor',
                spec_version=self.block.spec_version_id
            ).first()

            if storage_call:
                try:
                    validator_session = substrate.get_storage(
                        block_hash=self.block.hash,
                        module="Session",
                        function="NextKeyFor",
                        params=validator_controller,
                        return_scale_type=storage_call.get_return_type(),
                        hasher=storage_call.type_hasher
                    ) or ''
                except RemainingScaleBytesNotEmptyException:
                    pass

            validator_session = validator_session.replace('0x', '')

        # Retrieve validator preferences for stash account
        storage_call = RuntimeStorage.query(db_session).filter_by(
            module_id='staking',
            name='Validators',
            spec_version=self.block.spec_version_id
        ).first()

        if storage_call:
            try:
                validator_prefs = substrate.get_storage(
                    block_hash=self.block.hash,
                    module="Staking",
                    function="Validators",
                    params=validator_stash,
                    return_scale_type=storage_call.get_return_type(),
                    hasher=storage_call.type_hasher
                ) or {'col1': {}, 'col2': {}}
            except RemainingScaleBytesNotEmptyException:
                pass

        # Retrieve nominators
        storage_call = RuntimeStorage.query(db_session).filter_by(
            module_id='staking',
            name='Stakers',
            spec_version=self.block.spec_version_id
        ).first()

        if storage_call:
            try:
                exposure = substrate.get_storage(
                    block_hash=self.block.hash,
                    module="Staking",
                    function="Stakers",
                    params=validator_stash,
                    return_scale_type=storage_call.get_return_type(),
                    hasher=storage_call.type_hasher
                ) or {}
            except RemainingScaleBytesNotEmptyException:
                pass

        # Nominator-bonded total = total exposure minus the validator's own.
        if exposure.get('total'):
            bonded_nominators = exposure.get('total') - exposure.get('own')
        else:
            bonded_nominators = None

        session_validator = SessionValidator(
            session_id=session_id,
            validator_controller=validator_controller,
            validator_stash=validator_stash,
            bonded_total=exposure.get('total'),
            bonded_active=validator_ledger.get('active'),
            bonded_own=exposure.get('own'),
            bonded_nominators=bonded_nominators,
            validator_session=validator_session,
            rank_validator=rank_nr,
            unlocking=validator_ledger.get('unlocking'),
            count_nominators=len(exposure.get('others', [])),
            unstake_threshold=validator_prefs.get('col1', {}).get('unstakeThreshold'),
            commission=validator_prefs.get('col1', {}).get('validatorPayment')
        )
        session_validator.save(db_session)

        # Store nominators
        for rank_nominator, nominator_info in enumerate(exposure.get('others', [])):
            nominator_stash = nominator_info.get('who').replace('0x', '')
            nominators.append(nominator_stash)

            session_nominator = SessionNominator(
                session_id=session_id,
                rank_validator=rank_nr,
                rank_nominator=rank_nominator,
                nominator_stash=nominator_stash,
                bonded=nominator_info.get('value'),
            )
            session_nominator.save(db_session)

    # Store session
    session = Session(
        id=session_id,
        start_at_block=self.block.id + 1,
        created_at_block=self.block.id,
        created_at_extrinsic=self.event.extrinsic_idx,
        created_at_event=self.event.event_idx,
        count_validators=len(validators),
        count_nominators=len(set(nominators)),
        era=current_era
    )
    session.save(db_session)

    # Retrieve previous session to calculate count_blocks
    prev_session = Session.query(db_session).filter_by(id=session_id - 1).first()

    if prev_session:
        count_blocks = self.block.id - prev_session.start_at_block + 1
    else:
        count_blocks = self.block.id

    # Close out the previous session's totals.
    session_total = SessionTotal(
        id=session_id - 1,
        end_at_block=self.block.id,
        count_blocks=count_blocks
    )
    session_total.save(db_session)
def serialize_item(self, item):
    """Serialize an account with balance history and optional live balances.

    Always embeds a "Total balance" line-chart series built from up to
    1000 AccountInfoSnapshot rows. When USE_NODE_RETRIEVE_BALANCES is
    enabled, additionally queries the node for current balances; the
    storage layout used depends on settings.SUBSTRATE_STORAGE_BALANCE:
    'Account' (System.Account), 'Balances.Account', or the legacy
    FreeBalance/ReservedBalance/AccountNonce functions.
    """
    data = item.serialize()

    # Get balance history (newest 1000 snapshots, rendered oldest-first)
    account_info_snapshot = AccountInfoSnapshot.query(
        self.session).filter_by(account_id=item.id).order_by(
        AccountInfoSnapshot.block_id.desc())[:1000]

    # NOTE: the comprehension variable 'item' shadows the method parameter
    # inside the comprehension only (Python 3 comprehension scoping).
    data['attributes']['balance_history'] = [{
        'name': "Total balance",
        'type': 'line',
        'data': [[
            item.block_id,
            float((item.balance_total or 0) / 10**settings.SUBSTRATE_TOKEN_DECIMALS)
        ] for item in reversed(account_info_snapshot)],
    }]

    if settings.USE_NODE_RETRIEVE_BALANCES == 'True':
        substrate = SubstrateInterface(
            url=settings.SUBSTRATE_RPC_URL,
            type_registry_preset=settings.TYPE_REGISTRY)

        if settings.SUBSTRATE_STORAGE_BALANCE == 'Account':
            # Decode with the most recent runtime's storage metadata.
            storage_call = RuntimeStorage.query(self.session).filter_by(
                module_id='system',
                name='Account',
            ).order_by(RuntimeStorage.spec_version.desc()).first()

            if storage_call:
                account_data = substrate.get_storage(
                    block_hash=None,
                    module='System',
                    function='Account',
                    params=[item.id],
                    return_scale_type=storage_call.type_value,
                    hasher=storage_call.type_hasher,
                    metadata_version=settings.SUBSTRATE_METADATA_VERSION)
                if account_data:
                    data['attributes']['free_balance'] = account_data[
                        'data']['free']
                    data['attributes']['reserved_balance'] = account_data[
                        'data']['reserved']
                    data['attributes'][
                        'misc_frozen_balance'] = account_data['data'][
                        'miscFrozen']
                    data['attributes'][
                        'fee_frozen_balance'] = account_data['data'][
                        'feeFrozen']
                    data['attributes']['nonce'] = account_data['nonce']

        elif settings.SUBSTRATE_STORAGE_BALANCE == 'Balances.Account':
            storage_call = RuntimeStorage.query(self.session).filter_by(
                module_id='balances',
                name='Account',
            ).order_by(RuntimeStorage.spec_version.desc()).first()

            if storage_call:
                account_data = substrate.get_storage(
                    block_hash=None,
                    module='Balances',
                    function='Account',
                    params=[item.id],
                    return_scale_type=storage_call.type_value,
                    hasher=storage_call.type_hasher,
                    metadata_version=settings.SUBSTRATE_METADATA_VERSION)
                if account_data:
                    # NOTE(review): keys here are 'balance_free' /
                    # 'balance_reserved', unlike the 'free_balance' naming
                    # in the other branches — confirm consumers expect this.
                    data['attributes']['balance_free'] = account_data[
                        'free']
                    data['attributes']['balance_reserved'] = account_data[
                        'reserved']
                    data['attributes'][
                        'misc_frozen_balance'] = account_data['miscFrozen']
                    data['attributes'][
                        'fee_frozen_balance'] = account_data['feeFrozen']
                    data['attributes']['nonce'] = None
        else:
            # Legacy storage layout: separate calls per value.
            storage_call = RuntimeStorage.query(self.session).filter_by(
                module_id='balances',
                name='FreeBalance',
            ).order_by(RuntimeStorage.spec_version.desc()).first()

            if storage_call:
                data['attributes']['free_balance'] = substrate.get_storage(
                    block_hash=None,
                    module='Balances',
                    function='FreeBalance',
                    params=[item.id],
                    return_scale_type=storage_call.type_value,
                    hasher=storage_call.type_hasher,
                    metadata_version=settings.SUBSTRATE_METADATA_VERSION)

            storage_call = RuntimeStorage.query(self.session).filter_by(
                module_id='balances',
                name='ReservedBalance',
            ).order_by(RuntimeStorage.spec_version.desc()).first()

            if storage_call:
                data['attributes'][
                    'reserved_balance'] = substrate.get_storage(
                    block_hash=None,
                    module='Balances',
                    function='ReservedBalance',
                    params=[item.id],
                    return_scale_type=storage_call.type_value,
                    hasher=storage_call.type_hasher,
                    metadata_version=settings.SUBSTRATE_METADATA_VERSION)

            storage_call = RuntimeStorage.query(self.session).filter_by(
                module_id='system',
                name='AccountNonce',
            ).order_by(RuntimeStorage.spec_version.desc()).first()

            if storage_call:
                data['attributes']['nonce'] = substrate.get_storage(
                    block_hash=None,
                    module='System',
                    function='AccountNonce',
                    params=[item.id],
                    return_scale_type=storage_call.type_value,
                    hasher=storage_call.type_hasher,
                    metadata_version=settings.SUBSTRATE_METADATA_VERSION)

    return data
def process_genesis(self, block):
    """Process the genesis block.

    Sets the genesis block's datetime from its child block, imports
    genesis accounts and account indices from the Indices EnumSet
    storage pages, and creates the initial session (id 0).
    """
    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)

    # Set block time of parent block
    # (genesis has no timestamp extrinsic; borrow the child's datetime)
    child_block = Block.query(
        self.db_session).filter_by(parent_hash=block.hash).first()
    block.set_datetime(child_block.datetime)

    # Retrieve genesis accounts
    storage_call = RuntimeStorage.query(self.db_session).filter_by(
        module_id='indices',
        name='NextEnumSet',
        spec_version=block.spec_version_id).first()

    if storage_call:
        genesis_account_page_count = substrate.get_storage(
            block_hash=block.hash,
            module="Indices",
            function="NextEnumSet",
            return_scale_type=storage_call.get_return_type(),
            hasher=storage_call.type_hasher) or 0

        # Get Accounts on EnumSet
        storage_call = RuntimeStorage.query(self.db_session).filter_by(
            module_id='indices',
            name='EnumSet',
            spec_version=block.spec_version_id).first()

        if storage_call:
            block.count_accounts_new = 0
            block.count_accounts = 0

            for enum_set_nr in range(0, genesis_account_page_count + 1):
                # Encode the page number as a SCALE U32 storage parameter.
                account_index_u32 = U32()
                account_index_u32.encode(enum_set_nr)

                genesis_accounts = substrate.get_storage(
                    block_hash=block.hash,
                    module="Indices",
                    function="EnumSet",
                    params=account_index_u32.data.data.hex(),
                    return_scale_type=storage_call.get_return_type(),
                    hasher=storage_call.type_hasher)

                if genesis_accounts:
                    block.count_accounts_new += len(genesis_accounts)
                    block.count_accounts += len(genesis_accounts)

                    for idx, account_id in enumerate(genesis_accounts):
                        account_audit = AccountAudit(
                            account_id=account_id.replace('0x', ''),
                            block_id=block.id,
                            extrinsic_idx=None,
                            event_idx=None,
                            type_id=ACCOUNT_AUDIT_TYPE_NEW)

                        account_audit.save(self.db_session)

                        # Index computation assumes a page size of 64
                        # entries per EnumSet page — TODO confirm.
                        account_index_id = enum_set_nr * 64 + idx

                        account_index_audit = AccountIndexAudit(
                            account_index_id=account_index_id,
                            account_id=account_id.replace('0x', ''),
                            block_id=block.id,
                            extrinsic_idx=None,
                            event_idx=None,
                            type_id=ACCOUNT_INDEX_AUDIT_TYPE_NEW)

                        account_index_audit.save(self.db_session)

    block.save(self.db_session)

    # Create initial session
    initial_session_event = NewSessionEventProcessor(block, Event(), None)
    initial_session_event.add_session(db_session=self.db_session, session_id=0)
def serialize_item(self, item):
    """Serialize an account and attach balances retrieved from the node.

    Depending on SUBSTRATE_STORAGE_BALANCE, balances are read either
    from the combined Balances.Account entry or from the legacy
    FreeBalance / ReservedBalance / AccountNonce storage functions,
    always at the current chain head (block_hash=None).
    """
    substrate = SubstrateInterface(SUBSTRATE_RPC_URL)
    data = item.serialize()
    attributes = data['attributes']

    def latest_storage(module_id, name):
        # Storage metadata of the most recent runtime for this function.
        return RuntimeStorage.query(self.session).filter_by(
            module_id=module_id,
            name=name,
        ).order_by(RuntimeStorage.spec_version.desc()).first()

    def read_storage(storage_call, module, function):
        # Query the node, decoding with the given storage metadata.
        return substrate.get_storage(
            block_hash=None,
            module=module,
            function=function,
            params=item.id,
            return_scale_type=storage_call.type_value,
            hasher=storage_call.type_hasher,
            metadata_version=SUBSTRATE_METADATA_VERSION)

    if SUBSTRATE_STORAGE_BALANCE == 'Account':
        storage_call = latest_storage('balances', 'Account')
        if storage_call:
            account_data = read_storage(storage_call, 'Balances', 'Account')
            if account_data:
                attributes['free_balance'] = account_data['free']
                attributes['reserved_balance'] = account_data['reserved']
    else:
        storage_call = latest_storage('balances', 'FreeBalance')
        if storage_call:
            attributes['free_balance'] = read_storage(
                storage_call, 'Balances', 'FreeBalance')

        storage_call = latest_storage('balances', 'ReservedBalance')
        if storage_call:
            attributes['reserved_balance'] = read_storage(
                storage_call, 'Balances', 'ReservedBalance')

        storage_call = latest_storage('system', 'AccountNonce')
        if storage_call:
            attributes['nonce'] = read_storage(
                storage_call, 'System', 'AccountNonce')

    return data
def process_metadata(self, runtime_version_data, block_hash):
    """Ensure metadata for the runtime in ``runtime_version_data`` is cached.

    On a cache miss the metadata is either re-decoded from the JSON
    already stored for the runtime, or fetched from the node and
    persisted — including modules, calls, events, storage functions,
    constants and the types registered via ``process_metadata_type`` —
    inside one (sub)transaction. A legacy path handles V0 metadata.
    """
    spec_version = runtime_version_data.get('specVersion', 0)

    # Check if metadata already in store
    if spec_version not in self.metadata_store:
        print('Metadata: CACHE MISS', spec_version)

        runtime = Runtime.query(self.db_session).get(spec_version)

        if runtime:
            # Runtime already persisted: re-decode the stored JSON
            # metadata instead of hitting the node.
            metadata_decoder = MetadataDecoder(
                ScaleBytes(runtime.json_metadata))
            metadata_decoder.decode()

            self.metadata_store[spec_version] = metadata_decoder
        else:
            self.db_session.begin(subtransactions=True)
            try:
                # ==== Get block Metadata from Substrate ==================
                substrate = SubstrateInterface(SUBSTRATE_RPC_URL)
                metadata_decoder = substrate.get_block_metadata(block_hash)

                # Store metadata in database
                runtime = Runtime(
                    id=spec_version,
                    impl_name=runtime_version_data["implName"],
                    impl_version=runtime_version_data["implVersion"],
                    spec_name=runtime_version_data["specName"],
                    spec_version=spec_version,
                    json_metadata=str(metadata_decoder.data),
                    json_metadata_decoded=metadata_decoder.value,
                    apis=runtime_version_data["apis"],
                    authoring_version=runtime_version_data[
                        "authoringVersion"],
                    # Totals start at 0 and are accumulated below.
                    count_call_functions=0,
                    count_events=0,
                    count_modules=len(metadata_decoder.metadata.modules),
                    count_storage_functions=0)

                runtime.save(self.db_session)

                print('store version to db', metadata_decoder.version)

                if not metadata_decoder.version:
                    # Legacy V0 fallback
                    for module in metadata_decoder.metadata.modules:
                        runtime_module = RuntimeModule(
                            spec_version=spec_version,
                            module_id=module.get_identifier(),
                            prefix=module.prefix,
                            name=module.get_identifier(),
                            count_call_functions=len(module.functions or []),
                            count_storage_functions=len(module.storage or []),
                            count_events=0)
                        runtime_module.save(self.db_session)

                        if len(module.functions or []) > 0:
                            for idx, call in enumerate(module.functions):
                                runtime_call = RuntimeCall(
                                    spec_version=spec_version,
                                    module_id=module.get_identifier(),
                                    call_id=call.get_identifier(),
                                    index=idx,
                                    name=call.name,
                                    lookup=call.lookup,
                                    documentation='\n'.join(call.docs),
                                    count_params=len(call.args))
                                runtime_call.save(self.db_session)

                                for arg in call.args:
                                    runtime_call_param = RuntimeCallParam(
                                        runtime_call_id=runtime_call.id,
                                        name=arg.name,
                                        type=arg.type)
                                    runtime_call_param.save(
                                        self.db_session)

                                    # Check if type already registered in database
                                    self.process_metadata_type(
                                        arg.type, spec_version)

                    # NOTE(review): this V0 events pass reuses the
                    # 'runtime_module' leaked from the loop above, so all
                    # event counts accrue to the last module — confirm
                    # this is the intended legacy behaviour.
                    for event_module in metadata_decoder.metadata.events_modules:
                        for event_index, event in enumerate(
                                event_module.events):
                            runtime_event = RuntimeEvent(
                                spec_version=spec_version,
                                module_id=event_module.name,
                                event_id=event.name,
                                index=event_index,
                                name=event.name,
                                lookup=event.lookup,
                                documentation='\n'.join(event.docs),
                                count_attributes=len(event.args))
                            runtime_event.save(self.db_session)

                            runtime_module.count_events += 1

                            for arg_index, arg in enumerate(event.args):
                                runtime_event_attr = RuntimeEventAttribute(
                                    runtime_event_id=runtime_event.id,
                                    index=arg_index,
                                    type=arg)
                                runtime_event_attr.save(self.db_session)

                    runtime_module.save(self.db_session)
                else:
                    for module in metadata_decoder.metadata.modules:
                        # Check if module exists
                        # (duplicate identifiers get a '_1' suffix to
                        # keep module_id unique per spec_version)
                        if RuntimeModule.query(self.db_session).filter_by(
                            spec_version=spec_version,
                            module_id=module.get_identifier()).count(
                        ) == 0:
                            module_id = module.get_identifier()
                        else:
                            module_id = '{}_1'.format(
                                module.get_identifier())

                        # Storage backwards compt check
                        if module.storage and isinstance(
                                module.storage, list):
                            storage_functions = module.storage
                        elif module.storage and isinstance(
                                getattr(module.storage, 'value'), dict):
                            storage_functions = module.storage.items
                        else:
                            storage_functions = []

                        runtime_module = RuntimeModule(
                            spec_version=spec_version,
                            module_id=module_id,
                            prefix=module.prefix,
                            name=module.name,
                            count_call_functions=len(module.calls or []),
                            count_storage_functions=len(storage_functions),
                            count_events=len(module.events or []))
                        runtime_module.save(self.db_session)

                        # Update totals in runtime
                        runtime.count_call_functions += runtime_module.count_call_functions
                        runtime.count_events += runtime_module.count_events
                        runtime.count_storage_functions += runtime_module.count_storage_functions

                        if len(module.calls or []) > 0:
                            for idx, call in enumerate(module.calls):
                                runtime_call = RuntimeCall(
                                    spec_version=spec_version,
                                    module_id=module_id,
                                    call_id=call.get_identifier(),
                                    index=idx,
                                    name=call.name,
                                    lookup=call.lookup,
                                    documentation='\n'.join(call.docs),
                                    count_params=len(call.args))
                                runtime_call.save(self.db_session)

                                for arg in call.args:
                                    runtime_call_param = RuntimeCallParam(
                                        runtime_call_id=runtime_call.id,
                                        name=arg.name,
                                        type=arg.type)
                                    runtime_call_param.save(
                                        self.db_session)

                                    # Check if type already registered in database
                                    self.process_metadata_type(
                                        arg.type, spec_version)

                        if len(module.events or []) > 0:
                            for event_index, event in enumerate(
                                    module.events):
                                runtime_event = RuntimeEvent(
                                    spec_version=spec_version,
                                    module_id=module_id,
                                    event_id=event.name,
                                    index=event_index,
                                    name=event.name,
                                    lookup=event.lookup,
                                    documentation='\n'.join(event.docs),
                                    count_attributes=len(event.args))
                                runtime_event.save(self.db_session)

                                for arg_index, arg in enumerate(
                                        event.args):
                                    runtime_event_attr = RuntimeEventAttribute(
                                        runtime_event_id=runtime_event.id,
                                        index=arg_index,
                                        type=arg)
                                    runtime_event_attr.save(
                                        self.db_session)

                        if len(storage_functions) > 0:
                            for idx, storage in enumerate(
                                    storage_functions):

                                # Determine type
                                # Defaults cover the PlainType case where
                                # only type_value applies.
                                type_hasher = None
                                type_key1 = None
                                type_key2 = None
                                type_value = None
                                type_is_linked = None
                                type_key2hasher = None

                                if storage.type.get('PlainType'):
                                    type_value = storage.type.get(
                                        'PlainType')
                                elif storage.type.get('MapType'):
                                    type_hasher = storage.type[
                                        'MapType'].get('hasher')
                                    type_key1 = storage.type[
                                        'MapType'].get('key')
                                    type_value = storage.type[
                                        'MapType'].get('value')
                                    type_is_linked = storage.type[
                                        'MapType'].get('isLinked', False)
                                elif storage.type.get('DoubleMapType'):
                                    type_hasher = storage.type[
                                        'DoubleMapType'].get('hasher')
                                    type_key1 = storage.type[
                                        'DoubleMapType'].get('key1')
                                    type_key2 = storage.type[
                                        'DoubleMapType'].get('key2')
                                    type_value = storage.type[
                                        'DoubleMapType'].get('value')
                                    type_key2hasher = storage.type[
                                        'DoubleMapType'].get('key2Hasher')

                                runtime_storage = RuntimeStorage(
                                    spec_version=spec_version,
                                    module_id=module_id,
                                    index=idx,
                                    name=storage.name,
                                    lookup=None,
                                    default=storage.fallback,
                                    modifier=storage.modifier,
                                    type_hasher=type_hasher,
                                    type_key1=type_key1,
                                    type_key2=type_key2,
                                    type_value=type_value,
                                    type_is_linked=type_is_linked,
                                    type_key2hasher=type_key2hasher,
                                    documentation='\n'.join(storage.docs))
                                runtime_storage.save(self.db_session)

                                # Check if types already registered in database
                                self.process_metadata_type(
                                    type_value, spec_version)

                                if type_key1:
                                    self.process_metadata_type(
                                        type_key1, spec_version)

                                if type_key2:
                                    self.process_metadata_type(
                                        type_key2, spec_version)

                        if len(module.constants or []) > 0:
                            for idx, constant in enumerate(
                                    module.constants):

                                # Decode value; fall back to the raw
                                # constant value on known decode failures.
                                try:
                                    value_obj = ScaleDecoder.get_decoder_class(
                                        constant.type,
                                        ScaleBytes(
                                            constant.constant_value))
                                    value_obj.decode()
                                    value = value_obj.serialize()
                                except ValueError:
                                    value = constant.constant_value
                                except RemainingScaleBytesNotEmptyException:
                                    value = constant.constant_value
                                except NotImplementedError:
                                    value = constant.constant_value

                                runtime_constant = RuntimeConstant(
                                    spec_version=spec_version,
                                    module_id=module_id,
                                    index=idx,
                                    name=constant.name,
                                    type=constant.type,
                                    value=value,
                                    documentation='\n'.join(constant.docs))
                                runtime_constant.save(self.db_session)

                                # Check if types already registered in database
                                self.process_metadata_type(
                                    constant.type, spec_version)

                # Persist the accumulated totals.
                runtime.save(self.db_session)

                self.db_session.commit()

                # Put in local store
                self.metadata_store[spec_version] = metadata_decoder
            except SQLAlchemyError as e:
                # NOTE(review): the error is swallowed after rollback and
                # the cache entry is never populated — confirm this
                # best-effort behaviour is intended.
                self.db_session.rollback()