def process_metadata(self, spec_version, block_hash):
    # Check if metadata already stored
    runtime = Runtime.query(self.db_session).get(spec_version)

    if runtime:
        if spec_version in self.substrate.metadata_cache:
            self.metadata_store[spec_version] = self.substrate.metadata_cache[spec_version]
        else:
            self.metadata_store[spec_version] = self.substrate.get_block_metadata(block_hash=block_hash)
    else:
        print('Metadata: CACHE MISS', spec_version)

        runtime_version_data = self.substrate.get_block_runtime_version(block_hash)

        self.db_session.begin(subtransactions=True)
        try:
            # Store metadata in database
            runtime = Runtime(
                id=spec_version,
                impl_name=runtime_version_data["implName"],
                impl_version=runtime_version_data["implVersion"],
                spec_name=runtime_version_data["specName"],
                spec_version=spec_version,
                json_metadata=str(self.substrate.metadata_decoder.data),
                json_metadata_decoded=self.substrate.metadata_decoder.value,
                apis=runtime_version_data["apis"],
                authoring_version=runtime_version_data["authoringVersion"],
                count_call_functions=0,
                count_events=0,
                count_modules=len(self.substrate.metadata_decoder.metadata.modules),
                count_storage_functions=0,
                count_constants=0,
                count_errors=0
            )
            runtime.save(self.db_session)

            print('store version to db', self.substrate.metadata_decoder.version)

            for module_index, module in enumerate(self.substrate.metadata_decoder.metadata.modules):

                # Check if module exists
                if RuntimeModule.query(self.db_session).filter_by(
                        spec_version=spec_version,
                        module_id=module.get_identifier()).count() == 0:
                    module_id = module.get_identifier()
                else:
                    module_id = '{}_1'.format(module.get_identifier())

                # Storage backwards compatibility check
                if module.storage and isinstance(module.storage, list):
                    storage_functions = module.storage
                elif module.storage and isinstance(getattr(module.storage, 'value'), dict):
                    storage_functions = module.storage.items
                else:
                    storage_functions = []

                runtime_module = RuntimeModule(
                    spec_version=spec_version,
                    module_id=module_id,
                    prefix=module.prefix,
                    name=module.name,
                    count_call_functions=len(module.calls or []),
                    count_storage_functions=len(storage_functions),
                    count_events=len(module.events or []),
                    count_constants=len(module.constants or []),
                    count_errors=len(module.errors or []),
                )
                runtime_module.save(self.db_session)

                # Update totals in runtime
                runtime.count_call_functions += runtime_module.count_call_functions
                runtime.count_events += runtime_module.count_events
                runtime.count_storage_functions += runtime_module.count_storage_functions
                runtime.count_constants += runtime_module.count_constants
                runtime.count_errors += runtime_module.count_errors

                if len(module.calls or []) > 0:
                    for idx, call in enumerate(module.calls):
                        runtime_call = RuntimeCall(
                            spec_version=spec_version,
                            module_id=module_id,
                            call_id=call.get_identifier(),
                            index=idx,
                            name=call.name,
                            lookup=call.lookup,
                            documentation='\n'.join(call.docs),
                            count_params=len(call.args)
                        )
                        runtime_call.save(self.db_session)

                        for arg in call.args:
                            runtime_call_param = RuntimeCallParam(
                                runtime_call_id=runtime_call.id,
                                name=arg.name,
                                type=arg.type
                            )
                            runtime_call_param.save(self.db_session)

                if len(module.events or []) > 0:
                    for event_index, event in enumerate(module.events):
                        runtime_event = RuntimeEvent(
                            spec_version=spec_version,
                            module_id=module_id,
                            event_id=event.name,
                            index=event_index,
                            name=event.name,
                            lookup=event.lookup,
                            documentation='\n'.join(event.docs),
                            count_attributes=len(event.args)
                        )
                        runtime_event.save(self.db_session)

                        for arg_index, arg in enumerate(event.args):
                            runtime_event_attr = RuntimeEventAttribute(
                                runtime_event_id=runtime_event.id,
                                index=arg_index,
                                type=arg
                            )
                            runtime_event_attr.save(self.db_session)

                if len(storage_functions) > 0:
                    for idx, storage in enumerate(storage_functions):

                        # Determine type
                        type_hasher = None
                        type_key1 = None
                        type_key2 = None
                        type_value = None
                        type_is_linked = None
                        type_key2hasher = None

                        if storage.type.get('PlainType'):
                            type_value = storage.type.get('PlainType')
                        elif storage.type.get('MapType'):
                            type_hasher = storage.type['MapType'].get('hasher')
                            type_key1 = storage.type['MapType'].get('key')
                            type_value = storage.type['MapType'].get('value')
                            type_is_linked = storage.type['MapType'].get('isLinked', False)
                        elif storage.type.get('DoubleMapType'):
                            type_hasher = storage.type['DoubleMapType'].get('hasher')
                            type_key1 = storage.type['DoubleMapType'].get('key1')
                            type_key2 = storage.type['DoubleMapType'].get('key2')
                            type_value = storage.type['DoubleMapType'].get('value')
                            type_key2hasher = storage.type['DoubleMapType'].get('key2Hasher')

                        runtime_storage = RuntimeStorage(
                            spec_version=spec_version,
                            module_id=module_id,
                            index=idx,
                            name=storage.name,
                            lookup=None,
                            default=storage.fallback,
                            modifier=storage.modifier,
                            type_hasher=type_hasher,
                            storage_key=xxh128(module.prefix.encode()) + xxh128(storage.name.encode()),
                            type_key1=type_key1,
                            type_key2=type_key2,
                            type_value=type_value,
                            type_is_linked=type_is_linked,
                            type_key2hasher=type_key2hasher,
                            documentation='\n'.join(storage.docs)
                        )
                        runtime_storage.save(self.db_session)

                if len(module.constants or []) > 0:
                    for idx, constant in enumerate(module.constants):

                        # Decode value; fall back to the raw constant value if decoding fails
                        try:
                            value_obj = ScaleDecoder.get_decoder_class(
                                constant.type,
                                ScaleBytes(constant.constant_value)
                            )
                            value_obj.decode()
                            value = value_obj.serialize()
                        except (ValueError, RemainingScaleBytesNotEmptyException, NotImplementedError):
                            value = constant.constant_value

                        if type(value) is list or type(value) is dict:
                            value = json.dumps(value)

                        runtime_constant = RuntimeConstant(
                            spec_version=spec_version,
                            module_id=module_id,
                            index=idx,
                            name=constant.name,
                            type=constant.type,
                            value=value,
                            documentation='\n'.join(constant.docs)
                        )
                        runtime_constant.save(self.db_session)

                if len(module.errors or []) > 0:
                    for idx, error in enumerate(module.errors):
                        runtime_error = RuntimeErrorMessage(
                            spec_version=spec_version,
                            module_id=module_id,
                            module_index=module_index,
                            index=idx,
                            name=error.name,
                            documentation='\n'.join(error.docs)
                        )
                        runtime_error.save(self.db_session)

                runtime.save(self.db_session)

            # Process types
            for runtime_type_data in list(self.substrate.get_type_registry(block_hash=block_hash).values()):
                runtime_type = RuntimeType(
                    spec_version=runtime_type_data["spec_version"],
                    type_string=runtime_type_data["type_string"],
                    decoder_class=runtime_type_data["decoder_class"],
                    is_primitive_core=runtime_type_data["is_primitive_core"],
                    is_primitive_runtime=runtime_type_data["is_primitive_runtime"]
                )
                runtime_type.save(self.db_session)

            self.db_session.commit()

            # Put in local store
            self.metadata_store[spec_version] = self.substrate.metadata_decoder
        except SQLAlchemyError as e:
            self.db_session.rollback()
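
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the harvester code above): how a Substrate
# storage key is composed from the module prefix, the storage function name
# and, for maps, a Blake2_128Concat-hashed key. It mirrors the
# `storage_key=xxh128(...) + xxh128(...)` expression in process_metadata and
# Step 3 of the RPC breakdown test below. The helper names (twox_128,
# blake2_128_concat_hex, build_storage_key) are made up for illustration; the
# production code relies on the project's own `xxh128`/`hasher` utilities.
# ---------------------------------------------------------------------------
import hashlib

import xxhash


def twox_128(data: bytes) -> str:
    """TwoX 128-bit hash (two seeded xxHash64 digests, little-endian), hex-encoded."""
    part_1 = bytearray(xxhash.xxh64(data, seed=0).digest())
    part_1.reverse()
    part_2 = bytearray(xxhash.xxh64(data, seed=1).digest())
    part_2.reverse()
    return part_1.hex() + part_2.hex()


def blake2_128_concat_hex(data: bytes) -> str:
    """Blake2b-128 digest of the key with the raw key appended, hex-encoded."""
    return hashlib.blake2b(data, digest_size=16).digest().hex() + data.hex()


def build_storage_key(module: str, storage_function: str, key: bytes = None) -> str:
    """Compose twox128(module) ++ twox128(storage_function) [++ blake2_128_concat(key)]."""
    storage_key = twox_128(module.encode()) + twox_128(storage_function.encode())
    if key is not None:
        storage_key += blake2_128_concat_hex(key)
    return "0x" + storage_key


# Example (assumed usage): storage prefix for System.Account, as queried in the test below
# print(build_storage_key("System", "Account"))
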
def test_balance_check_rpc_breakdown(self):
    """
    This test breaks down every RPC call that happens to get a balance.
    It is here only for the purpose of understanding how everything works.

    Steps:

    Step 1: Initial preparatory steps
        - Connect to node
        - RPC Call 1: chain_getFinalisedHead - get current finalised block hash

    Step 2: Get metadata for the current block hash
        - RPC Call 2: state_getMetadata
        - Decode the result using MetadataDecoder and ScaleBytes

    Step 3: Prepare hashed data to make the next RPC call
        - We need three params hashed into one single hash string:
          storage module, storage function and payload
        - Each param is encoded and hashed using various hashers; the hasher
          for the payload is obtained from the dict at the bottom of this docstring

    Step 4: Get data at the storage hash prepared in Step 3
        - RPC Call 3: state_getStorageAt
        - Decode the data using ScaleDecoder's special class

    This is the key piece of information used to encode/hash/decode throughout
    the entire function:

    {
        'name': 'Account',
        'modifier': 'Default',
        'type': {
            'MapType': {
                'hasher': 'Blake2_128Concat',
                'key': 'AccountId',
                'value': 'AccountInfo<Index, AccountData>',
                'isLinked': False
            }
        },
    }
    """
    # Test data
    test_address = "HsgNgA5sgjuKxGUeaZPJE8rRn9RuixjvnPkVLFUYLEpj15G"

    ### STEP 1
    # Connect to the node
    substrate = SubstrateInterface(
        url=settings.NODE_URL,
        address_type=2,
        type_registry_preset="kusama",
    )

    # Get finalised block hash
    block_hash = substrate.rpc_request("chain_getFinalisedHead", []).get("result")
    if not block_hash:
        raise Exception("ERROR: RPC call for chain_getFinalisedHead failed")

    print("\n\n")
    print("-" * 100)
    print(f"BLOCK HASH: {block_hash}")

    ### STEP 2
    # Get metadata decoder, this is needed later
    metadata_result = substrate.rpc_request("state_getMetadata", [block_hash]).get("result")
    if not metadata_result:
        raise Exception("ERROR: RPC call for state_getMetadata failed")

    metadata_decoder = MetadataDecoder(ScaleBytes(metadata_result))
    metadata = metadata_decoder.decode()

    ### STEP 3
    # This comes from the metadata dict in the docstring: `type` -> `MapType` -> `key`
    map_type_key = "AccountId"

    test_address_modified = "0x{}".format(ss58.ss58_decode(test_address, 2))
    print(f"TEST ADDRESS SS58 DECODED: {test_address_modified}")

    scale_decoder = ScaleDecoder.get_decoder_class(map_type_key)
    test_address_encoded = scale_decoder.encode(test_address_modified)
    print(f"TEST ADDRESS ENCODED: {test_address_encoded}")

    # Why blake2_128_concat? Because of the metadata dict in the docstring:
    # `type` -> `MapType` -> `hasher`
    test_address_hash = hasher.blake2_128_concat(test_address_encoded.data)

    # `System` is our module and `Account` is our function for this example
    storage_module = "System"
    storage_function = "Account"
    storage_module_hash = hasher.xxh128(storage_module.encode())
    storage_function_hash = hasher.xxh128(storage_function.encode())

    print(f"STORAGE MODULE: {storage_module}")
    print(f"STORAGE MODULE ENCODED: {storage_module.encode()}")
    print(f"STORAGE MODULE ENCODED HASHED: {storage_module_hash}")
    print(f"STORAGE FUNCTION: {storage_function}")
    print(f"STORAGE FUNCTION ENCODED: {storage_function.encode()}")
    print(f"STORAGE FUNCTION ENCODED HASHED: {storage_function_hash}")
    print(f"TEST ADDRESS: {test_address}")
    print(f"TEST ADDRESS SS58 DECODED: {test_address_modified}")
    print(f"TEST ADDRESS ENCODED: {test_address_encoded}")
    print(f"TEST ADDRESS ENCODED HASHED: {test_address_hash}")

    storage_hash = (f"0x"
                    f"{storage_module_hash}"
                    f"{storage_function_hash}"
                    f"{test_address_hash}")
    print(f"COMBINED HASH: {storage_hash}")

    ### STEP 4
    response = substrate.rpc_request("state_getStorageAt", [storage_hash, block_hash])
    result = response.get("result")
    if not result:
        raise Exception("ERROR: RPC call for state_getStorageAt failed")

    print(f"RPC RESULT: {result}")
    print("-" * 100)
    print("DECODING ABOVE RESULT ... ...")

    # This is again extracted from the metadata dict in the docstring:
    # `type` -> `MapType` -> `value`
    return_type = "AccountInfo<Index, AccountData>"
    result_decoded = ScaleDecoder.get_decoder_class(
        return_type, ScaleBytes(result), metadata=metadata
    ).decode()
    print(f"RESULT DECODED: {result_decoded}")
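
# ---------------------------------------------------------------------------
# For comparison, a minimal sketch of the same balance lookup without the
# manual breakdown. It assumes the installed py-substrate-interface release
# exposes `get_runtime_state`, which wraps Steps 2-4 above (metadata fetch,
# storage-key hashing and state_getStorageAt) in a single call. The method
# name, its parameters and this test name are assumptions for illustration,
# not a verified part of this test suite.
# ---------------------------------------------------------------------------
def test_balance_check_single_call(self):
    substrate = SubstrateInterface(
        url=settings.NODE_URL,
        address_type=2,
        type_registry_preset="kusama",
    )
    block_hash = substrate.rpc_request("chain_getFinalisedHead", []).get("result")

    # Same module, storage function and account as in the breakdown test above
    response = substrate.get_runtime_state(
        module="System",
        storage_function="Account",
        params=["HsgNgA5sgjuKxGUeaZPJE8rRn9RuixjvnPkVLFUYLEpj15G"],
        block_hash=block_hash,
    )
    print(f"ACCOUNT INFO: {response.get('result')}")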