def load_historical_network_tpc_capability( self, sort: bool = True) -> Optional[List[List[Union[Timestamp, int]]]]:
    '''
    Load the saved history of network transaction throughput capability.

    :param sort: if True, sort the entries ascending by timestamp.
    :return: a list of ``[timestamp, transactions per second]`` pairs, or
        ``None`` if nothing has been saved yet.
    '''
    # Fix: the original docstring documented a ``mutable`` parameter that
    # does not exist; it has been removed.
    lookup_key = SchemaV1.make_historical_network_tpc_capability_lookup_key()
    try:
        data = rlp.decode(
            self.db[lookup_key],
            sedes=rlp.sedes.FCountableList(
                rlp.sedes.FList(
                    [rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])),
            use_list=True)
    except KeyError:
        # Nothing has been saved under this key yet.
        return None
    if sort:
        # sorting an empty list is a no-op; no length guard needed
        data.sort()
    return data
def _set_as_canonical_chain_head( self, block_hash: Hash32) -> Tuple[BlockHeader, ...]:
    """
    Set the canonical chain HEAD to the header identified by ``block_hash``.

    :return: the headers that are newly on the canonical chain, oldest first.
    :raises ValueError: if no header with that hash is known.
    """
    try:
        head = self.get_block_header_by_hash(block_hash)
    except HeaderNotFound:
        raise ValueError(
            "Cannot use unknown block hash as canonical head: {}".format(
                block_hash))

    fresh_headers = tuple(reversed(self._find_new_ancestors(head)))

    # Make each newly-canonical header reachable by block number.
    for ancestor in fresh_headers:
        self._add_block_number_to_hash_lookup(ancestor)

    self.db.set(SchemaV1.make_canonical_head_hash_lookup_key(), head.hash)
    return fresh_headers
def load_historical_minimum_gas_price( self, sort: bool = True, return_int=True) -> Optional[List[List[Union[Timestamp, int]]]]:
    '''
    Load the saved ``[timestamp, min gas price]`` history.

    Values are stored scaled up by 100, so they are divided by 100 here to
    restore the real gas price.

    :param sort: if True, sort the entries ascending by timestamp.
    :param return_int: if True, return the price as an int (floor of the
        scaled-down value); otherwise return it as a float.
    :return: a list of ``[timestamp, min gas price]`` pairs, or ``None`` if
        nothing has been saved yet.
    '''
    lookup_key = SchemaV1.make_historical_minimum_gas_price_lookup_key()
    try:
        data = rlp.decode(
            self.db[lookup_key],
            sedes=rlp.sedes.FCountableList(
                rlp.sedes.FList(
                    [rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])),
            use_list=True)
    except KeyError:
        return None
    if sort:
        data.sort()
    return_data = []
    for timestamp, scaled_price in data:
        if return_int:
            # Fix: int(x / 100) goes through float division and silently
            # loses precision once x exceeds 2**53. Integer floor division
            # is exact, and identical for these non-negative values.
            return_data.append([timestamp, scaled_price // 100])
        else:
            return_data.append([timestamp, scaled_price / 100])
    return return_data
def get_historical_root_hashes(self, after_timestamp: Timestamp = None) -> Optional[List[List[Union[Timestamp, Hash32]]]]: ''' This has been performance optimized December 22, 2018 :param after_timestamp: :return: ''' # Automatically sort when loading because we know the data will never be a mix of lists and tuples historical_head_root_lookup_key = SchemaV1.make_historical_head_root_lookup_key() try: data = rlp.decode(self.db[historical_head_root_lookup_key], sedes=rlp.sedes.FCountableList(rlp.sedes.FList([f_big_endian_int, hash32])), use_list=True) data.sort() except KeyError: return None if after_timestamp is None: to_return = data else: timestamps = [x[0] for x in data] index = bisect.bisect_left(timestamps, after_timestamp) to_return = data[index:] if len(to_return) == 0: return None return to_return
def test_chaindb_persist_block(chaindb, block):
    """The block's score lookup entry should appear only after persisting."""
    block = block.copy(header=set_empty_root(chaindb, block.header))
    score_key = SchemaV1.make_block_hash_to_score_lookup_key(block.hash)

    assert not chaindb.exists(score_key)
    chaindb.persist_block(block)
    assert chaindb.exists(score_key)
def delete_historical_root_hashes(self) -> None:
    """Delete the saved historical head root hashes, if any exist."""
    lookup_key = SchemaV1.make_historical_head_root_lookup_key()
    try:
        del self.db[lookup_key]
    except KeyError:
        # Nothing was stored; deletion is a no-op.
        pass
def set_current_syncing_info(self, timestamp: Timestamp, head_root_hash: Hash32) -> None:
    """Persist the head root hash and timestamp describing the current sync target."""
    validate_is_bytes(head_root_hash, title='Head Root Hash')
    validate_uint256(timestamp, title='timestamp')
    lookup_key = SchemaV1.make_current_syncing_info_lookup_key()
    self.db[lookup_key] = rlp.encode([timestamp, head_root_hash], sedes=CurrentSyncingInfo)
def save_now_as_last_min_gas_price_PID_update(self) -> None:
    """Record the current wall-clock time as the last min-gas-price PID update."""
    encoded_now = rlp.encode(int(time.time()), sedes=rlp.sedes.f_big_endian_int)
    self.db.set(
        SchemaV1.make_min_gas_system_last_pid_time_key(),
        encoded_now,
    )
def _remove_address_from_smart_contracts_with_pending_transactions(self, address: Address) -> None:
    """
    Remove ``address`` from the saved set of smart contracts that have
    pending transactions and write the updated set back to the database.

    :raises KeyError: if the address is not in the saved set (same as
        ``set.remove``).
    """
    key = SchemaV1.make_smart_contracts_with_pending_transactions_lookup_key()
    address_set = set(self.get_smart_contracts_with_pending_transactions())
    address_set.remove(address)
    # BUG FIX: the ``address`` parameter shadows the module-level ``address``
    # sedes (used bare in get_smart_contracts_with_pending_transactions), so
    # the original passed raw address bytes to FCountableList where a sedes
    # object is required. Fetch the real sedes from the module globals.
    address_sedes = globals()['address']
    self.db[key] = rlp.encode(list(address_set), sedes=rlp.sedes.FCountableList(address_sedes))
def get_smart_contracts_with_pending_transactions(self) -> List[Address]:
    """Return the saved smart-contract addresses with pending transactions, or []."""
    key = SchemaV1.make_smart_contracts_with_pending_transactions_lookup_key()
    try:
        encoded = self.db[key]
    except KeyError:
        # Nothing recorded yet.
        return []
    return rlp.decode(encoded, sedes=rlp.sedes.FCountableList(address), use_list=True)
def load_saved_root_hash(self):
    """Load the persisted chain-head root hash into ``self.root_hash``, if present."""
    lookup_key = SchemaV1.make_current_head_root_lookup_key()
    try:
        self.root_hash = self.db[lookup_key]
    except KeyError:
        # No saved root hash: fresh genesis-style startup, keep the current value.
        pass
def get_score(self, block_hash: Hash32) -> int:
    """
    Return the stored chain score for the block with the given hash.

    :raises HeaderNotFound: if no score entry exists for that hash.
    """
    score_key = SchemaV1.make_block_hash_to_score_lookup_key(block_hash)
    try:
        encoded_score = self.db[score_key]
    except KeyError:
        raise HeaderNotFound("No header with hash {0} found".format(
            encode_hex(block_hash)))
    return rlp.decode(encoded_score, sedes=rlp.sedes.big_endian_int)
def save_current_account_with_hash_lookup(self, wallet_address):
    """Save the account for ``wallet_address`` under a key derived from its account hash."""
    validate_canonical_address(wallet_address, title="Address")
    account_hash = self.get_account_hash(wallet_address)
    encoded_account = rlp.encode(self._get_account(wallet_address), sedes=Account)
    self.db[SchemaV1.make_account_by_hash_lookup_key(account_hash)] = encoded_account
def get_timestamp_of_last_health_request(self) -> Timestamp:
    """Return when peer node health was last requested, or 0 if never recorded."""
    lookup_key = SchemaV1.make_latest_peer_node_health_timestamp_lookup_key()
    try:
        encoded = self.db[lookup_key]
    except KeyError:
        return 0
    return rlp.decode(encoded, sedes=rlp.sedes.f_big_endian_int)
def get_saved_root_hash(self):
    """Return the persisted chain-head root hash, or BLANK_HASH if none is saved."""
    lookup_key = SchemaV1.make_current_head_root_lookup_key()
    try:
        return self.db[lookup_key]
    except KeyError:
        # Fresh genesis-style startup: nothing persisted yet.
        return BLANK_HASH
def _get_account(self, address):
    """Load the Account stored for ``address``, defaulting to a fresh Account."""
    account_lookup_key = SchemaV1.make_account_lookup_key(address)
    encoded = self._journaldb.get(account_lookup_key, b'')
    if not encoded:
        return Account()
    return rlp.decode(encoded, sedes=Account)
def _set_peer_node_health(self, peer_wallet_address: Address, after_block_number: BlockNumber, peer_node_health: PeerNodeHealth) -> None:
    """Persist a peer's health record, keyed by its wallet address and block number."""
    key = SchemaV1.make_peer_node_health_lookup(peer_wallet_address, after_block_number)
    self.db[key] = rlp.encode(peer_node_health, sedes=PeerNodeHealth)
def get_canonical_head(self) -> BlockHeader:
    """
    Return the block header currently at the head of the canonical chain.

    :raises CanonicalHeadNotFound: if no canonical head has been set.
    """
    head_hash_key = SchemaV1.make_canonical_head_hash_lookup_key()
    try:
        canonical_head_hash = self.db[head_hash_key]
    except KeyError:
        raise CanonicalHeadNotFound("No canonical head set for this chain")
    return self.get_block_header_by_hash(canonical_head_hash)
def revert_to_account_from_hash(self, account_hash, wallet_address):
    """
    Restore the account saved under ``account_hash`` as the current account
    for ``wallet_address``.

    :raises StateRootNotFound: if no account is saved under that hash.
    """
    validate_canonical_address(wallet_address, title="Address")
    validate_is_bytes(account_hash, title="account_hash")
    lookup_key = SchemaV1.make_account_by_hash_lookup_key(account_hash)
    try:
        encoded_account = self.db[lookup_key]
        restored = rlp.decode(encoded_account, sedes=Account)
        self._set_account(wallet_address, restored)
    except KeyError:
        raise StateRootNotFound()
def test_chaindb_persist_header(chaindb, header):
    """Persisting a header makes it retrievable and creates its score entry."""
    # Unknown before persisting.
    with pytest.raises(HeaderNotFound):
        chaindb.get_block_header_by_hash(header.hash)

    score_key = SchemaV1.make_block_hash_to_score_lookup_key(header.hash)
    assert not chaindb.exists(score_key)

    chaindb.persist_header(header)

    assert chaindb.get_block_header_by_hash(header.hash) == header
    assert chaindb.exists(score_key)
def delete_chronological_block_window(self, timestamp):
    """
    Delete the chronological block window saved for ``timestamp``, if any.

    :raises InvalidHeadRootTimestamp: if the timestamp is not aligned to
        TIME_BETWEEN_HEAD_HASH_SAVE.
    """
    validate_uint256(timestamp, title='timestamp')
    if timestamp % TIME_BETWEEN_HEAD_HASH_SAVE != 0:
        raise InvalidHeadRootTimestamp("Can only save or load chronological block for timestamps in increments of {} seconds.".format(TIME_BETWEEN_HEAD_HASH_SAVE))

    self.logger.debug("deleting chronological block window for timestamp {}".format(timestamp))
    window_key = SchemaV1.make_chronological_window_lookup_key(timestamp)
    try:
        del self.db[window_key]
    except KeyError:
        # Window was never saved; nothing to delete.
        pass
def _add_block_number_to_hash_lookup(self, header: BlockHeader) -> None:
    """Record a block-number -> hash entry so this header can be found by number."""
    number_key = SchemaV1.make_block_number_to_hash_lookup_key(header.block_number)
    encoded_hash = rlp.encode(header.hash, sedes=rlp.sedes.binary)
    self.db.set(number_key, encoded_hash)
def save_chronological_block_window(self, data, timestamp):
    """
    Save a chronological block window (list of ``[timestamp, hash]`` pairs)
    under the given window timestamp.

    :raises InvalidHeadRootTimestamp: if the timestamp is not aligned to
        TIME_BETWEEN_HEAD_HASH_SAVE.
    """
    validate_uint256(timestamp, title='timestamp')
    if timestamp % TIME_BETWEEN_HEAD_HASH_SAVE != 0:
        raise InvalidHeadRootTimestamp("Can only save or load chronological block for timestamps in increments of {} seconds.".format(TIME_BETWEEN_HEAD_HASH_SAVE))

    window_key = SchemaV1.make_chronological_window_lookup_key(timestamp)
    pair_sedes = rlp.sedes.FCountableList(rlp.sedes.FList([f_big_endian_int, hash32]))
    self.db.set(window_key, rlp.encode(data, sedes=pair_sedes))
def save_current_root_hash(self) -> None:
    """Persist ``self.root_hash`` so it can be reloaded after a restart."""
    self.logger.debug("Saving current chain head root hash {}".format(encode_hex(self.root_hash)))
    lookup_key = SchemaV1.make_current_head_root_lookup_key()
    self.db.set(lookup_key, self.root_hash)
def save_historical_root_hashes(self, root_hashes):
    """Persist the full list of ``[timestamp, root hash]`` pairs."""
    lookup_key = SchemaV1.make_historical_head_root_lookup_key()
    pair_sedes = rlp.sedes.FCountableList(rlp.sedes.FList([f_big_endian_int, hash32]))
    self.db.set(lookup_key, rlp.encode(root_hashes, sedes=pair_sedes))
def load_root_hash_backup(self) -> List[Tuple[int, Hash32]]:
    """Load the saved root-hash backup entries sorted by timestamp; [] if none."""
    db_key = SchemaV1.make_chain_head_root_hash_backup_key()
    try:
        encoded = self.db[db_key]
    except KeyError:
        return []
    backup = rlp.decode(
        encoded,
        sedes=rlp.sedes.FCountableList(rlp.sedes.FList([f_big_endian_int, hash32])),
        use_list=True)
    backup.sort()
    return backup
def load_chronological_block_window(self, timestamp: Timestamp) -> Optional[List[List[Union[Timestamp, Hash32]]]]:
    '''
    Load the chronological block window saved for ``timestamp``: a sorted
    list of ``[timestamp, hash]`` pairs, or ``None`` if none was saved.

    :raises InvalidHeadRootTimestamp: if the timestamp is not aligned to
        TIME_BETWEEN_HEAD_HASH_SAVE.
    '''
    validate_uint256(timestamp, title='timestamp')
    if timestamp % TIME_BETWEEN_HEAD_HASH_SAVE != 0:
        raise InvalidHeadRootTimestamp("Can only save or load chronological block for timestamps in increments of {} seconds.".format(TIME_BETWEEN_HEAD_HASH_SAVE))

    chronological_window_lookup_key = SchemaV1.make_chronological_window_lookup_key(timestamp)
    try:
        data = rlp.decode(self.db[chronological_window_lookup_key], sedes=rlp.sedes.FCountableList(rlp.sedes.FList([f_big_endian_int, hash32])), use_list = True)
        # Entries are never a mix of lists and tuples here, so sorting is safe.
        data.sort()
        return data
    except KeyError:
        # Nothing saved for this window.
        return None
def save_root_hash_backup( self, root_hash_backup_timestamps: List[Tuple[int, Hash32]]) -> None:
    """Persist the given ``[timestamp, root hash]`` backup entries."""
    backup_sedes = rlp.sedes.FCountableList(rlp.sedes.FList([f_big_endian_int, hash32]))
    self.db.set(
        SchemaV1.make_chain_head_root_hash_backup_key(),
        rlp.encode(root_hash_backup_timestamps, sedes=backup_sedes))
def load_from_saved_root_hash(cls, db) -> 'ChainHeadDB':
    """
    Build an instance from the last saved root hash, or a fresh one when no
    root hash has been saved yet (fresh genesis-style startup).
    """
    lookup_key = SchemaV1.make_current_head_root_lookup_key()
    try:
        saved_root = db[lookup_key]
    except KeyError:
        # Nothing persisted yet.
        return cls(db)
    return cls(db, saved_root)
def save_historical_root_hashes(self, root_hashes): #if root_hashes[-1][0] == 1534567000: # exit() if len(root_hashes) > NUMBER_OF_HEAD_HASH_TO_SAVE: root_hashes = root_hashes[-NUMBER_OF_HEAD_HASH_TO_SAVE:] historical_head_root_lookup_key = SchemaV1.make_historical_head_root_lookup_key() data = rlp.encode(root_hashes, sedes=rlp.sedes.FCountableList(rlp.sedes.FList([f_big_endian_int, hash32]))) self.db.set( historical_head_root_lookup_key, data, )