def test_config_ledger_txns(looper, setup, txnPoolNodeSet, sdk_wallet_client,
                            sdk_pool_handle):
    """
    Do some writes and reads on the config ledger.

    Checks that after each write every node's config ledger grows by one,
    the written value can be read back, and that at least one of the
    committed state roots produced along the way got a BLS multi-signature
    saved in every node's BLS store.
    """
    # Snapshot initial sizes and assert all nodes start out in agreement.
    old_config_ledger_size = None
    old_bls_store_size = None
    state_root_hashes = set()
    state = txnPoolNodeSet[0].getState(CONFIG_LEDGER_ID)
    for node in txnPoolNodeSet:
        if old_config_ledger_size is None:
            old_config_ledger_size = len(node.getLedger(CONFIG_LEDGER_ID))
            old_bls_store_size = node.bls_bft.bls_store._kvs.size
        else:
            assert len(node.getLedger(CONFIG_LEDGER_ID)) == old_config_ledger_size
            assert node.bls_bft.bls_store._kvs.size == old_bls_store_size

    # Three write/read rounds: a fresh key, an update of that key, and a
    # second fresh key. Previously this was three copy-pasted stanzas; the
    # per-round behavior is identical, so drive them from a list instead.
    for key, val in [('test_key', 'test_val'),
                     ('test_key', 'test_val1'),
                     ('test_key1', 'test_val11')]:
        write(key, val, looper, sdk_pool_handle, sdk_wallet_client)
        for node in txnPoolNodeSet:
            assert len(node.getLedger(CONFIG_LEDGER_ID)) == \
                (old_config_ledger_size + 1)
        # Remember the committed root so we can later look for its BLS sig.
        state_root_hashes.add(
            state_roots_serializer.serialize(state.committedHeadHash))
        assert read(key, looper, sdk_pool_handle, sdk_wallet_client) == val
        old_config_ledger_size += 1

    for node in txnPoolNodeSet:
        # Not all batches might have BLS-sig but at least one of them will have
        assert node.bls_bft.bls_store._kvs.size > old_bls_store_size
        # At least one state root hash should be in the BLS store
        found = False
        for root_hash in state_root_hashes:
            if node.bls_bft.bls_store.get(root_hash) is not None:
                found = True
                break
        assert found
def make_proof(self, path, head_hash=None):
    '''
    Creates a state proof for the given path in state trie.
    Returns None if there is no BLS multi-signature for the given state
    (it can be the case for txns added before multi-signature support).

    :param path: the path generate a state proof for
    :param head_hash: state root to prove against; defaults to the
        committed head of the state when not given
    :return: a state proof or None
    '''
    root_hash = head_hash if head_hash else self.state.committedHeadHash
    encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
    # Without a multi-sig for this root the proof would be unverifiable.
    multi_sig = self.bls_store.get(encoded_root_hash)
    if not multi_sig:
        return None
    proof = self.state.generate_state_proof(key=path,
                                            root=self.state.get_head_by_hash(
                                                root_hash),
                                            serialize=True)
    encoded_proof = proof_nodes_serializer.serialize(proof)
    return {
        ROOT_HASH: encoded_root_hash,
        MULTI_SIGNATURE: multi_sig.as_dict(),
        PROOF_NODES: encoded_proof
    }
def upload_pool_state(self):
    """Rebuild the pool state from the pool ledger and log its committed root."""
    pool_state = self.node.states[POOL_LEDGER_ID]
    self.init_state_from_ledger(pool_state, self.node.poolLedger)
    committed_root = state_roots_serializer.serialize(
        bytes(pool_state.committedHeadHash))
    logger.info("{} initialized pool state: state root {}".format(
        self, committed_root))
def upload_domain_state(self):
    """Rebuild the domain state from the domain ledger and log its committed root."""
    domain_state = self.node.states[DOMAIN_LEDGER_ID]
    self.init_state_from_ledger(domain_state, self.node.domainLedger)
    committed_root = state_roots_serializer.serialize(
        bytes(domain_state.committedHeadHash))
    logger.info("{} initialized domain state: state root {}".format(
        self, committed_root))
def _get_fees(self, is_committed=False, with_proof=False):
    """
    Read the fee schedule from state.

    :param is_committed: read against the committed root rather than the
        uncommitted head
    :param with_proof: also build a state proof; the proof dict is empty
        when no BLS multi-sig exists for the chosen root
    :return: fees dict, or (fees, proof) when with_proof is True
    """
    fees = {}
    proof = None
    try:
        if with_proof:
            proof, serz = self.state.generate_state_proof(
                self.fees_state_key,
                serialize=True,
                get_value=True)
            if serz:
                # Value is RLP-encoded inside the trie leaf; unwrap it.
                serz = rlp_decode(serz)[0]
            root_hash = self.state.committedHeadHash if is_committed else self.state.headHash
            encoded_root_hash = state_roots_serializer.serialize(
                bytes(root_hash))
            multi_sig = self.bls_store.get(encoded_root_hash)
            if multi_sig:
                encoded_proof = proof_nodes_serializer.serialize(proof)
                proof = {
                    MULTI_SIGNATURE: multi_sig.as_dict(),
                    ROOT_HASH: encoded_root_hash,
                    PROOF_NODES: encoded_proof
                }
            else:
                # No multi-sig for this root -> no verifiable proof to return.
                proof = {}
        else:
            serz = self.state.get(self.fees_state_key,
                                  isCommitted=is_committed)
        if serz:
            fees = self.state_serializer.deserialize(serz)
    except KeyError:
        # No fees set yet: fall through with the defaults ({} / None).
        pass
    if with_proof:
        return fees, proof
    return fees
def upload_config_state(self):
    """Rebuild the config state from the config ledger and log its committed root."""
    config_state = self.node.states[CONFIG_LEDGER_ID]
    self.init_state_from_ledger(config_state, self.node.configLedger)
    committed_root = state_roots_serializer.serialize(
        bytes(config_state.committedHeadHash))
    logger.info("{} initialized config state: state root {}".format(
        self, committed_root))
def get_multi_sig_values_for_all_nodes(txnPoolNodeSet, ledger_id):
    """Collect, per node, the BLS multi-sig value stored for the committed
    state root of the given ledger (None when a node has none saved)."""
    values = []
    for node in txnPoolNodeSet:
        root_key = state_roots_serializer.serialize(
            bytes(node.states[ledger_id].committedHeadHash))
        stored_sig = node.bls_bft.bls_store.get(root_key)
        values.append(stored_sig.value if stored_sig else None)
    return values
def get_state_root_hash(self, ledger_id, to_str=True, committed=False):
    """Return the head hash of the ledger's state (committed head when
    `committed` is set), serialized to a string unless `to_str` is False.
    Returns None when no state exists for the ledger."""
    state = self.get_state(ledger_id)
    if state is None:
        return None
    if committed:
        root = state.committedHeadHash
    else:
        root = state.headHash
    return state_roots_serializer.serialize(bytes(root)) if to_str else root
def save_multi_sig(request_handler):
    """Build a dummy BLS multi-signature over the handler's committed state
    root, store it in the handler's BLS store, and return it as a dict."""
    committed_root = state_roots_serializer.serialize(
        bytes(request_handler.state.committedHeadHash))
    sig_value = MultiSignatureValue(ledger_id=DOMAIN_LEDGER_ID,
                                    state_root_hash=committed_root,
                                    txn_root_hash='2' * 32,
                                    pool_state_root_hash='1' * 32,
                                    timestamp=get_utc_epoch())
    multi_sig = MultiSignature('0' * 32,
                               ['Alpha', 'Beta', 'Gamma'],
                               sig_value)
    request_handler.bls_store.put(multi_sig)
    return multi_sig.as_dict()
def get_all_multi_sig_values_for_all_nodes(txnPoolNodeSet):
    """For every node and every ledger, fetch the BLS multi-sig value stored
    for the ledger's committed state root; yields (ledger_id, value-or-None)
    pairs collected into a list."""
    collected = []
    for node in txnPoolNodeSet:
        for ledger_id, state in node.states.items():
            # TODO: Is it expected that in case of identical states (for example
            # both empty) this can mix ledgers up?
            stored_sig = node.bls_bft.bls_store.get(
                state_roots_serializer.serialize(
                    bytes(state.committedHeadHash)))
            collected.append(
                (ledger_id, stored_sig.value if stored_sig else None))
    return collected
def get_all_multi_sig_values_for_all_nodes(txnPoolNodeSet):
    """Gather (ledger_id, multi-sig value or None) for every ledger state of
    every node, keyed by each state's committed root hash."""
    result = []
    for node in txnPoolNodeSet:
        for ledger_id, state in node.states.items():
            committed_key = state_roots_serializer.serialize(
                bytes(state.committedHeadHash))
            stored_sig = node.bls_bft.bls_store.get(committed_key)
            result.append(
                (ledger_id, stored_sig.value if stored_sig else None))
    return result
def _get_auth_rule(self, path):
    """
    Look up the auth constraint stored at the given state path, falling
    back to the static auth map when nothing is stored in state.

    :param path: state path of the auth rule
    :return: ({path: constraint-as-dict}, state proof or None)
    """
    multi_sig = None
    if self.bls_store:
        root_hash = self.state.committedHeadHash
        encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
        multi_sig = self.bls_store.get(encoded_root_hash)
    map_data, proof = self.get_value_from_state(path,
                                                with_proof=True,
                                                multi_sig=multi_sig)
    if map_data:
        data = self.constraint_serializer.deserialize(map_data)
    else:
        # NOTE(review): `data.as_dict` below is accessed unconditionally, so
        # this assumes auth_map never maps `path` to None -- confirm against
        # the auth_map contents (a sibling variant of this method guards it).
        data = self.write_req_validator.auth_map[path]
    return {path: data.as_dict}, proof
def _get_auth_rule(self, key):
    """
    Resolve the auth constraint for an action id, preferring the value
    stored in state and falling back to the static auth map.

    :param key: action id of the auth rule
    :return: ([formatted auth rule result], state proof or None)
    """
    multi_sig = None
    if self._bls_store:
        root_hash = self.state.committedHeadHash
        encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
        multi_sig = self._bls_store.get(encoded_root_hash)
    # The state is keyed by a derived path, not by the raw action id.
    path = config.make_state_path_for_auth_rule(key)
    map_data, proof = self.get_value_from_state(path,
                                                with_proof=True,
                                                multi_sig=multi_sig)
    if map_data:
        data = self.constraint_serializer.deserialize(map_data)
    else:
        data = self.write_req_validator.auth_map[key]
    action_obj = split_action_id(key)
    return [self.make_get_auth_rule_result(data, action_obj)], proof
def _get_auth_rule(self, key):
    """
    Resolve the auth constraint for an action id, preferring the value
    stored in state and falling back to the static auth map.

    :param key: action id of the auth rule
    :return: ({state path: constraint-as-dict}, state proof or None)
    """
    multi_sig = None
    if self.bls_store:
        root_hash = self.state.committedHeadHash
        encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
        multi_sig = self.bls_store.get(encoded_root_hash)
    # The state is keyed by a derived path, not by the raw action id.
    path = config.make_state_path_for_auth_rule(key)
    map_data, proof = self.get_value_from_state(path,
                                                with_proof=True,
                                                multi_sig=multi_sig)
    if map_data:
        data = self.constraint_serializer.deserialize(map_data)
    else:
        data = self.write_req_validator.auth_map[key]
    # auth_map may legitimately hold None for a key; return {} in that case.
    data_dict = data.as_dict if data is not None else {}
    return {path: data_dict}, proof
def get_value_from_state(self, path, head_hash=None, with_proof=False, multi_sig=None):
    '''
    Get a value (and proof optionally)for the given path in state trie.
    Does not return the proof is there is no aggregate signature for it.

    :param path: the path generate a state proof for
    :param head_hash: the root to create the proof against
    :param with_proof: whether to build a state proof with the value
    :param multi_sig: pre-fetched BLS multi-signature; when omitted and a
        proof is requested, it is looked up from the BLS store here
    :return: a state proof or None
    '''
    # Resolve the multi-sig up front so the base implementation can embed
    # it into the proof it generates.
    if not multi_sig and with_proof:
        root_hash = head_hash if head_hash else self.state.committedHeadHash
        encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
        multi_sig = self._bls_store.get(encoded_root_hash)
    return super().get_value_from_state(path, head_hash, with_proof, multi_sig)
def _init_state_from_ledger(self, ledger_id: int):
    """
    If the trie is empty then initialize it by applying txns from ledger.
    """
    state = self.db_manager.get_state(ledger_id)
    # Nothing to do for unknown or already-closed states.
    if not state or state.closed:
        return
    if state.isEmpty:
        logger.info('{} found state to be empty, recreating from ledger {}'.format(self, ledger_id))
        ledger = self.db_manager.get_ledger(ledger_id)
        for seq_no, txn in ledger.getAllTxn():
            # Re-attach any auxiliary data before replaying into state.
            txn = self._update_txn_with_extra_data(txn)
            self.write_manager.restore_state(txn, ledger_id)
    logger.info(
        "{} initialized state for ledger {}: state root {}".format(
            self, ledger_id,
            state_roots_serializer.serialize(bytes(state.committedHeadHash))))
def get_value_from_state(self, path, head_hash=None, with_proof=False, multi_sig=None):
    '''
    Get a value (and proof optionally)for the given path in state trie.
    Does not return the proof is there is no aggregate signature for it.

    :param path: the path generate a state proof for
    :param head_hash: the root to create the proof against
    :param with_proof: whether to build a state proof with the value
    :param multi_sig: aggregate signature to embed into the proof
    :return: (value, proof) -- proof is None when not requested or when
        no multi-sig was supplied
    '''
    root_hash = head_hash if head_hash else self.state.committedHeadHash
    encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
    if not with_proof:
        # NOTE(review): unlike both branches below, a missing key here
        # propagates KeyError instead of returning (None, None) -- confirm
        # callers of the proofless path rely on that.
        return self.state.get_for_root_hash(root_hash, path), None
    if not multi_sig:
        # Just return the value and not proof
        try:
            return self.state.get_for_root_hash(root_hash, path), None
        except KeyError:
            return None, None
    else:
        try:
            proof, value = self.state.generate_state_proof(
                key=path,
                root=self.state.get_head_by_hash(root_hash),
                serialize=True,
                get_value=True)
            value = self.state.get_decoded(value) if value else value
            encoded_proof = proof_nodes_serializer.serialize(proof)
            proof = {
                ROOT_HASH: encoded_root_hash,
                MULTI_SIGNATURE: multi_sig.as_dict(),
                PROOF_NODES: encoded_proof
            }
            return value, proof
        except KeyError:
            return None, None
def get_all_utxo(self, request: Request):
    """
    Handle a get-all-UTXO read: return every unspent output stored under
    the requested address, sorted, with a state proof attached when a BLS
    multi-sig exists for the committed state root.
    """
    address = request.operation[ADDRESS]
    encoded_root_hash = state_roots_serializer.serialize(
        bytes(self.state.committedHeadHash))
    proof, rv = self.state.generate_state_proof_for_keys_with_prefix(
        address,
        serialize=True,
        get_value=True)
    multi_sig = self.bls_store.get(encoded_root_hash)
    if multi_sig:
        encoded_proof = proof_nodes_serializer.serialize(proof)
        proof = {
            MULTI_SIGNATURE: multi_sig.as_dict(),
            ROOT_HASH: encoded_root_hash,
            PROOF_NODES: encoded_proof
        }
    else:
        # No multi-sig -> no verifiable proof; omit it from the reply below.
        proof = {}
    # The outputs need to be returned in sorted order since each node's reply
    # should be same. Since no of outputs can be large, a conscious choice to
    # not use `operator.attrgetter` on an already constructed list was made
    outputs = SortedItems()
    for k, v in rv.items():
        addr, seq_no = self.parse_state_key(k.decode())
        amount = rlp_decode(v)[0]
        # Zero/empty amounts mean the output was spent; skip them.
        if not amount:
            continue
        outputs.add(Output(addr, int(seq_no), int(amount)))
    result = {f.IDENTIFIER.nm: request.identifier,
              f.REQ_ID.nm: request.reqId,
              OUTPUTS: outputs.sorted_list}
    if proof:
        result[STATE_PROOF] = proof
    result.update(request.operation)
    return result
def sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet,
                                       sdk_pool_handle, sdk_wallet_handle,
                                       saved_multi_sigs_count):
    """
    Send several one-request batches and verify that exactly
    `saved_multi_sigs_count` nodes saved a BLS multi-sig for each batch's
    committed domain state root, and that saved multi-sigs agree across
    nodes for all but the last batch.
    """
    # at least two because first request could have no
    # signature since state can be clear
    number_of_requests = 3
    # 1. send requests
    # Using loop to avoid 3pc batching
    state_roots = []
    for i in range(number_of_requests):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_handle, 1)
        # NOTE(review): `txnPoolNodeSet[:-1]` includes node[0] itself and
        # skips the last node; presumably `[1:]` was intended -- confirm.
        waitNodeDataEquality(looper, txnPoolNodeSet[0], *txnPoolNodeSet[:-1])
        state_roots.append(
            state_roots_serializer.serialize(
                bytes(txnPoolNodeSet[0].getState(
                    DOMAIN_LEDGER_ID).committedHeadHash)))
    # 2. get all saved multi-sigs
    multi_sigs_for_batch = []
    for state_root in state_roots:
        multi_sigs = []
        for node in txnPoolNodeSet:
            multi_sig = node.bls_bft.bls_store.get(state_root)
            if multi_sig:
                multi_sigs.append(multi_sig)
        multi_sigs_for_batch.append(multi_sigs)
    # 3. check how many multi-sigs are saved
    for multi_sigs in multi_sigs_for_batch:
        assert len(multi_sigs) == saved_multi_sigs_count, \
            "{} != {}".format(len(multi_sigs), saved_multi_sigs_count)
    # 4. check that bls multi-sig is the same for all nodes we get PrePrepare
    # for (that is for all except the last one)
    for multi_sigs in multi_sigs_for_batch[:-1]:
        if multi_sigs:
            assert multi_sigs.count(multi_sigs[0]) == len(multi_sigs)
def sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet,
                                       sdk_pool_handle, sdk_wallet_handle,
                                       saved_multi_sigs_count):
    """
    Send several one-request batches and verify that exactly
    `saved_multi_sigs_count` nodes saved a BLS multi-sig for each batch's
    committed domain state root, and that saved multi-sigs agree across
    nodes for all but the last batch.
    """
    # at least two because first request could have no
    # signature since state can be clear
    number_of_requests = 3
    # 1. send requests
    # Using loop to avoid 3pc batching
    state_roots = []
    for i in range(number_of_requests):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_handle, 1)
        # NOTE(review): `txnPoolNodeSet[:-1]` includes node[0] itself and
        # skips the last node; presumably `[1:]` was intended -- confirm.
        waitNodeDataEquality(looper, txnPoolNodeSet[0], *txnPoolNodeSet[:-1])
        state_roots.append(
            state_roots_serializer.serialize(
                bytes(txnPoolNodeSet[0].getState(
                    DOMAIN_LEDGER_ID).committedHeadHash)))
    # 2. get all saved multi-sigs
    multi_sigs_for_batch = []
    for state_root in state_roots:
        multi_sigs = []
        for node in txnPoolNodeSet:
            multi_sig = node.bls_bft.bls_store.get(state_root)
            if multi_sig:
                multi_sigs.append(multi_sig)
        multi_sigs_for_batch.append(multi_sigs)
    # 3. check how many multi-sigs are saved
    for multi_sigs in multi_sigs_for_batch:
        assert len(multi_sigs) == saved_multi_sigs_count, \
            "{} != {}".format(len(multi_sigs), saved_multi_sigs_count)
    # 4. check that bls multi-sig is the same for all nodes we get PrePrepare
    # for (that is for all except the last one)
    for multi_sigs in multi_sigs_for_batch[:-1]:
        if multi_sigs:
            assert multi_sigs.count(multi_sigs[0]) == len(multi_sigs)
class PP:
    """Test helpers for building PrePrepare messages carrying fee plugin data."""

    # Canned plugin payload: one fee txn in the batch plus fixed token-ledger
    # state and txn roots.
    plugin_data = {
        FEES: {
            FEE_TXNS_IN_BATCH: 1,
            f.STATE_ROOT.nm: state_roots_serializer.serialize(BLANK_ROOT),
            f.TXN_ROOT.nm: 'VNsWDU8rZ9Jz9NF',
        }
    }

    @staticmethod
    def create_pre_prepare():
        # Build a PrePrepare from fixed positional args (annotated inline).
        pre_prepare_args = [
            # instIds
            0,
            # viewNo
            0,
            # ppSeqNo
            2,
            # ppTime
            get_utc_epoch(),
            # reqIdr
            ['B8fV7naUqLATYocqu7yZ8WM9BJDuS24bqbJNvBRsoGg3'],
            # discarded
            "",
            # digest
            'ccb7388bc43a1e4669a23863c2b8c43efa183dde25909541b06c0f5196ac4f3b',
            # ledger id
            CONFIG_LEDGER_ID,
            # state root
            '5BU5Rc3sRtTJB6tVprGiTSqiRaa9o6ei11MjH4Vu16ms',
            # txn root
            'EdxDR8GUeMXGMGtQ6u7pmrUgKfc2XdunZE79Z9REEHg6',
            # sub_seq_no
            0,
            # final
            True
        ]
        return PrePrepare(*pre_prepare_args)

    @staticmethod
    def valid_pre_prepare(pp, monkeypatch, three_phase_handler):
        # Patch the master replica and fee handler so token-ledger roots come
        # from the canned plugin_data, then let the handler extend `pp`.
        def mock_get_state_root(ledger_id):
            if ledger_id == TOKEN_LEDGER_ID:
                return PP.plugin_data[FEES][f.STATE_ROOT.nm]
            else:
                return 'Pulled state root from a different ledger than sovtoken'

        def mock_get_txn_root(ledger_id):
            if ledger_id == TOKEN_LEDGER_ID:
                return PP.plugin_data[FEES][f.TXN_ROOT.nm]
            else:
                return 'Pulled txn root from a different ledger than sovtoken'

        state_root_deserialized = state_roots_serializer.deserialize(
            PP.plugin_data[FEES][f.STATE_ROOT.nm])
        txn_root_deserialized = state_roots_serializer.deserialize(
            PP.plugin_data[FEES][f.TXN_ROOT.nm])
        monkeypatch.setattr(three_phase_handler.fees_req_handler,
                            'fee_txns_in_current_batch', 1)
        monkeypatch.setattr(three_phase_handler.master_replica,
                            'stateRootHash', mock_get_state_root)
        monkeypatch.setattr(three_phase_handler.master_replica,
                            'txnRootHash', mock_get_txn_root)
        monkeypatch.setattr(
            three_phase_handler.fees_req_handler.token_state._trie,
            'root_hash', state_root_deserialized)
        monkeypatch.setattr(three_phase_handler.fees_req_handler.token_ledger,
                            'uncommittedRootHash', txn_root_deserialized)
        return three_phase_handler.add_to_pre_prepare(pp)

    @staticmethod
    def from_request(req, three_phase_handler):
        # Derive a PrePrepare for a single request from the master replica's
        # current position, using the token ledger's live roots.
        replica = three_phase_handler.master_replica
        args = [
            replica.instId,
            replica.viewNo,
            replica.lastPrePrepareSeqNo + 1,
            get_utc_epoch(),
            [req.digest],
            "",
            req.digest,
            CONFIG_LEDGER_ID,
            replica.stateRootHash(TOKEN_LEDGER_ID),
            replica.txnRootHash(TOKEN_LEDGER_ID),
            0,
            True
        ]
        return PrePrepare(*args)
def add_bls_multi_sig(domain_req_handler, root_hash):
    """Create and store a BLS multi-sig keyed by the serialized root hash."""
    serialized_root = state_roots_serializer.serialize(bytes(root_hash))
    domain_req_handler.bls_store.put(create_bls_multi_sig(serialized_root))
def _bad_hash_serialized(self):
    """Return the serialized form of the deliberately-bad hash."""
    bad_hash = self._bad_hash_unserialized()
    return state_roots_serializer.serialize(bad_hash)
class PP:
    """Test helpers for building PrePrepare messages with and without fee data."""

    # Canned plugin payload: one fee txn in the batch plus fixed token-ledger
    # state and txn roots.
    plugin_data = {
        FEES: {
            FEE_TXNS_IN_BATCH: 1,
            f.STATE_ROOT.nm: state_roots_serializer.serialize(BLANK_ROOT),
            f.TXN_ROOT.nm: 'VNsWDU8rZ9Jz9NF',
        }
    }

    @staticmethod
    def fake_pp(ledger_id=DOMAIN_LEDGER_ID):
        # A PrePrepare with a random state root and no fee data attached.
        params = create_pre_prepare_params(state_root=generate_state_root(),
                                           ledger_id=ledger_id)
        return PrePrepare(*params)

    @staticmethod
    def fake_pp_without_fees(three_phase_handler, ledger_id=DOMAIN_LEDGER_ID):
        # Run the handler without patching fee state, so no fee data is added.
        pp = PP.fake_pp(ledger_id=ledger_id)
        pp_with_fees = three_phase_handler.add_to_pre_prepare(pp)
        return pp_with_fees

    @staticmethod
    def fake_pp_with_fees(monkeypatch, three_phase_handler):
        # Patch the master replica and fee handler so token-ledger roots come
        # from the canned plugin_data, then let the handler extend the pp.
        pp = PP.fake_pp()

        def mock_get_state_root(ledger_id):
            if ledger_id == TOKEN_LEDGER_ID:
                return PP.plugin_data[FEES][f.STATE_ROOT.nm]
            else:
                return 'Pulled state root from a different ledger than sovtoken'

        def mock_get_txn_root(ledger_id):
            if ledger_id == TOKEN_LEDGER_ID:
                return PP.plugin_data[FEES][f.TXN_ROOT.nm]
            else:
                return 'Pulled txn root from a different ledger than sovtoken'

        state_root_deserialized = state_roots_serializer.deserialize(
            PP.plugin_data[FEES][f.STATE_ROOT.nm])
        txn_root_deserialized = state_roots_serializer.deserialize(
            PP.plugin_data[FEES][f.TXN_ROOT.nm])
        monkeypatch.setattr(three_phase_handler.fees_req_handler,
                            'fee_txns_in_current_batch', 1)
        monkeypatch.setattr(three_phase_handler.master_replica,
                            'stateRootHash', mock_get_state_root)
        monkeypatch.setattr(three_phase_handler.master_replica,
                            'txnRootHash', mock_get_txn_root)
        monkeypatch.setattr(
            three_phase_handler.fees_req_handler.token_state._trie,
            'root_hash', state_root_deserialized)
        monkeypatch.setattr(three_phase_handler.fees_req_handler.token_ledger,
                            'uncommittedRootHash', txn_root_deserialized)
        return three_phase_handler.add_to_pre_prepare(pp)

    @staticmethod
    def from_request(req, three_phase_handler):
        # Derive a PrePrepare for a single request from the master replica's
        # current position, using the token ledger's live roots.
        replica = three_phase_handler.master_replica
        args = [
            replica.instId,
            replica.viewNo,
            replica.lastPrePrepareSeqNo + 1,
            get_utc_epoch(),
            [req.digest],
            init_discarded(),
            req.digest,
            DOMAIN_LEDGER_ID,
            replica.stateRootHash(TOKEN_LEDGER_ID),
            replica.txnRootHash(TOKEN_LEDGER_ID),
            0,
            True
        ]
        return PrePrepare(*args)
def test_config_ledger_txns(looper, setup, txnPoolNodeSet, sdk_wallet_client,
                            sdk_pool_handle):
    """
    Do some writes and reads on the config ledger.

    Checks that after each write every node's config ledger grows by one,
    the written value can be read back, and that at least one of the
    committed state roots produced along the way got a BLS multi-signature
    saved in every node's BLS store.
    """
    # Snapshot initial sizes and assert all nodes start out in agreement.
    old_config_ledger_size = None
    old_bls_store_size = None
    state_root_hashes = set()
    state = txnPoolNodeSet[0].getState(CONFIG_LEDGER_ID)
    for node in txnPoolNodeSet:
        if old_config_ledger_size is None:
            old_config_ledger_size = len(node.getLedger(CONFIG_LEDGER_ID))
            old_bls_store_size = node.bls_bft.bls_store._kvs.size
        else:
            assert len(
                node.getLedger(CONFIG_LEDGER_ID)) == old_config_ledger_size
            assert node.bls_bft.bls_store._kvs.size == old_bls_store_size

    # Three write/read rounds: a fresh key, an update of that key, and a
    # second fresh key. Previously this was three copy-pasted stanzas; the
    # per-round behavior is identical, so drive them from a list instead.
    for key, val in [('test_key', 'test_val'),
                     ('test_key', 'test_val1'),
                     ('test_key1', 'test_val11')]:
        write(key, val, looper, sdk_pool_handle, sdk_wallet_client)
        for node in txnPoolNodeSet:
            assert len(
                node.getLedger(CONFIG_LEDGER_ID)) == (old_config_ledger_size + 1)
        # Remember the committed root so we can later look for its BLS sig.
        state_root_hashes.add(
            state_roots_serializer.serialize(state.committedHeadHash))
        assert read(key, looper, sdk_pool_handle, sdk_wallet_client) == val
        old_config_ledger_size += 1

    for node in txnPoolNodeSet:
        # Not all batches might have BLS-sig but at least one of them will have
        assert node.bls_bft.bls_store._kvs.size > old_bls_store_size
        # At least one state root hash should be in the BLS store
        found = False
        for root_hash in state_root_hashes:
            if node.bls_bft.bls_store.get(root_hash) is not None:
                found = True
                break
        assert found
def save_multi_sig(request_handler):
    """Store a dummy BLS multi-signature keyed by the handler's committed
    state root and return the signature object."""
    root_key = state_roots_serializer.serialize(
        bytes(request_handler.state.committedHeadHash))
    multi_sig = MultiSignature('0' * 32,
                               ['Alpha', 'Beta', 'Gamma'],
                               '1' * 32)
    request_handler.bls_store.put(root_key, multi_sig)
    return multi_sig
def initPoolState(self):
    """Initialize the pool state from its ledger and log the committed root."""
    self.node.initStateFromLedger(self.state, self.ledger, self.reqHandler)
    committed_root = state_roots_serializer.serialize(
        bytes(self.state.committedHeadHash))
    logger.info("{} initialized pool state: state root {}".format(
        self, committed_root))
def add_bls_multi_sig(buy_req_handler, root_hash):
    """Create and store a BLS multi-sig keyed by the serialized root hash."""
    serialized_root = state_roots_serializer.serialize(bytes(root_hash))
    buy_req_handler.database_manager.bls_store.put(
        create_bls_multi_sig(serialized_root))