def test_steward_can_promote_and_demote_own_node(
        be, do, poolNodesStarted, newStewardCli, trusteeCli, newNodeVals):
    """A steward can toggle its own node's SERVICES (demote to [], promote back
    to [VALIDATOR]); every node's pool ledger must record the latest value."""
    ensurePoolIsOperable(be, do, newStewardCli)

    # Promote the node as its own steward.
    newNodeVals['newNodeData'][SERVICES] = [VALIDATOR]
    be(newStewardCli)
    do('send NODE dest={newNodeIdr} data={newNodeData}',
       mapper=newNodeVals,
       expect=NODE_REQUEST_COMPLETED, within=8)

    # Demote it (empty services list).
    newNodeVals['newNodeData'][SERVICES] = []
    do('send NODE dest={newNodeIdr} data={newNodeData}',
       mapper=newNodeVals,
       expect=NODE_REQUEST_COMPLETED, within=8)
    ensurePoolIsOperable(be, do, newStewardCli)

    # Every node's last pool txn must show SERVICES == [].
    for node in poolNodesStarted.nodes.values():
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == newNodeVals['newNodeIdr']
        assert SERVICES in txn_data[DATA] and txn_data[DATA][SERVICES] == []

    # Promote it again.
    newNodeVals['newNodeData'][SERVICES] = [VALIDATOR]
    do('send NODE dest={newNodeIdr} data={newNodeData}',
       mapper=newNodeVals,
       expect=NODE_REQUEST_COMPLETED, within=8)

    # Every node's last pool txn must show SERVICES == [VALIDATOR].
    for node in poolNodesStarted.nodes.values():
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == newNodeVals['newNodeIdr']
        assert SERVICES in txn_data[DATA] and txn_data[DATA][SERVICES] == [VALIDATOR]
def _calc_catchup_till(self) -> Dict[int, CatchupTill]:
    """Derive per-ledger catchup targets from the last committed audit txn.

    Returns {} when no audit txn exists yet, or when the audit ledger looks
    corrupted (size/root references that cannot be resolved).
    """
    audit_ledger = self._provider.ledger(AUDIT_LEDGER_ID)
    last_audit_txn = audit_ledger.get_last_committed_txn()
    if last_audit_txn is None:
        return {}

    catchup_till = {}
    last_audit_txn = get_payload_data(last_audit_txn)
    for ledger_id, final_size in last_audit_txn[AUDIT_TXN_LEDGERS_SIZE].items():
        ledger = self._provider.ledger(ledger_id)
        if ledger is None:
            # Audit references a ledger this node doesn't have (e.g. frozen).
            logger.debug(
                "{} has audit ledger with references to nonexistent "
                "ledger with ID {}. Maybe it was frozen.".format(
                    self, ledger_id))
            continue
        start_size = ledger.size

        final_hash = last_audit_txn[AUDIT_TXN_LEDGER_ROOT].get(ledger_id)
        if final_hash is None:
            # No root recorded: only consistent if the ledger didn't grow.
            if final_size != ledger.size:
                logger.error(
                    "{} has corrupted audit ledger: "
                    "it indicates that ledger {} has new transactions but doesn't have new txn root"
                    .format(self, ledger_id))
                return {}
            final_hash = Ledger.hashToStr(
                ledger.tree.root_hash) if final_size > 0 else None

        if isinstance(final_hash, int):
            # An int root is a back-reference: "same root as N audit txns ago";
            # dereference it to get the actual root string.
            audit_txn = audit_ledger.getBySeqNo(audit_ledger.size - final_hash)
            if audit_txn is None:
                logger.error(
                    "{} has corrupted audit ledger: "
                    "its txn root for ledger {} references nonexistent txn with seq_no {} - {} = {}"
                    .format(self, ledger_id, audit_ledger.size, final_hash,
                            audit_ledger.size - final_hash))
                return {}

            audit_txn = get_payload_data(audit_txn)
            final_hash = audit_txn[AUDIT_TXN_LEDGER_ROOT].get(ledger_id)
            if not isinstance(final_hash, str):
                logger.error(
                    "{} has corrupted audit ledger: "
                    "its txn root for ledger {} references txn with seq_no {} - {} = {} "
                    "which doesn't contain txn root".format(
                        self, ledger_id, audit_ledger.size, final_hash,
                        audit_ledger.size - final_hash))
                return {}

        catchup_till[ledger_id] = CatchupTill(start_size=start_size,
                                              final_size=final_size,
                                              final_hash=final_hash)

    return catchup_till
def _get_last_audited_primaries(self):
    """Return the primaries value from the last committed audit txn.

    AUDIT_TXN_PRIMARIES may be an int back-reference meaning "same as the
    txn N entries earlier"; in that case the referenced txn is dereferenced.
    """
    audit = self.node.getLedger(AUDIT_LEDGER_ID)
    last_txn = audit.get_last_committed_txn()
    last_txn_prim_value = get_payload_data(last_txn)[AUDIT_TXN_PRIMARIES]
    if isinstance(last_txn_prim_value, int):
        # Back-reference: resolve to the txn that holds the actual value.
        seq_no = get_seq_no(last_txn) - last_txn_prim_value
        last_txn_prim_value = get_payload_data(audit.getBySeqNo(seq_no))[AUDIT_TXN_PRIMARIES]
    return last_txn_prim_value
def updateStateWithSingleTxn(self, txn, isCommitted=False):
    """Apply AUTH_RULE / AUTH_RULES txns to state after the base-class update."""
    super().updateStateWithSingleTxn(txn, isCommitted)
    txn_type = get_type(txn)
    if txn_type == AUTH_RULE:
        # Single rule: the payload itself is the rule.
        self._update_auth_rule_state(get_payload_data(txn))
    elif txn_type == AUTH_RULES:
        # Batch form: apply every rule in the RULES list, in order.
        for single_rule in get_payload_data(txn).get(RULES):
            self._update_auth_rule_state(single_rule)
def validate(self, req: Request):
    """Validate POOL_UPGRADE / POOL_CONFIG requests and authorize the sender.

    Raises InvalidClientRequest for a downgrade or an already-scheduled
    upgrade; UnauthorizedClientRequest when the sender is unknown or its
    role may not perform the action. Other txn types pass through.
    """
    status = None
    operation = req.operation
    typ = operation.get(TXN_TYPE)
    if typ not in [POOL_UPGRADE, POOL_CONFIG]:
        return
    origin = req.identifier
    try:
        originRole = self.idrCache.getRole(origin, isCommitted=False)
    except BaseException:
        raise UnauthorizedClientRequest(
            req.identifier,
            req.reqId,
            "Nym {} not added to the ledger yet".format(origin))
    if typ == POOL_UPGRADE:
        currentVersion = Upgrader.getVersion()
        targetVersion = req.operation[VERSION]
        # compareVersions(...) < 0 is treated as a downgrade attempt here —
        # NOTE(review): confirm argument order against Upgrader.compareVersions.
        if Upgrader.compareVersions(currentVersion, targetVersion) < 0:
            raise InvalidClientRequest(
                req.identifier,
                req.reqId,
                "Upgrade to lower version is not allowed")
        trname = IndyTransactions.POOL_UPGRADE.name
        action = operation.get(ACTION)
        # TODO: Some validation needed for making sure name and version
        # present
        # Latest upgrade txn with the same NAME+VERSION reveals whether this
        # upgrade is already scheduled.
        txn = self.upgrader.get_upgrade_txn(
            lambda txn: get_payload_data(txn).get(
                NAME, None) == req.operation.get(
                NAME, None) and get_payload_data(txn).get(VERSION) == req.operation.get(VERSION),
            reverse=True)
        if txn:
            status = get_payload_data(txn).get(ACTION, None)
        if status == START and action == START:
            raise InvalidClientRequest(
                req.identifier,
                req.reqId,
                "Upgrade '{}' is already scheduled".format(
                    req.operation.get(NAME)))
    elif typ == POOL_CONFIG:
        trname = IndyTransactions.POOL_CONFIG.name
        action = None
        status = None
    r, msg = Authoriser.authorised(
        typ, originRole, field=ACTION, oldVal=status, newVal=action)
    if not r:
        raise UnauthorizedClientRequest(
            req.identifier, req.reqId, "{} cannot do {}".format(
                Roles.nameFromValue(originRole), trname))
def updateState(self, txns, isCommitted=False):
    """Fold each NODE txn's DATA into that node's stored state record."""
    for txn in txns:
        payload = get_payload_data(txn)
        node_nym = payload.get(TARGET_NYM)
        incoming = payload.get(DATA, {})
        stored = self.getNodeData(node_nym, isCommitted=isCommitted)
        # Node data did not exist in state, so this is a new node txn,
        # hence store the author of the txn (steward of node)
        if not stored:
            stored[f.IDENTIFIER.nm] = get_from(txn)
        stored.update(incoming)
        self.updateNodeData(node_nym, stored)
def validate(self, req: Request):
    """Validate POOL_UPGRADE / POOL_CONFIG requests and authorize the sender.

    Raises InvalidClientRequest for a downgrade or an already-scheduled
    upgrade; UnauthorizedClientRequest when the sender is unknown or its
    role may not perform the action. Other txn types pass through.
    """
    status = None
    operation = req.operation
    typ = operation.get(TXN_TYPE)
    if typ not in [POOL_UPGRADE, POOL_CONFIG]:
        return
    origin = req.identifier
    try:
        originRole = self.idrCache.getRole(origin, isCommitted=False)
    except BaseException:
        raise UnauthorizedClientRequest(
            req.identifier,
            req.reqId,
            "Nym {} not added to the ledger yet".format(origin))
    if typ == POOL_UPGRADE:
        currentVersion = Upgrader.getVersion()
        targetVersion = req.operation[VERSION]
        # compareVersions(...) < 0 is treated as a downgrade attempt here —
        # NOTE(review): confirm argument order against Upgrader.compareVersions.
        if Upgrader.compareVersions(currentVersion, targetVersion) < 0:
            raise InvalidClientRequest(
                req.identifier,
                req.reqId,
                "Upgrade to lower version is not allowed")
        trname = IndyTransactions.POOL_UPGRADE.name
        action = operation.get(ACTION)
        # TODO: Some validation needed for making sure name and version
        # present
        # Latest upgrade txn with the same NAME+VERSION reveals whether this
        # upgrade is already scheduled.
        txn = self.upgrader.get_upgrade_txn(
            lambda txn: get_payload_data(txn).get(NAME, None) == req.
            operation.get(NAME, None) and get_payload_data(txn).get(
                VERSION) == req.operation.get(VERSION),
            reverse=True)
        if txn:
            status = get_payload_data(txn).get(ACTION, None)
        if status == START and action == START:
            raise InvalidClientRequest(
                req.identifier,
                req.reqId,
                "Upgrade '{}' is already scheduled".format(
                    req.operation.get(NAME)))
    elif typ == POOL_CONFIG:
        trname = IndyTransactions.POOL_CONFIG.name
        action = None
        status = None
    r, msg = Authoriser.authorised(typ,
                                   originRole,
                                   field=ACTION,
                                   oldVal=status,
                                   newVal=action)
    if not r:
        raise UnauthorizedClientRequest(
            req.identifier, req.reqId,
            "{} cannot do {}".format(Roles.nameFromValue(originRole), trname))
def dynamic_validation(self, request: Request):
    """Validate a POOL_UPGRADE request against installed packages and auth rules.

    The target package must be installed and belong to the pool, the version
    must be upgradable, and the same upgrade must not already be scheduled;
    finally the write-auth rules for the ACTION field are enforced.
    """
    self._validate_request_type(request)
    identifier, req_id, operation = get_request_data(request)
    # '*' marks "no previous upgrade txn found" and selects the ADD auth rule.
    status = '*'
    pkt_to_upgrade = operation.get(PACKAGE, getConfig().UPGRADE_ENTRY)
    if pkt_to_upgrade:
        currentVersion, cur_deps = self.curr_pkt_info(pkt_to_upgrade)
        if not currentVersion:
            raise InvalidClientRequest(
                identifier, req_id,
                "Packet {} is not installed and cannot be upgraded".format(
                    pkt_to_upgrade))
        # The package must depend on the pool application to be upgradable here.
        if all([APP_NAME not in d for d in cur_deps]):
            raise InvalidClientRequest(
                identifier, req_id,
                "Packet {} doesn't belong to pool".format(pkt_to_upgrade))
    else:
        raise InvalidClientRequest(identifier, req_id,
                                   "Upgrade packet name is empty")

    targetVersion = operation[VERSION]
    reinstall = operation.get(REINSTALL, False)
    if not Upgrader.is_version_upgradable(currentVersion, targetVersion,
                                          reinstall):
        raise InvalidClientRequest(identifier, req_id,
                                   "Version is not upgradable")

    action = operation.get(ACTION)
    # TODO: Some validation needed for making sure name and version
    # present
    # Latest upgrade txn with matching NAME+VERSION tells us the current status.
    txn = self.upgrader.get_upgrade_txn(
        lambda txn: get_payload_data(txn).get(NAME, None) == operation.get(
            NAME, None) and get_payload_data(txn).get(
            VERSION) == operation.get(VERSION),
        reverse=True)
    if txn:
        status = get_payload_data(txn).get(ACTION, '*')

    if status == START and action == START:
        raise InvalidClientRequest(
            identifier, req_id,
            "Upgrade '{}' is already scheduled".format(
                operation.get(NAME)))
    if status == '*':
        auth_action = AuthActionAdd(txn_type=POOL_UPGRADE,
                                    field=ACTION,
                                    value=action)
    else:
        auth_action = AuthActionEdit(txn_type=POOL_UPGRADE,
                                     field=ACTION,
                                     old_value=status,
                                     new_value=action)
    self.write_request_validator.validate(request, [auth_action])
def test_get_fees_txn(helpers, fees_paid, nodeSetWithIntegratedTokenPlugin):
    """GET_TXN for a fee txn must return data matching each node's token ledger."""
    seq_no = get_seq_no(fees_paid[FEES])
    request = helpers.request.get_txn(TOKEN_LEDGER_ID, seq_no)
    responses = helpers.sdk.send_and_check_request_objects([request, ])
    result = helpers.sdk.get_first_result(responses)
    data = result[DATA]
    # Every node's token ledger must agree with the returned txn.
    for node in nodeSetWithIntegratedTokenPlugin:
        token_ledger = node.getLedger(TOKEN_LEDGER_ID)
        fee_txn = token_ledger.getBySeqNo(seq_no)
        assert get_payload_data(fee_txn) == get_payload_data(data)
        assert get_seq_no(fee_txn) == get_seq_no(data)
        assert get_txn_time(fee_txn) == get_txn_time(data)
def test_update_nym(nym_handler):
    """ROLE stored in state must track the most recent update_nym call."""
    identifier = "identifier"
    steward_txn = create_nym_txn(identifier, STEWARD)
    empty_role_txn = create_nym_txn(identifier, "")

    update_nym(nym_handler.state, identifier, STEWARD)
    details = get_nym_details(nym_handler.state, identifier)
    assert details[ROLE] == get_payload_data(steward_txn)[ROLE]

    update_nym(nym_handler.state, identifier, "")
    details = get_nym_details(nym_handler.state, identifier)
    assert details[ROLE] == get_payload_data(empty_role_txn)[ROLE]
def update_state(self, txn, prev_result, request, is_committed=False):
    """Merge a NODE txn's DATA into state, recording the steward on first write."""
    self._validate_txn_type(txn)
    payload = get_payload_data(txn)
    node_nym = payload.get(TARGET_NYM)
    incoming = payload.get(DATA, {})
    stored = self.get_from_state(node_nym, is_committed=is_committed)
    # Node data did not exist in state, so this is a new node txn,
    # hence store the author of the txn (steward of node)
    if not stored:
        stored[f.IDENTIFIER.nm] = get_from(txn)
    stored.update(incoming)
    serialized = self.state_serializer.serialize(stored)
    self.state.set(self.gen_state_key(txn), serialized)
def update_version(self, txn):
    """Track pool version changes from upgrade txns.

    POOL_UPGRADE/START fixes the fault tolerance f from the schedule size;
    NODE_UPGRADE/COMPLETE txns are counted as per-version votes, and a
    version is adopted once more than f distinct nodes report completing it.
    """
    if get_type(txn) == POOL_UPGRADE and get_payload_data(txn).get(ACTION) == START:
        # f = (N - 1) // 3: max tolerated faults for N scheduled nodes.
        N = len(get_payload_data(txn).get(SCHEDULE, {}))
        self._f = (N - 1) // 3
    elif get_type(txn) == NODE_UPGRADE and get_payload_data(txn)[DATA][ACTION] == COMPLETE:
        version = get_payload_data(txn)[DATA][VERSION]
        self._votes_for_new_version.setdefault(version, set())
        self._votes_for_new_version[version].add(get_from(txn))
        if len(self._votes_for_new_version[version]) > self._f:
            # Quorum reached: record the version at the txn's time and drop
            # votes for versions not newer than the adopted one.
            self._versions[get_txn_time(txn)] = version
            self._votes_for_new_version = SortedDict({v: senders
                                                      for v, senders in self._votes_for_new_version.items()
                                                      if v > version})
def additional_dynamic_validation(self, request: Request, req_pp_time: Optional[int]):
    """Validate a POOL_UPGRADE request: upgrade must be possible for the
    package/version, not already scheduled, and authorized for the sender.

    `req_pp_time` is accepted for interface compatibility but not used here.
    """
    self._validate_request_type(request)
    identifier, req_id, operation = get_request_data(request)
    # '*' marks "no previous upgrade txn found" and selects the ADD auth rule.
    status = '*'
    pkg_to_upgrade = operation.get(PACKAGE, getConfig().UPGRADE_ENTRY)
    targetVersion = operation[VERSION]
    reinstall = operation.get(REINSTALL, False)

    if not pkg_to_upgrade:
        raise InvalidClientRequest(identifier, req_id,
                                   "Upgrade package name is empty")

    try:
        res = self.upgrader.check_upgrade_possible(pkg_to_upgrade,
                                                   targetVersion, reinstall)
    except Exception as exc:
        # Surface checker failures as a validation error message.
        res = str(exc)

    if res:
        raise InvalidClientRequest(identifier, req_id, res)

    action = operation.get(ACTION)
    # TODO: Some validation needed for making sure name and version
    # present
    # Latest upgrade txn with matching NAME+VERSION tells us the current status.
    txn = self.upgrader.get_upgrade_txn(
        lambda txn: get_payload_data(txn).get(NAME, None) == operation.get(
            NAME, None) and get_payload_data(txn).get(
            VERSION) == operation.get(VERSION),
        reverse=True)
    if txn:
        status = get_payload_data(txn).get(ACTION, '*')

    if status == START and action == START:
        raise InvalidClientRequest(
            identifier, req_id,
            "Upgrade '{}' is already scheduled".format(
                operation.get(NAME)))
    if status == '*':
        auth_action = AuthActionAdd(txn_type=POOL_UPGRADE,
                                    field=ACTION,
                                    value=action)
    else:
        auth_action = AuthActionEdit(txn_type=POOL_UPGRADE,
                                     field=ACTION,
                                     old_value=status,
                                     new_value=action)
    self.write_req_validator.validate(request, [auth_action])
def test_get_key_for_old_root_keys_changed(bls_key_register_ledger,
                                           pool_node_txns, txnPoolNodeSet,
                                           node, looper, sdk_wallet_steward,
                                           sdk_pool_handle):
    """After rotating a node's BLS key, a lookup against the old pool state
    root must still return the old key, and the new root the new key."""
    old_bls_key = get_payload_data(pool_node_txns[0])[DATA][BLS_KEY]
    new_bls_key, key_proof = init_bls_keys(node.keys_dir, node.name)
    old_pool_root_hash = node.poolManager.state.committedHeadHash
    # change BLS keys
    sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle, sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=new_bls_key,
                       new_key_proof=key_proof)
    new_pool_root_hash = node.poolManager.state.committedHeadHash
    assert old_pool_root_hash != new_pool_root_hash
    # get old and new keys
    bls_key = bls_key_register_ledger.get_key_by_name(node.name,
                                                      old_pool_root_hash)
    assert bls_key
    assert IndyCryptoBlsUtils.bls_to_str(bls_key) == old_bls_key
    bls_key = bls_key_register_ledger.get_key_by_name(node.name,
                                                      new_pool_root_hash)
    assert bls_key
    assert IndyCryptoBlsUtils.bls_to_str(bls_key) == new_bls_key
def prepare_buy_for_state(txn):
    """Build the (key, serialized value) pair for storing a 'buy' txn in state."""
    from common.serializers.serialization import domain_state_serializer
    serialized_amount = domain_state_serializer.serialize(
        {"amount": get_payload_data(txn)['amount']})
    state_key = TestDomainRequestHandler.prepare_buy_key(get_from(txn),
                                                         get_req_id(txn))
    return state_key, serialized_amount
def test_dirty_read(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Tests the case when read request comes before write request is
    not executed on some nodes
    """
    # Slow down a minority of nodes so the write is not yet executed there.
    slow_nodes = list(txnPoolNodeSet)[2:4]
    for node in slow_nodes:
        logger.debug("Making node {} slow".format(node))
        make_node_slow(node)

    received_replies = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                                 sdk_pool_handle,
                                                 sdk_wallet_client, 1)
    result = received_replies[0][1]["result"]
    seq_no = get_seq_no(result)
    _, did = sdk_wallet_client
    # Read the txn back; all replies must agree despite the slow nodes.
    req = sdk_build_get_txn_request(looper, did, seq_no)
    request = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client,
                                                 sdk_pool_handle, req)
    received_replies = sdk_get_and_check_replies(looper, [request])
    results = [str(get_payload_data(reply['result'][DATA]))
               for _, reply in received_replies]
    assert len(set(results)) == 1
def nymsAddedInQuickSuccession(looper, nodeSet, sdk_added_raw_attribute,
                               trustAnchor, trustAnchorWallet):
    """Submitting the same NYM twice in quick succession must be NACKed as
    already added, while the NYM is written exactly once on every node."""
    usigner = DidSigner()
    nym = usigner.verkey
    idy = Identity(identifier=nym)
    trustAnchorWallet.addTrustAnchoredIdentity(idy)
    # Creating a NYM request with same nym again
    req = idy.ledgerRequest()
    trustAnchorWallet._pending.appendleft((req, idy.identifier))
    reqs = trustAnchorWallet.preparePending()
    trustAnchor.submitReqs(*reqs)

    def check():
        # The first submission must have been written (seqNo assigned).
        assert trustAnchorWallet._trustAnchored[nym].seqNo

    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(check, timeout=timeout))

    # The duplicate must be NACKed by a quorum of nodes.
    timeout = waits.expectedReqNAckQuorumTime()
    looper.run(eventually(checkNacks,
                          trustAnchor,
                          req.reqId,
                          "is already added",
                          retryWait=1, timeout=timeout))

    # Each node's domain ledger must contain the NYM exactly once.
    count = 0
    for node in nodeSet:
        for seq, txn in node.domainLedger.getAllTxn():
            if get_type(txn) == NYM and get_payload_data(txn)[TARGET_NYM] == usigner.identifier:
                count += 1
    assert (count == len(nodeSet))
def _calc_nodes_ledger_sizes(self, ledger_id: int) -> Dict[str, int]:
    """Return {node_id: size of ledger `ledger_id`} derived from audit txns.

    Results are memoized in self._nodes_ledger_sizes per ledger_id.
    """
    result = self._nodes_ledger_sizes.get(ledger_id)
    if result is not None:
        return result

    nodes_audit_size = self._nodes_ledger_sizes[AUDIT_LEDGER_ID]
    if nodes_audit_size is None:
        return {}

    result = {}
    audit_ledger = self._provider.ledger(AUDIT_LEDGER_ID)
    for node_id, audit_seq_no in nodes_audit_size.items():
        # It can happen so that during catching up audit ledger we caught up
        # less transactions than some nodes reported
        audit_seq_no = min(audit_seq_no, audit_ledger.size)
        audit_txn = audit_ledger.getBySeqNo(audit_seq_no)
        audit_txn = get_payload_data(audit_txn)
        # Not having a reference to some ledger in audit txn can be a valid
        # case if we just installed a plugin that adds a new ledger, but
        # no audit txns were written yet
        ledger_size = audit_txn[AUDIT_TXN_LEDGERS_SIZE].get(ledger_id, 0)
        result[node_id] = ledger_size

    return result
def updateNym(self, nym, txn, isCommitted=True):
    """Write/merge a NYM txn into state and return the resulting record.

    New nyms get explicit None defaults for ROLE and VERKEY so the state
    record always carries both keys even when the request omitted them.
    """
    existingData = self.getNymDetails(self.state, nym,
                                      isCommitted=isCommitted)
    txn_data = get_payload_data(txn)
    newData = {}
    if not existingData:
        # New nym being added to state, set the TrustAnchor
        newData[f.IDENTIFIER.nm] = get_from(txn)
        # New nym being added to state, set the role and verkey to None, this makes
        # the state data always have a value for `role` and `verkey` since we allow
        # clients to omit specifying `role` and `verkey` in the request consider a
        # default value of None
        newData[ROLE] = None
        newData[VERKEY] = None

    if ROLE in txn_data:
        newData[ROLE] = txn_data[ROLE]
    if VERKEY in txn_data:
        newData[VERKEY] = txn_data[VERKEY]
    newData[F.seqNo.name] = get_seq_no(txn)
    newData[TXN_TIME] = get_txn_time(txn)
    existingData.update(newData)
    val = self.stateSerializer.serialize(existingData)
    key = self.nym_to_state_key(nym)
    self.state.set(key, val)
    return existingData
def _update_state_with_single_txn(self, txn, is_committed=False):
    """Apply one SET_FEES or FEE_TXN txn to the fees/token state.

    SET_FEES merges the new fee aliases into the current fee map and stores
    both per-alias entries and the full map; FEE_TXN spends each input UTXO
    and adds one UTXO per output at this txn's seq_no.
    """
    typ = get_type(txn)
    if typ == SET_FEES:
        payload = get_payload_data(txn)
        fees_from_req = payload.get(FEES)
        current_fees = self._get_fees()
        current_fees.update(fees_from_req)
        # Store each alias individually, then the merged map at the root path.
        for fees_alias, fees_value in fees_from_req.items():
            self._set_to_state(build_path_for_set_fees(alias=fees_alias),
                               fees_value)
        self._set_to_state(build_path_for_set_fees(), current_fees)

    elif typ == FEE_TXN:
        # Spend every input UTXO referenced by the fee txn.
        for utxo in txn[TXN_PAYLOAD][TXN_PAYLOAD_DATA][INPUTS]:
            TokenReqHandler.spend_input(state=self.token_state,
                                        utxo_cache=self.utxo_cache,
                                        address=utxo[ADDRESS],
                                        seq_no=utxo[SEQNO],
                                        is_committed=is_committed)
        seq_no = get_seq_no(txn)
        # Create new UTXOs for change outputs, keyed by this txn's seq_no.
        for output in txn[TXN_PAYLOAD][TXN_PAYLOAD_DATA][OUTPUTS]:
            TokenReqHandler.add_new_output(state=self.token_state,
                                           utxo_cache=self.utxo_cache,
                                           output=Output(
                                               output[ADDRESS],
                                               seq_no,
                                               output[AMOUNT]),
                                           is_committed=is_committed)
def _update_state_mint_public_txn(self, txn, is_committed=False):
    """Add one new UTXO per output of a public MINT txn."""
    mint_seq_no = get_seq_no(txn)
    for out in get_payload_data(txn)[OUTPUTS]:
        new_utxo = Output(out["address"], mint_seq_no, out["amount"])
        self._add_new_output(new_utxo, is_committed=is_committed)
def test_update_state(revoc_reg_entry_handler, revoc_reg_entry_request,
                      revoc_reg_def_handler, revoc_reg_def_request):
    """A REVOC_REG_ENTRY must be stored both at its own state path and, with
    VALUE reduced to ACCUM only, at the derived accumulator path."""
    # create revoc_req_def
    seq_no = 1
    txn_time = 1560241030
    revoc_reg_def_request.operation[ISSUANCE_TYPE] = ISSUANCE_BY_DEFAULT
    txn = reqToTxn(revoc_reg_def_request)
    append_txn_metadata(txn, seq_no, txn_time)
    path = RevocRegDefHandler.prepare_revoc_def_for_state(txn,
                                                          path_only=True)
    revoc_reg_def_handler.update_state(txn, None, revoc_reg_def_request)

    # create revoc_req_entry referencing the def just written
    seq_no = 2
    txn_time = 1560241033
    revoc_reg_entry_request.operation[REVOC_REG_DEF_ID] = path.decode()
    txn = reqToTxn(revoc_reg_entry_request)
    append_txn_metadata(txn, seq_no, txn_time)
    revoc_reg_entry_handler.update_state(txn, None, revoc_reg_entry_request)

    # check state for revoc_reg_entry
    txn_data = get_payload_data(txn)
    txn_data[f.SEQ_NO.nm] = seq_no
    txn_data[TXN_TIME] = txn_time
    assert revoc_reg_entry_handler.get_from_state(
        RevocRegEntryHandler.prepare_revoc_reg_entry_for_state(
            txn, path_only=True)) == (txn_data, seq_no, txn_time)

    # check state for the accumulator entry (VALUE reduced to ACCUM only)
    txn_data[VALUE] = {ACCUM: txn_data[VALUE][ACCUM]}
    path, _ = RevocRegEntryHandler.prepare_revoc_reg_entry_accum_for_state(txn)
    assert revoc_reg_entry_handler.get_from_state(path) == (txn_data, seq_no, txn_time)
def check_ledger_after_upgrade(
        node_set,
        allowed_actions,
        ledger_size,
        expected_version,
        allowed_txn_types=(NODE_UPGRADE,),
        node_ids=None):
    """Assert every node's config ledger reflects the expected upgrade state.

    Fixes over the previous version: the default for `allowed_txn_types` is a
    tuple instead of a mutable list (mutable default argument anti-pattern),
    and the local no longer shadows the builtin `type`. Behavior for callers
    is unchanged (`in` works identically on tuple and list).

    :param node_set: nodes whose config ledgers are checked
    :param allowed_actions: permissible ACTION values in upgrade txns
    :param ledger_size: expected config ledger length on every node
    :param expected_version: the single VERSION all txns must agree on
    :param allowed_txn_types: permissible txn types in the config ledger
    :param node_ids: if given, the exact set of txn authors expected
    """
    versions = set()
    for node in node_set:
        assert len(node.configLedger) == ledger_size
        ids = set()
        for _, txn in node.configLedger.getAllTxn():
            txn_type = get_type(txn)
            assert txn_type in allowed_txn_types
            txn_data = get_payload_data(txn)
            # NODE_UPGRADE txns nest their payload under DATA.
            data = txn_data[DATA] if txn_type == NODE_UPGRADE else txn_data
            assert data[ACTION]
            assert data[ACTION] in allowed_actions
            ids.add(get_from(txn))
            assert data[VERSION]
            versions.add(data[VERSION])
        ids.add(node.id)
        if node_ids:
            assert ids == set(node_ids)
    # All txns across all nodes must agree on a single version.
    assert len(versions) == 1
    assert list(versions)[0] == expected_version
def test_get_key_for_old_root_keys_changed(bls_key_register_ledger,
                                           pool_node_txns, txnPoolNodeSet,
                                           node, looper, sdk_wallet_steward,
                                           sdk_pool_handle):
    """After rotating a node's BLS key, a lookup against the old pool state
    root must still return the old key, and the new root the new key."""
    old_bls_key = get_payload_data(pool_node_txns[0])[DATA][BLS_KEY]
    new_bls_key, key_proof = init_bls_keys(node.keys_dir, node.name)
    old_pool_root_hash = node.poolManager.state.committedHeadHash
    # change BLS keys
    sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle, sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=new_bls_key,
                       new_key_proof=key_proof)
    new_pool_root_hash = node.poolManager.state.committedHeadHash
    assert old_pool_root_hash != new_pool_root_hash
    # get old and new keys
    bls_key = bls_key_register_ledger.get_key_by_name(node.name,
                                                      old_pool_root_hash)
    assert bls_key
    assert bls_key == old_bls_key
    bls_key = bls_key_register_ledger.get_key_by_name(node.name,
                                                      new_pool_root_hash)
    assert bls_key
    assert bls_key == new_bls_key
def test_fees_utxo_reuse(
    helpers,
    fees_paid,
    fees_set,
    address_main
):
    """
    Check that utxo used in sovtokenfees cannot be reused
    """
    # Re-use the inputs/outputs of an already-paid fee txn.
    nym_fees_data = get_payload_data(fees_paid[FEES])
    inputs = nym_fees_data[INPUTS]
    outputs = nym_fees_data[OUTPUTS]
    fee_amount = fees_set[FEES][NYM]

    req = helpers.request.nym()
    fee_sigs = helpers.request.fees_signatures(
        inputs,
        outputs,
        req.digest
    )
    fees = [inputs, outputs, fee_sigs]
    setattr(req, FEES, fees)

    # The pool must reject the request since the UTXOs are already spent.
    with pytest.raises(RequestRejectedException):
        helpers.sdk.send_and_check_request_objects([req])
def test_fill_ts_store_after_catchup(txnPoolNodeSet, looper, sdk_pool_handle,
                                     sdk_wallet_steward, tconf, tdir,
                                     allPluginsPath):
    """A restarted node must populate its timestamp store during catchup so
    state reads by txn time resolve to the correct root hash."""
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)
    # Disconnect one node, write more txns it will have to catch up on.
    node_to_disconnect = txnPoolNodeSet[-1]
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_replies = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                            sdk_pool_handle,
                                            sdk_wallet_steward, 2)
    # Restart it and wait until its data catches up with the pool.
    node_to_disconnect = start_stopped_node(node_to_disconnect, looper,
                                            tconf, tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])

    # The ts store must now resolve each caught-up txn's time to a root hash
    # under which the txn's state value is readable.
    req_handler = node_to_disconnect.read_manager.request_handlers[GET_BUY]
    for reply in sdk_replies:
        key = BuyHandler.prepare_buy_key(get_from(reply[1]['result']),
                                         get_req_id(reply[1]['result']))
        root_hash = req_handler.database_manager.ts_store.get_equal_or_prev(
            get_txn_time(reply[1]['result']))
        assert root_hash
        from_state = req_handler.state.get_for_root_hash(root_hash=root_hash,
                                                         key=key)
        assert domain_state_serializer.deserialize(from_state)['amount'] == \
            get_payload_data(reply[1]['result'])['amount']
def test_update_state(txn_author_agreement_handler, taa_request):
    """A TAA txn must be stored under its digest path, and both the 'latest'
    and per-version paths must point at that digest."""
    seq_no = 1
    txn_time = 1560241033
    txn_id = "id"
    txn = reqToTxn(taa_request)
    payload = get_payload_data(txn)
    text = payload[TXN_AUTHOR_AGREEMENT_TEXT]
    version = payload[TXN_AUTHOR_AGREEMENT_VERSION]
    digest = StaticTAAHelper.taa_digest(text, version)
    append_txn_metadata(txn, seq_no, txn_time, txn_id)

    state_value = {
        TXN_AUTHOR_AGREEMENT_TEXT: text,
        TXN_AUTHOR_AGREEMENT_VERSION: version
    }

    txn_author_agreement_handler.update_state(txn, None, taa_request)

    assert txn_author_agreement_handler.get_from_state(
        StaticTAAHelper.state_path_taa_digest(digest)) == (state_value, seq_no, txn_time)
    assert txn_author_agreement_handler.state.get(
        StaticTAAHelper.state_path_taa_latest()) == digest
    assert txn_author_agreement_handler.state.get(
        StaticTAAHelper.state_path_taa_version(version)) == digest
def id(self):
    # Lazily resolve this node's id (TARGET_NYM) by scanning the pool ledger
    # for txns whose DATA[ALIAS] matches our name. Note the loop does not
    # break on first match, so the LAST matching txn determines the id.
    if not self._id:
        for _, txn in self.ledger.getAllTxn():
            txn_data = get_payload_data(txn)
            if self.name == txn_data[DATA][ALIAS]:
                self._id = txn_data[TARGET_NYM]
    return self._id
def prepare_for_state_read(req: Request):
    """For a 'buy' request, return its state (key, serialized value); otherwise None."""
    if req.txn_type != "buy":
        return None
    txn = reqToTxn(req)
    serialized_amount = domain_state_serializer.serialize(
        {"amount": get_payload_data(txn)['amount']})
    state_key = BuyHandler.prepare_buy_key(req.identifier, req.reqId)
    return state_key, serialized_amount
def _update_state_with_single_txn(self, txn, is_committed=False):
    """Apply one SET_FEES or FEE_TXN txn to the fees/token state.

    SET_FEES merges new fees into the existing fee map and caches it on
    self.fees; FEE_TXN spends each input UTXO and creates one UTXO per
    output at this txn's seq_no. Unknown types are logged and ignored.
    """
    typ = get_type(txn)
    if typ == SET_FEES:
        payload = get_payload_data(txn)
        existing_fees = self._get_fees(is_committed=is_committed)
        existing_fees.update(payload[FEES])
        val = self.state_serializer.serialize(existing_fees)
        self.state.set(self.fees_state_key, val)
        # Keep the in-memory fees cache in sync with state.
        self.fees = existing_fees
    elif typ == FEE_TXN:
        # Spend every input UTXO referenced by the fee txn.
        for utxo in txn[TXN_PAYLOAD][TXN_PAYLOAD_DATA][INPUTS]:
            TokenReqHandler.spend_input(
                state=self.token_state,
                utxo_cache=self.utxo_cache,
                address=utxo[ADDRESS],
                seq_no=utxo[SEQNO],
                is_committed=is_committed)
        seq_no = get_seq_no(txn)
        # Create new UTXOs for change outputs, keyed by this txn's seq_no.
        for output in txn[TXN_PAYLOAD][TXN_PAYLOAD_DATA][OUTPUTS]:
            TokenReqHandler.add_new_output(
                state=self.token_state,
                utxo_cache=self.utxo_cache,
                output=Output(
                    output[ADDRESS],
                    seq_no,
                    output[AMOUNT]),
                is_committed=is_committed)
    else:
        logger.warning('Unknown type {} found while updating '
                       'state with txn {}'.format(typ, txn))
def __fill_ledger_root_hash(self, txn, three_pc_batch, lid, last_audit_txn):
    """Fill AUDIT_TXN_LEDGER_ROOT[lid] in an audit txn being built.

    Roots are stored either as a hash string (ledger changed in this batch),
    an int back-reference to an earlier audit txn, or omitted entirely when
    the ledger has never been audited.
    """
    target_ledger_id = three_pc_batch.ledger_id
    last_audit_txn_data = get_payload_data(
        last_audit_txn) if last_audit_txn is not None else None

    # 1. ledger is changed in this batch => root_hash
    if lid == target_ledger_id:
        txn[AUDIT_TXN_LEDGER_ROOT][str(lid)] = Ledger.hashToStr(
            three_pc_batch.txn_root)

    # 2. This ledger is never audited, so do not add the key
    elif last_audit_txn_data is None or str(
            lid) not in last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT]:
        return

    # 3. ledger is not changed in last batch => the same audit seq no
    elif isinstance(last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][str(lid)],
                    int):
        txn[AUDIT_TXN_LEDGER_ROOT][str(
            lid)] = last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][str(lid)]

    # 4. ledger is changed in last batch but not changed now => seq_no of last audit txn
    elif last_audit_txn_data:
        txn[AUDIT_TXN_LEDGER_ROOT][str(lid)] = get_seq_no(last_audit_txn)
def update_state(self, txn, prev_result, request, is_committed=False):
    """Record TAA acceptance-mechanism changes carried by the txn payload."""
    self._validate_txn_type(txn)
    self._update_txn_author_agreement_acceptance_mechanisms(
        get_payload_data(txn), get_seq_no(txn), get_txn_time(txn))
def test_dirty_read(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Tests the case when read request comes before write request is
    not executed on some nodes
    """
    # Slow down a minority of nodes so the write is not yet executed there.
    slow_nodes = list(txnPoolNodeSet)[2:4]
    for node in slow_nodes:
        logger.debug("Making node {} slow".format(node))
        make_node_slow(node)

    received_replies = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                                 sdk_pool_handle,
                                                 sdk_wallet_client, 1)

    result = received_replies[0][1]["result"]
    seq_no = get_seq_no(result)
    _, did = sdk_wallet_client
    # Read the txn back; all replies must agree despite the slow nodes.
    req = sdk_build_get_txn_request(looper, did, seq_no)
    request = sdk_sign_and_send_prepared_request(looper,
                                                 sdk_wallet_client,
                                                 sdk_pool_handle,
                                                 req)
    received_replies = sdk_get_and_check_replies(looper, [request])
    results = [
        str(get_payload_data(reply['result'][DATA]))
        for _, reply in received_replies
    ]
    assert len(set(results)) == 1
async def getSchema(self, id: ID) -> Optional[Schema]:
    """Fetch a schema from the ledger by key (issuer/name/version) or by txn id.

    Raises SchemaNotFoundError when nothing usable comes back; otherwise
    returns a Schema built from the reply data.
    """
    data = None
    issuer_id = None
    if id.schemaKey:
        # Lookup by (issuer, name, version) via GET_SCHEMA.
        issuer_id = id.schemaKey.issuerId
        op = {
            TARGET_NYM: issuer_id,
            TXN_TYPE: GET_SCHEMA,
            DATA: {
                NAME: id.schemaKey.name,
                VERSION: id.schemaKey.version,
            }
        }
        data, seqNo = await self._sendGetReq(op)

    else:
        # Lookup by schema txn seq-no via GET_TXNS on the domain ledger.
        op = {
            f.LEDGER_ID.nm: DOMAIN_LEDGER_ID,
            TXN_TYPE: GET_TXNS,
            DATA: id.schemaId
        }
        res, seqNo = await self._sendGetReq(op)
        if res and get_type(res) == SCHEMA:
            issuer_id = get_from(res)
            data = get_payload_data(res)[DATA]

    if not data or ATTR_NAMES not in data:
        raise SchemaNotFoundError('No schema with ID={} and key={}'.format(
            id.schemaId, id.schemaKey))

    return Schema(name=data[NAME],
                  version=data[VERSION],
                  attrNames=data[ATTR_NAMES],
                  issuerId=issuer_id,
                  seqId=seqNo)
def _load_nodes_order_from_ledger(self):
    """Rebuild node ordering from every NODE txn in the pool ledger."""
    self._ordered_node_ids = OrderedDict()
    for _, txn in self.ledger.getAllTxn():
        if get_type(txn) != NODE:
            continue
        payload = get_payload_data(txn)
        self._set_node_order(payload[TARGET_NYM], payload[DATA][ALIAS])
def gen_txn_path(self, txn):
    """Return path to state as 'str' type or None"""
    txn_type = get_type(txn)
    if txn_type not in self.state_update_handlers:
        logger.error('Cannot generate id for txn of type {}'.format(txn_type))
        return None

    if txn_type == NYM:
        nym = get_payload_data(txn).get(TARGET_NYM)
        binary_digest = domain.make_state_path_for_nym(nym)
        return hexlify(binary_digest).decode()
    elif txn_type == ATTRIB:
        path = domain.prepare_attr_for_state(txn, path_only=True)
        return path.decode()
    elif txn_type == SCHEMA:
        path = domain.prepare_schema_for_state(txn, path_only=True)
        return path.decode()
    elif txn_type == CLAIM_DEF:
        path = domain.prepare_claim_def_for_state(txn, path_only=True)
        return path.decode()
    elif txn_type == REVOC_REG_DEF:
        path = domain.prepare_revoc_def_for_state(txn, path_only=True)
        return path.decode()
    elif txn_type == REVOC_REG_ENTRY:
        path = domain.prepare_revoc_reg_entry_for_state(txn, path_only=True)
        return path.decode()

    # A handler exists for this type but no path construction is defined yet.
    raise NotImplementedError("path construction is not implemented for type {}".format(txn_type))
def handleConfigTxn(self, txn) -> None:
    """
    Handles transaction of type POOL_CONFIG

    :param txn:
    """
    if get_type(txn) != POOL_CONFIG:
        return
    # Update the writability flag from the txn payload.
    self.writes = get_payload_data(txn)[WRITES]
def get_data_for_role(pool_txn_data, role):
    """Return (alias, seed) pairs for every genesis txn with the given role."""
    seeds = pool_txn_data['seeds']
    pairs = []
    for txn in pool_txn_data['txns']:
        payload = get_payload_data(txn)
        if payload.get(ROLE) == role:
            alias = payload[ALIAS]
            pairs.append((alias, seeds[alias]))
    return pairs
def txnPoolCliNodeReg(poolTxnData):
    """Map '<alias><client-suffix>' stack names to client HAs from NODE txns."""
    registry = {}
    for txn in poolTxnData["txns"]:
        if get_type(txn) != NODE:
            continue
        node_data = get_payload_data(txn)[DATA]
        stack_name = node_data[ALIAS] + CLIENT_STACK_SUFFIX
        registry[stack_name] = HA(node_data[CLIENT_IP],
                                  node_data[CLIENT_PORT])
    return registry
def nodeExistsInLedger(self, nym):
    """Return True iff the pool ledger contains a NODE txn targeting `nym`.

    Fix: the original fell off the end and implicitly returned None when no
    txn matched; now it explicitly returns False. Both are falsy, so callers
    using the result in a boolean context are unaffected.
    """
    # Since PoolLedger is going to be small so using
    # `getAllTxn` is fine
    for _, txn in self.ledger.getAllTxn():
        if get_type(txn) == NODE and \
                get_payload_data(txn)[TARGET_NYM] == nym:
            return True
    return False
def trusteeData(poolTxnTrusteeNames, updatedPoolTxnData):
    """Return (name, seed-bytes, txn-or-None) for each trustee name.

    Fix: use `.get(ALIAS)` instead of `[ALIAS]` while searching so txns whose
    payload lacks an ALIAS field are skipped rather than raising KeyError.
    """
    ret = []
    for name in poolTxnTrusteeNames:
        seed = updatedPoolTxnData["seeds"][name]
        # First txn whose payload ALIAS matches the trustee name, else None.
        txn = next(
            (txn for txn in updatedPoolTxnData["txns"]
             if get_payload_data(txn).get(ALIAS) == name),
            None)
        ret.append((name, seed.encode(), txn))
    return ret
def getNodesServices(self):
    """Return {node nym: services list} from NODE txns that carry SERVICES."""
    services_by_nym = dict()
    for _, txn in self.ledger.getAllTxn():
        if get_type(txn) != NODE:
            continue
        payload = get_payload_data(txn)
        # Only txns that explicitly set SERVICES contribute.
        if payload.get(DATA, {}).get(SERVICES) is not None:
            services_by_nym[payload[TARGET_NYM]] = payload[DATA][SERVICES]
    return services_by_nym
def set_to_state(self, txn):
    """Write a REVOC_REG_ENTRY txn and its derived ACCUM entry to state.

    NOTE(review): mutates the txn payload in place — VALUE is replaced with
    an ACCUM-only dict before the accum entry is prepared. Callers must not
    rely on the original VALUE afterwards.
    """
    # Set REVOC_REG_ENTRY
    path, value_bytes = domain.prepare_revoc_reg_entry_for_state(txn)
    self.state.set(path, value_bytes)
    # Set ACCUM from REVOC_REG_ENTRY
    txn_data = get_payload_data(txn)
    txn_data[VALUE] = {ACCUM: txn_data[VALUE][ACCUM]}
    path, value_bytes = domain.prepare_revoc_reg_entry_accum_for_state(txn)
    self.state.set(path, value_bytes)
def testSuspendNodeWhichWasNeverActive(be, do, trusteeCli, nymAddedOut,
                                       poolNodesStarted, trusteeMap):
    """
    Add a node without services field and check that the ledger does not
    contain the `services` field and check that it can be blacklisted and
    the ledger has `services` as empty list
    """
    newStewardSeed = '0000000000000000000KellySteward2'
    newStewardIdr = 'DqCx7RFEpSUMZbV2mH89XPH6JT3jMvDNU55NTnBHsQCs'
    be(trusteeCli)
    # Register a new steward and switch to its key.
    do('send NYM dest={{remote}} role={role}'.format(
        role=Roles.STEWARD.name),
        within=5,
        expect=nymAddedOut, mapper={'remote': newStewardIdr})
    do('new key with seed {}'.format(newStewardSeed))
    nport, cport = (_[1] for _ in genHa(2))
    nodeId = '6G9QhQa3HWjRKeRmEvEkLbWWf2t7cw6KLtafzi494G4G'
    # Node data deliberately omits the SERVICES field.
    newNodeVals = {
        'newNodeIdr': nodeId,
        'newNodeData': {'client_port': cport,
                        'client_ip': '127.0.0.1',
                        'alias': 'Node6',
                        'node_ip': '127.0.0.1',
                        'node_port': nport
                        }
    }
    doSendNodeCmd(do, newNodeVals)
    # The ledger txn must not contain SERVICES at all.
    for node in poolNodesStarted.nodes.values():
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == nodeId
        assert SERVICES not in txn_data[DATA]

    # Blacklist the node as trustee: SERVICES becomes an explicit empty list.
    do('new key with seed {}'.format(trusteeMap['trusteeSeed']))
    newNodeVals['newNodeData'][SERVICES] = []
    doSendNodeCmd(do, newNodeVals)

    for node in poolNodesStarted.nodes.values():
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == nodeId
        assert SERVICES in txn_data[DATA] and txn_data[DATA][SERVICES] == []
def _load_nodes_order_from_ledger(self):
    """Rebuild the cached node-id ordering and node-services maps by
    replaying every NODE txn from the pool ledger in ledger order."""
    self._ordered_node_ids = OrderedDict()
    self._ordered_node_services = {}
    node_payloads = (get_payload_data(txn)
                     for _, txn in self.ledger.getAllTxn()
                     if get_type(txn) == NODE)
    for payload in node_payloads:
        nym = payload[TARGET_NYM]
        self._set_node_ids_in_cash(nym, payload[DATA][ALIAS])
        self._set_node_services_in_cash(nym,
                                        payload[DATA].get(SERVICES, None))
def test_genesis_nodes(looper, txnPoolNodeSet, sdk_pool_handle,
                       sdk_wallet_client):
    """Check that every node boots with nodeCount pool txns and
    nodeCount steward NYMs, and that the pool is then functional."""
    assert len(txnPoolNodeSet) == nodeCount
    for node in txnPoolNodeSet:
        assertEquality(node.poolLedger.size, nodeCount)
        steward_nyms = [txn for _, txn in node.domainLedger.getAllTxn()
                        if get_type(txn) == NYM
                        and get_payload_data(txn).get(ROLE) == STEWARD]
        assertEquality(len(steward_nyms), nodeCount)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
def _addRevocRegEntry(self, txn, isCommitted=False) -> None:
    """Apply a REVOC_REG_ENTRY txn to state using the revocation
    strategy selected by the referenced definition's ISSUANCE_TYPE."""
    payload = get_payload_data(txn)
    current_entry, revoc_def = self._get_current_revoc_entry_and_revoc_def(
        author_did=get_from(txn),
        revoc_reg_def_id=payload[REVOC_REG_DEF_ID],
        req_id=get_req_id(txn)
    )
    strategy_cls = self.get_revocation_strategy(
        revoc_def[VALUE][ISSUANCE_TYPE])
    strategy = strategy_cls(self.state)
    strategy.write(current_entry, txn)
def prepare_schema_for_state(txn):
    """Build the (state path, encoded value) pair for a SCHEMA txn.

    NAME and VERSION are popped from a deep copy of the payload DATA and
    used in the path; the remaining fields become the stored value.
    """
    payload = get_payload_data(txn)
    schema_data = deepcopy(payload.get(DATA))
    name = schema_data.pop(NAME)
    version = schema_data.pop(VERSION)
    state_path = make_state_path_for_schema(get_from(txn), name, version)
    encoded = encode_state_value(schema_data,
                                 get_seq_no(txn),
                                 get_txn_time(txn))
    return state_path, encoded
def check_get_nym_by_name(txnPoolNodeSet, pool_node_txns):
    """Assert each node's pool manager resolves its own name to the
    TARGET_NYM recorded in the corresponding genesis txn."""
    nodes = txnPoolNodeSet[:nodeCount]
    txns = pool_node_txns[:nodeCount]
    for node, genesis_txn in zip(nodes, txns):
        resolved_nym = node.poolManager.get_nym_by_name(node.name)
        expected_nym = get_payload_data(genesis_txn)[TARGET_NYM]
        assert resolved_nym
        assert resolved_nym == expected_nym
def _load_keys_for_root(self):
    """Collect BLS public keys keyed by node alias from NODE txns.

    A key without a proof of possession (BLS_KEY_PROOF) is recorded as
    None — and a warning logged — unless the config flag
    VALIDATE_BLS_SIGNATURE_WITHOUT_KEY_PROOF allows keys without proof.
    """
    keys = {}
    for _, txn in self._ledger.getAllTxn():
        if get_type(txn) != NODE:
            continue
        data = get_payload_data(txn)[DATA]
        alias = data[ALIAS]
        proof_missing = data.get(BLS_KEY_PROOF, None) is None
        if proof_missing and not config.VALIDATE_BLS_SIGNATURE_WITHOUT_KEY_PROOF:
            logger.warning("{} has no proof of possession for BLS public key.".format(alias))
            keys[alias] = None
        else:
            keys[alias] = data.get(BLS_KEY, None)
    return keys
def _attrib_data_from_reply(self, result):
    """Extract (origin, dest, value) from an ATTRIB reply payload.

    The value is taken from whichever of RAW / ENC / HASH is present
    (checked in that order); RAW payloads are JSON-decoded. If none of
    the three keys is present, value is None.
    """
    payload = get_payload_data(result)
    dest = payload[TARGET_NYM]
    origin = get_from(result)
    if RAW in payload:
        val = json.loads(payload[RAW])
    elif ENC in payload:
        val = payload[ENC]
    elif HASH in payload:
        val = payload[HASH]
    else:
        val = None
    return origin, dest, val
def transform_attrib_for_ledger(txn):
    """
    Creating copy of result so that `RAW`, `ENC` or `HASH` can be
    replaced by their hashes. We do not insert actual attribute data
    in the ledger but only the hash of it.
    """
    ledger_txn = deepcopy(txn)
    payload = get_payload_data(ledger_txn)
    attr_type, _, value = domain.parse_attr_txn(payload)
    if attr_type in (RAW, ENC):
        # Empty/falsy values are stored as '' rather than hashed.
        payload[attr_type] = domain.hash_of(value) if value else ''
    return ledger_txn
def test_pool_genesis_txns(bootstrap, pool_genesis_file):
    """Every line of the pool genesis file deserializes to a NODE txn
    carrying complete node data and validator services."""
    serializer = JsonSerializer()
    with open(pool_genesis_file) as f:
        for line in store_utils.cleanLines(f.readlines()):
            txn = serializer.deserialize(line)
            assert get_seq_no(txn)
            assert get_txn_id(txn)
            payload = get_payload_data(txn)
            assert payload
            assert get_type(txn) == NODE
            assert get_version(txn) == "1"
            assert get_protocol_version(txn) is None
            assert payload[TARGET_NYM]
            data = payload.get(DATA)
            assert data
            # All of these fields must be present and truthy.
            for field in (ALIAS, CLIENT_IP, CLIENT_PORT, NODE_IP, NODE_PORT):
                assert data[field]
            assert data[SERVICES] == [VALIDATOR]
            assert data[BLS_KEY]
            assert data[BLS_KEY_PROOF]
def _addNym(self, txn, isCommitted=False) -> None:
    """Apply a NYM txn by delegating to updateNym.

    :param txn: the NYM transaction to apply
    :param isCommitted: whether the txn is already committed to the ledger

    NOTE(review): the previous revision also built a dict of
    {IDENTIFIER, SEQ_NO, TXN_TIME, and optional ROLE/VERKEY} from the
    txn, but never passed it anywhere — updateNym receives only the nym
    and the txn itself. That dead code is removed here; updateNym is
    presumed to derive the same fields from the txn (confirm in its
    implementation).
    """
    txn_data = get_payload_data(txn)
    nym = txn_data.get(TARGET_NYM)
    self.updateNym(nym, txn, isCommitted=isCommitted)
def prepare_attr_for_state(txn):
    """
    Make key(path)-value pair for state from ATTRIB or GET_ATTR

    :return: state path, state value, value for attribute store
    """
    assert get_type(txn) == ATTRIB
    payload = get_payload_data(txn)
    attr_type, attr_key, value = parse_attr_txn(payload)
    # State stores only the hash of the attribute, never the raw value.
    hashed_value = hash_of(value) if value else ''
    encoded = encode_state_value(hashed_value,
                                 get_seq_no(txn),
                                 get_txn_time(txn))
    state_path = make_state_path_for_attr(payload[TARGET_NYM],
                                          attr_key,
                                          attr_type == HASH)
    return attr_type, state_path, value, hashed_value, encoded
def getVerkey(self, identifier):
    """Return the latest verkey recorded for `identifier`, scanning the
    whole ledger; falls back to the identifier itself when no verkey was
    ever set. Raises UnknownIdentifier if no txn targets the identifier.
    """
    # TODO: This is very inefficient
    latest_verkey = None
    identifier_seen = False
    for _, txn in self.ledger.getAllTxn():
        payload = get_payload_data(txn)
        if payload[TARGET_NYM] != identifier:
            continue
        identifier_seen = True
        if payload.get(VERKEY):
            latest_verkey = payload[VERKEY]
    if not identifier_seen:
        raise UnknownIdentifier(identifier)
    return latest_verkey or identifier
def test_domain_genesis_txns(bootstrap, domain_genesis_file):
    """Domain genesis file holds NYM txns ordered as Trustees, then
    Stewards, then role-less Clients; every txn carries verkey and nym."""
    serializer = JsonSerializer()
    with open(domain_genesis_file) as f:
        for i, line in enumerate(store_utils.cleanLines(f.readlines())):
            txn = serializer.deserialize(line)
            assert get_seq_no(txn)
            payload = get_payload_data(txn)
            assert payload
            assert get_type(txn) == NYM
            assert get_version(txn) == "1"
            assert get_protocol_version(txn) is None
            assert payload[VERKEY]
            assert payload[TARGET_NYM]
            assert ALIAS not in payload
            # expect Trustees, then Stewards, then Clients
            if i < TRUSTEE_COUNT:
                expected_role = TRUSTEE
            elif i < TRUSTEE_COUNT + NODE_COUNT:
                expected_role = STEWARD
            else:
                expected_role = None
            assert payload.get(ROLE) == expected_role