def test_request_executed_once_and_without_failing_behind(
        tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Checks that all requests are executed only once and that nodes do not
    fall behind in written transactions.

    1. Send requests.
    2. When a request is ordered, send a checkpoint to the replica to force
       garbage collection.
    3. Check that the ordered request is executed and the client receives a reply.
    4. Check that ledgers on all nodes are the same and contain all expected
       transactions.
    """
    number_of_requests = 5
    tconf.CHK_FREQ = 1
    for node in txnPoolNodeSet:
        for replica in node.replicas:
            set_checkpoint_faking(replica)

    replies = sdk_send_random_and_check(looper,
                                        txnPoolNodeSet,
                                        sdk_pool_handle,
                                        sdk_wallet_client,
                                        number_of_requests)

    expected = [get_req_id(reply["result"]) for _, reply in replies]

    for node in txnPoolNodeSet:
        real_ledger_state = [
            get_req_id(txn)
            for _, txn in node.getLedger(DOMAIN_LEDGER_ID).getAllTxn()
            if get_req_id(txn) is not None
        ]
        assert expected == real_ledger_state
def test_request_executed_once_and_without_failing_behind(tconf, looper,
                                                          txnPoolNodeSet,
                                                          sdk_pool_handle,
                                                          sdk_wallet_client):
    """
    Checks that all requests are executed only once and that nodes do not
    fall behind in written transactions.

    1. Send requests.
    2. When a request is ordered, send a checkpoint to the replica to force
       garbage collection.
    3. Check that the ordered request is executed and the client receives a reply.
    4. Check that ledgers on all nodes are the same and contain all expected
       transactions.
    """
    number_of_requests = 5
    tconf.CHK_FREQ = 1
    for node in txnPoolNodeSet:
        for replica in node.replicas.values():
            set_checkpoint_faking(replica)

    replies = sdk_send_random_and_check(looper,
                                        txnPoolNodeSet,
                                        sdk_pool_handle,
                                        sdk_wallet_client,
                                        number_of_requests)

    expected = [get_req_id(reply["result"]) for _, reply in replies]

    for node in txnPoolNodeSet:
        real_ledger_state = [get_req_id(txn)
                             for _, txn in node.getLedger(DOMAIN_LEDGER_ID).getAllTxn()
                             if get_req_id(txn) is not None]
        assert expected == real_ledger_state
def test_token_req_handler_apply_MINT_PUBLIC_success_with_inputs(
        helpers, addresses, token_handler_a):
    [address1, address2] = addresses
    outputs = [
        {"address": address1, "amount": 40},
        {"address": address2, "amount": 20}
    ]
    request = helpers.request.mint(outputs)
    request.operation[INPUTS] = [[address1, 1]]

    seq_no, txn = token_handler_a.apply(request, CONS_TIME)

    expected = TxnResponse(
        MINT_PUBLIC,
        request.operation,
        signatures=request.signatures,
        req_id=request.reqId,
        frm=request._identifier,
    ).form_response()

    assert get_payload_data(txn) == get_payload_data(expected)
    assert get_req_id(txn) == get_req_id(expected)
    assert get_from(txn) == get_from(expected)
    # Compare against the expected response rather than the txn with itself.
    assert get_sorted_signatures(txn) == get_sorted_signatures(expected)
def testClientGetsResponseWithoutConsensusForUsedReqId(nodeSet, looper,
                                                       steward,
                                                       addedTrustAnchor,
                                                       trustAnchor,
                                                       userWalletA,
                                                       attributeName,
                                                       attributeData,
                                                       addedRawAttribute):
    lastReqId = None
    replies = {}
    for msg, sender in reversed(trustAnchor.inBox):
        if msg[OP_FIELD_NAME] == REPLY:
            if not lastReqId:
                lastReqId = get_req_id(msg[f.RESULT.nm])
            if get_req_id(msg[f.RESULT.nm]) == lastReqId:
                replies[sender] = msg
            if len(replies) == len(nodeSet):
                break

    trustAnchorWallet = addedTrustAnchor
    attrib = Attribute(name=attributeName,
                       origin=trustAnchorWallet.defaultId,
                       value=attributeData,
                       dest=userWalletA.defaultId,
                       ledgerStore=LedgerStore.RAW)
    trustAnchorWallet.addAttribute(attrib)
    req = trustAnchorWallet.preparePending()[0]
    _, key = trustAnchorWallet._prepared.pop((req.identifier, req.reqId))
    req.reqId = lastReqId
    req.signature = trustAnchorWallet.signMsg(
        msg=req.signingState(identifier=req.identifier),
        identifier=req.identifier)
    trustAnchorWallet._prepared[req.identifier, req.reqId] = req, key
    trustAnchor.submitReqs(req)

    def chk():
        nonlocal trustAnchor, lastReqId, replies
        for node in nodeSet:
            last = node.spylog.getLast(TestNode.getReplyFromLedger.__name__)
            assert last
            result = last.result
            assert result is not None

            replies[node.clientstack.name][f.RESULT.nm].pop(TXN_TIME, None)
            result.result.pop(TXN_TIME, None)

            assert {k: v for k, v in result.result.items() if v is not None}.items() <= \
                replies[node.clientstack.name][f.RESULT.nm].items()

    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
def check_request_ordered(node, request: Request):
    # it's ok to iterate through all txns since this is a test
    for seq_no, txn in node.domainLedger.getAllTxn():
        if get_req_id(txn) is None:
            continue
        if get_from(txn) is None:
            continue
        if get_req_id(txn) != request.reqId:
            continue
        if get_from(txn) != request.identifier:
            continue
        return True
    raise ValueError('{} request not ordered by node {}'.format(request, node.name))
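The predicate above raises until the request appears in the domain ledger, so in plenum-style tests it is normally polled rather than called once. A minimal usage sketch, assuming the standard `eventually` helper and a `looper` fixture as in the other snippets here; the wrapper name and timeout value are illustrative, not from the source:

# Hypothetical usage sketch: poll check_request_ordered until it stops raising.
from stp_core.loop.eventually import eventually


def wait_request_ordered(looper, node, request, timeout=15):
    # eventually() retries the callable until it returns without raising
    # or the timeout expires.
    looper.run(eventually(check_request_ordered, node, request,
                          retryWait=1, timeout=timeout))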
def test_fill_ts_store_after_catchup(txnPoolNodeSet,
                                     looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     tconf,
                                     tdir,
                                     allPluginsPath):
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)
    node_to_disconnect = txnPoolNodeSet[-1]

    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_replies = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                            sdk_pool_handle,
                                            sdk_wallet_steward, 2)

    node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf,
                                            tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    req_handler = node_to_disconnect.read_manager.request_handlers[GET_BUY]
    for reply in sdk_replies:
        key = BuyHandler.prepare_buy_key(get_from(reply[1]['result']),
                                         get_req_id(reply[1]['result']))
        root_hash = req_handler.database_manager.ts_store.get_equal_or_prev(
            get_txn_time(reply[1]['result']))
        assert root_hash
        from_state = req_handler.state.get_for_root_hash(root_hash=root_hash,
                                                         key=key)
        assert domain_state_serializer.deserialize(from_state)['amount'] == \
            get_payload_data(reply[1]['result'])['amount']
def deduct_fees(self, request, cons_time, ledger_id, seq_no, txn):
    txn_type = request.operation[TXN_TYPE]
    fees_key = "{}#{}".format(txn_type, seq_no)
    if txn_type != XFER_PUBLIC and FeesAuthorizer.has_fees(request):
        inputs, outputs, signatures = getattr(request, f.FEES.nm)
        # This is correct since FEES is changed from config ledger whose
        # transactions have no fees
        fees = FeesAuthorizer.calculate_fees_from_req(self.utxo_cache,
                                                      request)
        sigs = {i[ADDRESS]: s for i, s in zip(inputs, signatures)}
        txn = {
            OPERATION: {
                TXN_TYPE: FEE_TXN,
                INPUTS: inputs,
                OUTPUTS: outputs,
                REF: self.get_ref_for_txn_fees(ledger_id, seq_no),
                FEES: fees,
            },
            f.SIGS.nm: sigs,
            f.REQ_ID.nm: get_req_id(txn),
            f.PROTOCOL_VERSION.nm: 2,
        }
        txn = reqToTxn(txn)
        self.token_ledger.append_txns_metadata([txn], txn_time=cons_time)
        _, txns = self.token_ledger.appendTxns(
            [TokenReqHandler.transform_txn_for_ledger(txn)])
        self.updateState(txns)
        self.fee_txns_in_current_batch += 1
        self.deducted_fees[fees_key] = fees
        return txn
def prepare_buy_for_state(txn):
    from common.serializers.serialization import domain_state_serializer
    identifier = get_from(txn)
    req_id = get_req_id(txn)
    value = domain_state_serializer.serialize(
        {"amount": get_payload_data(txn)['amount']})
    key = TestDomainRequestHandler.prepare_buy_key(identifier, req_id)
    return key, value
def apply_request(self, request: Request, batch_ts, prev_result):
    txn_type = request.operation[TXN_TYPE]
    seq_no = get_seq_no(prev_result)
    cons_time = get_txn_time(prev_result)
    if FeesAuthorizer.has_fees(request):
        inputs, outputs, signatures = getattr(request, f.FEES.nm)
        # This is correct since FEES is changed from config ledger whose
        # transactions have no fees
        fees = FeesAuthorizer.calculate_fees_from_req(self.utxo_cache,
                                                      request)
        sigs = {i[ADDRESS]: s for i, s in zip(inputs, signatures)}
        txn = {
            OPERATION: {
                TXN_TYPE: FEE_TXN,
                INPUTS: inputs,
                OUTPUTS: outputs,
                REF: self._get_ref_for_txn_fees(seq_no),
                FEES: fees,
            },
            f.SIGS.nm: sigs,
            f.REQ_ID.nm: get_req_id(prev_result),
            f.PROTOCOL_VERSION.nm: 2,
        }
        txn = reqToTxn(txn)
        self.token_ledger.append_txns_metadata([txn], txn_time=cons_time)
        _, txns = self.token_ledger.appendTxns([txn])
        self.update_token_state(txn, request)
        self._fees_tracker.fees_in_current_batch += 1
        self._fees_tracker.add_deducted_fees(txn_type, seq_no, fees)
    return None, None, prev_result
def prepare_buy_for_state(txn):
    identifier = get_from(txn)
    req_id = get_req_id(txn)
    value = domain_state_serializer.serialize(
        {"amount": get_payload_data(txn)['amount']})
    key = BuyHandler.prepare_buy_key(identifier, req_id)
    return key, value
def test_proof_in_write_reply(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client):
    resp = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                     sdk_pool_handle, sdk_wallet_client, 1)

    req = resp[0][0]
    result = resp[0][1]['result']

    assert result
    assert get_type(result) == "buy"
    assert get_from(result) == req[f.IDENTIFIER.nm]
    assert get_req_id(result) == req[f.REQ_ID.nm]
    assert get_seq_no(result)
    assert get_txn_time(result)
    assert STATE_PROOF in result

    state_proof = result[STATE_PROOF]
    assert ROOT_HASH in state_proof
    assert MULTI_SIGNATURE in state_proof
    assert PROOF_NODES in state_proof

    multi_sig = state_proof[MULTI_SIGNATURE]
    assert MULTI_SIGNATURE_SIGNATURE in multi_sig
    assert MULTI_SIGNATURE_PARTICIPANTS in multi_sig
    assert MULTI_SIGNATURE_VALUE in multi_sig

    multi_sig_value = multi_sig[MULTI_SIGNATURE_VALUE]
    assert MULTI_SIGNATURE_VALUE_LEDGER_ID in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_STATE_ROOT in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_TXN_ROOT in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_TIMESTAMP in multi_sig_value

    assert validate_multi_signature(state_proof, txnPoolNodeSet)
    assert validate_proof_for_write(result)
def update_state(self, txn, prev_result, request, is_committed=False):
    self._validate_txn_type(txn)
    current_entry, revoc_def = self._get_current_revoc_entry_and_revoc_def(
        author_did=get_from(txn),
        revoc_reg_def_id=get_payload_data(txn)[REVOC_REG_DEF_ID],
        req_id=get_req_id(txn))
    writer_cls = self.get_revocation_strategy(revoc_def[ISSUANCE_TYPE])
    writer = writer_cls(self.state)
    writer.write(current_entry, txn)
def _addRevocRegEntry(self, txn, isCommitted=False) -> None:
    current_entry, revoc_def = self._get_current_revoc_entry_and_revoc_def(
        author_did=get_from(txn),
        revoc_reg_def_id=get_payload_data(txn)[REVOC_REG_DEF_ID],
        req_id=get_req_id(txn))
    writer_cls = self.get_revocation_strategy(
        revoc_def[VALUE][ISSUANCE_TYPE])
    writer = writer_cls(self.state)
    writer.write(current_entry, txn)
def _addRevocRegEntry(self, txn, isCommitted=False) -> None:
    current_entry, revoc_def = self._get_current_revoc_entry_and_revoc_def(
        author_did=get_from(txn),
        revoc_reg_def_id=get_payload_data(txn)[REVOC_REG_DEF_ID],
        req_id=get_req_id(txn)
    )
    writer_cls = self.get_revocation_strategy(
        revoc_def[VALUE][ISSUANCE_TYPE])
    writer = writer_cls(self.state)
    writer.write(current_entry, txn)
def put_into_seq_no_db(txn):
    # If there is no reqId, then it's genesis txn
    if get_req_id(txn) is None:
        return
    txn_new = copy.deepcopy(txn)
    operation = get_payload_data(txn_new)
    operation[TXN_TYPE] = get_type(txn_new)
    dct = {
        f.IDENTIFIER.nm: get_from(txn_new),
        f.REQ_ID.nm: get_req_id(txn_new),
        OPERATION: operation,
    }
    if get_protocol_version(txn_new) is not None:
        dct[f.PROTOCOL_VERSION.nm] = get_protocol_version(txn_new)
    digest = sha256(serialize_msg_for_signing(dct)).hexdigest().encode()
    seq_no = get_seq_no(txn_new)
    ledger_id = get_ledger_id_by_txn_type(operation[TXN_TYPE])
    line_to_record = str(ledger_id) + ReqIdrToTxn.delimiter + str(seq_no)
    dest_seq_no_db_storage.put(digest, line_to_record)
    return digest
def put_into_seq_no_db(txn):
    # If there is no reqId, then it's genesis txn
    if get_req_id(txn) is None:
        return
    txn_new = copy.deepcopy(txn)
    operation = get_payload_data(txn_new)
    operation[TXN_TYPE] = get_type(txn_new)
    dct = {
        f.IDENTIFIER.nm: get_from(txn_new),
        f.REQ_ID.nm: get_req_id(txn_new),
        OPERATION: operation,
    }
    if get_protocol_version(txn_new) is not None:
        dct[f.PROTOCOL_VERSION.nm] = get_protocol_version(txn_new)
    digest = sha256(serialize_msg_for_signing(dct)).hexdigest()
    seq_no = get_seq_no(txn_new)
    ledger_id = get_ledger_id_by_txn_type(operation[TXN_TYPE])
    line_to_record = str(ledger_id) + ReqIdrToTxn.delimiter + str(seq_no)
    dest_seq_no_db_storage.put(digest, line_to_record)
    return digest
def write(self, current_reg_entry, txn):
    txn = deepcopy(txn)
    txn_data = get_payload_data(txn)
    self.set_parameters_from_txn(
        author_did=get_from(txn),
        revoc_reg_def_id=txn_data.get(REVOC_REG_DEF_ID),
        req_id=get_req_id(txn))
    if current_reg_entry is not None:
        value_from_state = current_reg_entry.get(VALUE)
        assert value_from_state
        indices = value_from_state.get(ISSUED, [])
        value_from_txn = txn_data.get(VALUE)
        issued_from_txn = value_from_txn.get(ISSUED, [])
        revoked_from_txn = value_from_txn.get(REVOKED, [])
        # set with all previous issued minus revoked from txn
        result_indicies = set(indices).difference(revoked_from_txn)
        result_indicies.update(issued_from_txn)
        value_from_txn[REVOKED] = []
        value_from_txn[ISSUED] = list(result_indicies)
        txn_data[VALUE] = value_from_txn
    # contains already changed txn
    self.set_to_state(txn)
def write(self, current_reg_entry, txn):
    txn = deepcopy(txn)
    txn_data = get_payload_data(txn)
    self.set_parameters_from_txn(author_did=get_from(txn),
                                 revoc_reg_def_id=txn_data.get(REVOC_REG_DEF_ID),
                                 req_id=get_req_id(txn))
    if current_reg_entry is not None:
        value_from_state = current_reg_entry.get(VALUE)
        assert value_from_state
        indices = value_from_state.get(ISSUED, [])
        value_from_txn = txn_data.get(VALUE)
        issued_from_txn = value_from_txn.get(ISSUED, [])
        revoked_from_txn = value_from_txn.get(REVOKED, [])
        # set with all previous issued minus revoked from txn
        result_indicies = set(indices).difference(revoked_from_txn)
        result_indicies.update(issued_from_txn)
        value_from_txn[REVOKED] = []
        value_from_txn[ISSUED] = list(result_indicies)
        txn_data[VALUE] = value_from_txn
    # contains already changed txn
    self.set_to_state(txn)
    del txn
def test_get_req_id_none(txn):
    txn["txn"]["metadata"].pop("reqId", None)
    assert get_req_id(txn) is None
def test_get_req_id(txn):
    assert get_req_id(txn) == 1513945121191691
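The two tests above only exercise the reqId lookup, and the `txn` fixture they rely on is not shown. A minimal sketch of a fixture that would satisfy both, assuming `get_req_id` reads `txn["txn"]["metadata"]["reqId"]` as the pop in the first test implies; everything beyond that path is omitted and hypothetical:

import pytest


@pytest.fixture()
def txn():
    # Hypothetical minimal fixture: only the reqId path exercised by the two
    # tests above is populated; a real transaction carries many more fields.
    return {
        "txn": {
            "metadata": {
                "reqId": 1513945121191691,
            },
        },
    }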
def get_action_id(txn):
    seq_no = get_seq_no(txn) or ''
    if is_forced(txn):
        seq_no = ''
    return '{}{}'.format(get_req_id(txn), seq_no)
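A small worked example of the identifier format produced by get_action_id above, using hypothetical values and mirroring only the string formatting; it does not call the txn helpers:

# Hypothetical values, illustrating the format only.
req_id, seq_no = 1513945121191691, 42
assert '{}{}'.format(req_id, seq_no) == '151394512119169142'  # regular txn: reqId + seqNo
assert '{}{}'.format(req_id, '') == '1513945121191691'        # forced txn: seqNo dropped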
def migrate_txn_log(db_dir, db_name):

    def put_into_seq_no_db(txn):
        # If there is no reqId, then it's genesis txn
        if get_req_id(txn) is None:
            return
        txn_new = copy.deepcopy(txn)
        operation = get_payload_data(txn_new)
        operation[TXN_TYPE] = get_type(txn_new)
        dct = {
            f.IDENTIFIER.nm: get_from(txn_new),
            f.REQ_ID.nm: get_req_id(txn_new),
            OPERATION: operation,
        }
        if get_protocol_version(txn_new) is not None:
            dct[f.PROTOCOL_VERSION.nm] = get_protocol_version(txn_new)
        digest = sha256(serialize_msg_for_signing(dct)).hexdigest().encode()
        seq_no = get_seq_no(txn_new)
        ledger_id = get_ledger_id_by_txn_type(operation[TXN_TYPE])
        line_to_record = str(ledger_id) + ReqIdrToTxn.delimiter + str(seq_no)
        dest_seq_no_db_storage.put(digest, line_to_record)
        return digest

    new_db_name = db_name + '_new'
    old_path = os.path.join(db_dir, db_name)
    new_path = os.path.join(db_dir, new_db_name)
    new_seqno_db_name = config.seqNoDbName + '_new'
    try:
        dest_seq_no_db_storage = initKeyValueStorage(config.reqIdToTxnStorage,
                                                     db_dir,
                                                     new_seqno_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new seq_no_db storage")
        return False

    # open new and old ledgers
    try:
        src_storage = KeyValueStorageRocksdbIntKeys(db_dir,
                                                    db_name,
                                                    read_only=True)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open old ledger: {}".format(
            os.path.join(db_dir, db_name)))
        return False

    try:
        dest_storage = KeyValueStorageRocksdbIntKeys(db_dir, new_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new ledger: {}".format(
            os.path.join(db_dir, new_db_name)))
        return False

    # put values from old ledger to the new one
    try:
        for key, val in src_storage.iterator():
            key = key.decode()
            val = ledger_txn_serializer.deserialize(val)
            new_val = transform_to_new_format(txn=val, seq_no=int(key))
            digest = put_into_seq_no_db(new_val)
            # add digest into txn
            if get_req_id(new_val):
                new_val[TXN_PAYLOAD][TXN_PAYLOAD_METADATA][
                    TXN_PAYLOAD_METADATA_DIGEST] = digest
            new_val = ledger_txn_serializer.serialize(new_val)
            dest_storage.put(key, new_val)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error(
            "Could not put key/value to the new ledger '{}'".format(db_name))
        return False

    src_storage.close()
    dest_storage.close()
    dest_seq_no_db_storage.close()

    # Remove old ledger
    try:
        shutil.rmtree(old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not remove old ledger: {}".format(old_path))
        return False

    # Rename new ledger to old one
    try:
        shutil.move(new_path, old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error(
            "Could not rename temporary new ledger from '{}' to '{}'".format(
                new_path, old_path))
        return False

    try:
        set_own_perm("indy", old_path)
    except Exception:
        pass

    return True
def gen_state_key(self, txn):
    identifier = get_from(txn)
    req_id = get_req_id(txn)
    return self.prepare_buy_key(identifier, req_id)
def migrate_txn_log(db_dir, db_name):

    def put_into_seq_no_db(txn):
        # If there is no reqId, then it's genesis txn
        if get_req_id(txn) is None:
            return
        txn_new = copy.deepcopy(txn)
        operation = get_payload_data(txn_new)
        operation[TXN_TYPE] = get_type(txn_new)
        dct = {
            f.IDENTIFIER.nm: get_from(txn_new),
            f.REQ_ID.nm: get_req_id(txn_new),
            OPERATION: operation,
        }
        if get_protocol_version(txn_new) is not None:
            dct[f.PROTOCOL_VERSION.nm] = get_protocol_version(txn_new)
        digest = sha256(serialize_msg_for_signing(dct)).hexdigest()
        seq_no = get_seq_no(txn_new)
        ledger_id = get_ledger_id_by_txn_type(operation[TXN_TYPE])
        line_to_record = str(ledger_id) + ReqIdrToTxn.delimiter + str(seq_no)
        dest_seq_no_db_storage.put(digest, line_to_record)
        return digest

    new_db_name = db_name + '_new'
    old_path = os.path.join(db_dir, db_name)
    new_path = os.path.join(db_dir, new_db_name)
    new_seqno_db_name = config.seqNoDbName + '_new'
    try:
        dest_seq_no_db_storage = initKeyValueStorage(config.reqIdToTxnStorage,
                                                     db_dir,
                                                     new_seqno_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new seq_no_db storage")
        return False

    # open new and old ledgers
    try:
        src_storage = KeyValueStorageRocksdbIntKeys(db_dir,
                                                    db_name,
                                                    read_only=True)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open old ledger: {}".format(os.path.join(db_dir, db_name)))
        return False

    try:
        dest_storage = KeyValueStorageRocksdbIntKeys(db_dir, new_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new ledger: {}".format(os.path.join(db_dir, new_db_name)))
        return False

    # put values from old ledger to the new one
    try:
        for key, val in src_storage.iterator():
            key = key.decode()
            val = ledger_txn_serializer.deserialize(val)
            new_val = transform_to_new_format(txn=val, seq_no=int(key))
            digest = put_into_seq_no_db(new_val)
            # add digest into txn
            if get_req_id(new_val):
                new_val[TXN_PAYLOAD][TXN_PAYLOAD_METADATA][TXN_PAYLOAD_METADATA_DIGEST] = digest
            new_val = ledger_txn_serializer.serialize(new_val)
            dest_storage.put(key, new_val)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not put key/value to the new ledger '{}'".format(db_name))
        return False

    src_storage.close()
    dest_storage.close()
    dest_seq_no_db_storage.close()

    # Remove old ledger
    try:
        shutil.rmtree(old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not remove old ledger: {}".format(old_path))
        return False

    # Rename new ledger to old one
    try:
        shutil.move(new_path, old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not rename temporary new ledger from '{}' to '{}'"
                     .format(new_path, old_path))
        return False

    try:
        set_own_perm("indy", old_path)
    except Exception:
        pass

    return True