def test_state_proof_returned_for_delta_with_from_earlier( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, send_revoc_reg_entry_by_default, build_get_revoc_reg_entry=None, build_get_revoc_reg_delta=None):
    """GET_REVOC_REG_DELTA with FROM earlier than the first entry still returns a valid state proof."""
    rev_reg_req, rev_reg_reply = send_revoc_reg_entry_by_default
    reg_delta_req = copy.deepcopy(build_get_revoc_reg_delta)
    # FROM predates the written entry; TO is safely in the future, so the
    # whole delta range covers the single entry written by the fixture.
    reg_delta_req['operation'][FROM] = get_utc_epoch() - 1000
    reg_delta_req['operation'][REVOC_REG_DEF_ID] = rev_reg_req['operation'][REVOC_REG_DEF_ID]
    reg_delta_req['operation'][TO] = get_utc_epoch() + 1000
    sdk_reply = sdk_send_and_check([json.dumps(reg_delta_req)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    check_valid_proof(reply)
def test_get_msgs_from_rxMsgs_queue(create_node_and_not_start, looper):
    """While handling ViewChangeStartMessage, rxMsgs are drained: 3PC msgs go to the master replica, others are stashed."""
    node = create_node_and_not_start
    node.view_changer = ViewChanger(node)
    node.view_changer.view_no = 0
    """pre_view_change stage"""
    node.view_changer.startViewChange(1)
    # View number must not advance until the view change completes.
    assert node.view_changer.view_no == 0
    prepare = Prepare( 0, 0, 1, get_utc_epoch(), 'f99937241d4c891c08e92a3cc25966607315ca66b51827b170d492962d58a9be', 'CZecK1m7VYjSNCC7pGHj938DSW2tfbqoJp1bMJEtFqvG', '7WrAMboPTcMaQCU1raoj28vnhu2bPMMd2Lr9tEcsXeCJ')
    inst_change = InstanceChange(1, 25)
    m = node.nodeInBox.popleft()
    assert isinstance(m[0], ViewChangeStartMessage)
    # Register a remote so messages appended to rxMsgs have a known sender.
    node.nodestack.addRemote('someNode', genHa(), b'1DYuELN<SHbv1?NJ=][4De%^Hge887B0I!s<YGdD', 'pubkey')
    node.nodestack.rxMsgs.append((json.dumps(prepare._asdict()), 'pubkey'))
    node.nodestack.rxMsgs.append((json.dumps(inst_change._asdict()), 'pubkey'))
    # Bypass view-no validation so the Prepare is accepted in this view.
    node.msgHasAcceptableViewNo = lambda *args, **kwargs: True
    """While processing ViewChangeStartMessage from nodeInBox queue, should be: - move msgs from rxMsgs queue to nodeInBox queue - process all 3PC msgs (for Prepare msg it should be moved to inBox queue of master_replica) - add ViewChangeContinue msg into master_replica's inBox queue - all not 3PC msgs will be stashed in strategy queue"""
    looper.run(node.process_one_node_message(m))
    m = node.master_replica.inBox.popleft()
    assert isinstance(m[0], Prepare)
    m = node.master_replica.inBox.popleft()
    assert isinstance(m, ViewChangeContinueMessage)
    m = node.view_changer.pre_vc_strategy.stashedNodeInBox.popleft()
    assert isinstance(m[0], InstanceChange)
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare carrying a full batch (Max3PCBatchSize requests) serializes within MSG_LEN_LIMIT."""
    config = getConfig()
    batch_size = config.Max3PCBatchSize
    requests = [Request(signatures={})] * batch_size
    req_idr = [req.digest for req in requests]
    digest = Replica.batchDigest(requests)
    state_root = Base58Serializer().serialize(BLANK_ROOT)
    txn_root = Ledger.hashToStr(CompactMerkleTree().root_hash)
    pp = PrePrepare( 0, 0, 0, get_utc_epoch(), req_idr, init_discarded(), digest, 0, state_root, txn_root, 0, True)
    # The serialized form must fit in a single ZStack message.
    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def testPrePrepareWithHighSeqNo(looper, txnPoolNodeSet, propagated1):
    """A PrePrepare with a too-high ppSeqNo raises a WRONG_PPSEQ_NO suspicion on every non-primary."""
    def chk():
        # Each non-primary must have recorded exactly one suspicion.
        for r in getNonPrimaryReplicas(txnPoolNodeSet, instId):
            nodeSuspicions = len(getNodeSuspicions( r.node, Suspicions.WRONG_PPSEQ_NO.code))
            assert nodeSuspicions == 1

    def checkPreprepare(replica, viewNo, ppSeqNo, req, numOfPrePrepares):
        # NOTE(review): numOfPrePrepares is unused here — presumably kept for
        # signature compatibility with eventually(); confirm before removing.
        assert (replica.prePrepares[viewNo, ppSeqNo][0]) == \
            (req.identifier, req.reqId, req.digest)

    primary = getPrimaryReplica(txnPoolNodeSet, instId)
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    req = propagated1.reqDigest
    primary.doPrePrepare(req)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    # First, wait until the legitimate PrePrepare reached every non-primary.
    for np in nonPrimaryReplicas:
        looper.run( eventually(checkPreprepare, np, primary.viewNo, primary.lastPrePrepareSeqNo - 1, req, 1, retryWait=.5, timeout=timeout))
    newReqDigest = (req.identifier, req.reqId + 1, req.digest)
    # ppSeqNo jumps by 2 past the last sent one — this gap is the fault under test.
    incorrectPrePrepareReq = PrePrepare(instId, primary.viewNo, primary.lastPrePrepareSeqNo + 2, *newReqDigest, get_utc_epoch())
    primary.send(incorrectPrePrepareReq, TPCStat.PrePrepareSent)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
def fake_view_changer(request, tconf):
    """Build a ViewChanger around a FakeSomething node; viewNo comes from request.param."""
    node_count = 4
    node_stack = FakeSomething( name="fake stack", connecteds={"Alpha", "Beta", "Gamma", "Delta"}, conns={"Alpha", "Beta", "Gamma", "Delta"} )
    # Monitor stub reports a healthy master and no degraded backups.
    monitor = FakeSomething( isMasterDegraded=lambda: False, areBackupsDegraded=lambda: [], prettymetrics='' )
    node = FakeSomething( name="SomeNode", viewNo=request.param, quorums=Quorums(getValueFromModule(request, 'nodeCount', default=node_count)), nodestack=node_stack, utc_epoch=lambda *args: get_utc_epoch(), config=tconf, monitor=monitor, discard=lambda a, b, c: print(b), primaries_disconnection_times=[None] * getRequiredInstances(node_count), master_primary_name='Alpha', master_replica=FakeSomething(instId=0) )
    view_changer = ViewChanger(node)
    return view_changer
def create_prepare_params(view_no, pp_seq_no, state_root):
    """Return the positional argument list for building a Prepare message."""
    txn_root = '1' * 32
    params = [
        0,                  # instance id
        view_no,
        pp_seq_no,
        get_utc_epoch(),
        "random digest",
        state_root,
        txn_root,
    ]
    return params
def create_invalid_batch_committed():
    """Return a BatchCommitted whose request list holds bare strings (invalid payload)."""
    bad_requests = ["aaaa", "bbbb"]
    committed_ts = get_utc_epoch()
    return BatchCommitted(bad_requests,
                          DOMAIN_LEDGER_ID,
                          committed_ts,
                          generate_state_root(),
                          generate_state_root(),
                          1,
                          2)
def pre_prepare_incorrect(state_root, request):
    """Build a PrePrepare corrupted in the single field named by request.param."""
    # NOTE(review): an unrecognized request.param leaves `params` unbound and
    # raises UnboundLocalError — presumably the fixture params cover all four
    # cases; confirm against the parametrization.
    if request.param == 'state_root':
        params = create_pre_prepare_params(state_root=generate_state_root())
    elif request.param == 'ledger_id':
        params = create_pre_prepare_params(state_root=state_root, ledger_id=DOMAIN_LEDGER_ID)
    elif request.param == 'timestamp':
        params = create_pre_prepare_params(state_root=state_root, timestamp=get_utc_epoch() + 1000)
    elif request.param == 'txn_root':
        params = create_pre_prepare_params(state_root=state_root, txn_root=generate_state_root())
    return PrePrepare(*params)
def save_multi_sig(request_handler):
    """Store a fake 3-participant multi-signature in the handler's BLS store; return it as a dict."""
    multi_sig_value = MultiSignatureValue(ledger_id=DOMAIN_LEDGER_ID, state_root_hash=state_roots_serializer.serialize( bytes(request_handler.state.committedHeadHash)), txn_root_hash='2' * 32, pool_state_root_hash='1' * 32, timestamp=get_utc_epoch())
    multi_sig = MultiSignature('0' * 32, ['Alpha', 'Beta', 'Gamma'], multi_sig_value)
    request_handler.bls_store.put(multi_sig)
    return multi_sig.as_dict()
def create_valid_batch_committed():
    """Return a well-formed BatchCommitted carrying ten random request dicts."""
    request_dicts = [
        request.as_dict
        for request in sdk_random_request_objects(
            10,
            identifier="1" * 16,
            protocol_version=CURRENT_PROTOCOL_VERSION)
    ]
    return BatchCommitted(request_dicts,
                          DOMAIN_LEDGER_ID,
                          get_utc_epoch(),
                          generate_state_root(),
                          generate_state_root(),
                          1,
                          2)
def send_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
    """Broadcast a Prepare (from the primary node) to all nodes in the pool."""
    prepare = Prepare( 0, view_no, pp_seq_no, get_utc_epoch(), "random digest", state_root or '0' * 44, txn_root or '0' * 44 )
    primary_node = getPrimaryReplica(nodes).node
    sendMessageToAll(nodes, primary_node, prepare)
def test_state_proof_returned_for_get_revoc_reg(looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_entry_by_default, build_get_revoc_reg_entry):
    """GET_REVOC_REG for an existing entry returns a reply with a valid state proof."""
    rev_entry_req, reg_reply = send_revoc_reg_entry_by_default
    get_revoc_reg = copy.deepcopy(build_get_revoc_reg_entry)
    get_revoc_reg['operation'][REVOC_REG_DEF_ID] = rev_entry_req['operation'][REVOC_REG_DEF_ID]
    # Timestamp in the future ensures the written entry is within range.
    get_revoc_reg['operation'][TIMESTAMP] = get_utc_epoch() + 1000
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    check_valid_proof(reply)
def test_utc_epoch():
    """get_utc_epoch advances ~1s per wall-clock second regardless of the process TZ setting."""
    t1 = get_utc_epoch()
    time.sleep(1)
    t2 = get_utc_epoch()
    assert 1 <= t2 - t1 <= 2
    # Remember original TZ so it can be restored at the end.
    old_tz = os.environ.get('TZ')
    t3 = get_utc_epoch()
    os.environ['TZ'] = 'Europe/London'
    time.tzset()
    time.sleep(1)
    t4 = get_utc_epoch()
    # Epoch delta unaffected by the timezone switch.
    assert 1 <= t4 - t3 <= 2
    t5 = get_utc_epoch()
    # St_Johns has a half-hour UTC offset — a good edge case.
    os.environ['TZ'] = 'America/St_Johns'
    time.tzset()
    time.sleep(1)
    t6 = get_utc_epoch()
    assert 1 <= t6 - t5 <= 2
    # Restore the original TZ environment.
    if old_tz is None:
        del os.environ['TZ']
    else:
        os.environ['TZ'] = old_tz
def test_send_get_revoc_reg_later_then_first_entry(looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_entry_by_default, build_get_revoc_reg_entry):
    """GET_REVOC_REG with a timestamp after the first entry returns that entry's data."""
    rev_entry_req, reg_reply = send_revoc_reg_entry_by_default
    get_revoc_reg = copy.deepcopy(build_get_revoc_reg_entry)
    get_revoc_reg['operation'][REVOC_REG_DEF_ID] = rev_entry_req['operation'][REVOC_REG_DEF_ID]
    get_revoc_reg['operation'][TIMESTAMP] = get_utc_epoch() + 1000
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    # Reply must echo the queried registry id and the written accumulator.
    assert rev_entry_req['operation'][REVOC_REG_DEF_ID] == reply['result'][REVOC_REG_DEF_ID]
    assert rev_entry_req['operation'][VALUE][ACCUM] == reply['result']['data'][VALUE][ACCUM]
def test_send_with_only_to_by_demand(looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_entry_by_demand, build_get_revoc_reg_delta):
    """GET_REVOC_REG_DELTA with only TO (no FROM) returns the accumulated state at TO."""
    rev_entry_req, reg_reply = send_revoc_reg_entry_by_demand
    get_revoc_reg_delta = copy.deepcopy(build_get_revoc_reg_delta)
    # Dropping FROM makes this an absolute query instead of a delta.
    del get_revoc_reg_delta['operation'][FROM]
    get_revoc_reg_delta['operation'][REVOC_REG_DEF_ID] = rev_entry_req['operation'][REVOC_REG_DEF_ID]
    get_revoc_reg_delta['operation'][TO] = get_utc_epoch() + 1000
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg_delta)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    assert rev_entry_req['operation'][REVOC_REG_DEF_ID] == reply['result'][REVOC_REG_DEF_ID]
    assert rev_entry_req['operation'][VALUE][ACCUM] == reply['result'][DATA][VALUE][ACCUM_TO][VALUE][ACCUM]
    assert rev_entry_req['operation'][VALUE][ISSUED] == reply['result'][DATA][VALUE][ISSUED]
def test_txn_author_agreement_update_ratification_fails(looper, set_txn_author_agreement_aml, sdk_pool_handle, sdk_wallet_trustee, ratified_offset):
    """Re-sending an existing TAA version with a shifted ratification timestamp is rejected."""
    # Write random TAA
    version, text, ratified = randomString(16), randomString(1024), get_utc_epoch() - 600
    sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version, text=text, ratified=ratified)
    # Try to update ratification timestamp
    with pytest.raises(RequestRejectedException):
        sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version, text=text, ratified=ratified + ratified_offset)
def send_pre_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
    """Broadcast a PrePrepare to the pool, first as the primary and then from every non-primary."""
    pre_prepare = PrePrepare(0, view_no, pp_seq_no, get_utc_epoch(), ["requests digest"], 0, "random digest", DOMAIN_LEDGER_ID, state_root or '0' * 44, txn_root or '0' * 44, 0, True)
    primary_node = getPrimaryReplica(nodes).node
    non_primary_nodes = set(nodes) - {primary_node}
    sendMessageToAll(nodes, primary_node, pre_prepare)
    # Also send from non-primaries so recipients see the message from every peer.
    for non_primary_node in non_primary_nodes:
        sendMessageToAll(nodes, non_primary_node, pre_prepare)
def __init__(self, viewNo, quorums, ledger_ids):
    """Initialize a fake node stub with a fixed 4-node stack, participating mode, and no view change in progress."""
    node_stack = FakeSomething( name="fake stack", connecteds={"Alpha", "Beta", "Gamma", "Delta"} )
    super().__init__( name="fake node", ledger_ids=ledger_ids, viewNo=viewNo, quorums=quorums, nodestack=node_stack, utc_epoch=lambda *args: get_utc_epoch(), mode=Mode.participating, view_change_in_progress=False )
def test_state_proof_returned_for_delta_with_None_reply( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, send_revoc_reg_entry_by_default, build_get_revoc_reg_delta):
    """GET_REVOC_REG_DELTA with TO before the first entry yields no data — and no state proof."""
    rev_reg_req, rev_reg_reply = send_revoc_reg_entry_by_default
    reg_delta_req = copy.deepcopy(build_get_revoc_reg_delta)
    del reg_delta_req['operation'][FROM]
    reg_delta_req['operation'][REVOC_REG_DEF_ID] = rev_reg_req['operation'][REVOC_REG_DEF_ID]
    # TO predates the entry written by the fixture, so nothing matches.
    reg_delta_req['operation'][TO] = get_utc_epoch() - 1000
    sdk_reply = sdk_send_and_check([json.dumps(reg_delta_req)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    assert STATE_PROOF not in reply['result']
def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet, nodeSetWithNodeAddedAfterSomeTxns, newNodeCaughtUp, tdirWithPoolTxns, tconf, allPluginsPath):
    """
    Node discards 3-phase or ViewChangeDone messages from view nos that it does not know of (view nos before it joined the pool) :return:
    """
    looper, nodeX, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    viewNo = nodeX.viewNo
    # Force two view changes: node discards msgs which have viewNo
    # at least two less than node's. Current protocol implementation
    # needs to hold messages from the previous view as well as
    # from the current view.
    for i in range(2):
        ensure_view_change(looper, txnPoolNodeSet)
        waitNodeDataEquality(looper, nodeX, *txnPoolNodeSet[:-1])
        checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
    sender = txnPoolNodeSet[0]
    rid_x_node = sender.nodestack.getRemote(nodeX.name).uid
    messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime()
    # 3 pc msg (PrePrepare) needs to be discarded
    primaryRepl = getPrimaryReplica(txnPoolNodeSet)
    three_pc = PrePrepare( 0, viewNo, 10, get_utc_epoch(), [[wallet.defaultId, Request.gen_req_id()]], 1, "random digest", DOMAIN_LEDGER_ID, primaryRepl.stateRootHash(DOMAIN_LEDGER_ID), primaryRepl.txnRootHash(DOMAIN_LEDGER_ID), )
    sender.send(three_pc, rid_x_node)
    # The receiver must discard the message with an 'un-acceptable viewNo' reason.
    looper.run( eventually(checkDiscardMsg, [ nodeX, ], three_pc, 'un-acceptable viewNo', retryWait=1, timeout=messageTimeout))
def create_valid_batch_committed():
    """Return a well-formed BatchCommitted (extended signature) with ten random request dicts."""
    request_dicts = [
        request.as_dict
        for request in sdk_random_request_objects(
            10,
            identifier="1" * 16,
            protocol_version=CURRENT_PROTOCOL_VERSION)
    ]
    return BatchCommitted(request_dicts,
                          DOMAIN_LEDGER_ID,
                          0,
                          1,
                          1,
                          get_utc_epoch(),
                          generate_state_root(),
                          generate_state_root(),
                          1,
                          2,
                          generate_state_root(),
                          ['Alpha', 'Beta'],
                          0)
def fake_view_changer(request, tconf):
    """Minimal ViewChanger fixture: fake node with 4-node stack; viewNo comes from request.param."""
    node_stack = FakeSomething(name="fake stack", connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    monitor = FakeSomething(isMasterDegraded=lambda: False, )
    node = FakeSomething( name="SomeNode", viewNo=request.param, quorums=Quorums(getValueFromModule(request, 'nodeCount', default=4)), nodestack=node_stack, utc_epoch=lambda *args: get_utc_epoch(), config=tconf, monitor=monitor, discard=lambda a, b, c: print(b), )
    view_changer = ViewChanger(node)
    return view_changer
def create_bls_multi_sig(encoded_root_hash):
    """Return a fake 4-participant MultiSignature over the given encoded state root."""
    fake_pool_root = base58.b58encode(b"somefakepoolroothashsomefakepoolroothash").decode("utf-8")
    fake_txn_root = base58.b58encode(b"somefaketxnroothashsomefaketxnroothash").decode("utf-8")
    sig_value = MultiSignatureValue(ledger_id=1,
                                    state_root_hash=encoded_root_hash,
                                    pool_state_root_hash=fake_pool_root,
                                    txn_root_hash=fake_txn_root,
                                    timestamp=get_utc_epoch())
    signers = ["q" * 32, "w" * 32, "e" * 32, "r" * 32]
    return MultiSignature("1q" * 16, signers, sig_value)
def create_bls_multi_sig(encoded_root_hash):
    """Build a dummy MultiSignature (4 signers) anchored at the supplied state root hash."""
    roots = {
        'pool': base58.b58encode(b"somefakepoolroothashsomefakepoolroothash").decode("utf-8"),
        'txn': base58.b58encode(b"somefaketxnroothashsomefaketxnroothash").decode("utf-8"),
    }
    value = MultiSignatureValue(ledger_id=1,
                                state_root_hash=encoded_root_hash,
                                pool_state_root_hash=roots['pool'],
                                txn_root_hash=roots['txn'],
                                timestamp=get_utc_epoch())
    participants = [ch * 32 for ch in ("q", "w", "e", "r")]
    return MultiSignature("1q" * 16, participants, value)
def test_send_earlier_then_first_entry_by_demand( looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_entry_by_demand, build_get_revoc_reg_delta):
    """GET_REVOC_REG_DELTA with TO before the first by-demand entry returns an empty (None) result."""
    rev_entry_req, reg_reply = send_revoc_reg_entry_by_demand
    get_revoc_reg_delta = copy.deepcopy(build_get_revoc_reg_delta)
    del get_revoc_reg_delta['operation'][FROM]
    get_revoc_reg_delta['operation'][REVOC_REG_DEF_ID] = rev_entry_req[ 'operation'][REVOC_REG_DEF_ID]
    # TO predates the written entry, so nothing should match.
    get_revoc_reg_delta['operation'][TO] = get_utc_epoch() - 1000
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg_delta)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    assert DATA in reply['result']
    assert reply['result'][DATA] is None
    assert reply['result'][f.SEQ_NO.nm] is None
    assert reply['result'][TXN_TIME] is None
def test_send_with_wrong_rev_reg_id_default(looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_entry_by_default, build_get_revoc_reg_delta):
    """GET_REVOC_REG_DELTA for a non-existent registry id echoes the id but carries no value."""
    rev_entry_req, reg_reply = send_revoc_reg_entry_by_default
    get_revoc_reg_delta = copy.deepcopy(build_get_revoc_reg_delta)
    del get_revoc_reg_delta['operation'][FROM]
    # A random id that no registry was ever created under.
    get_revoc_reg_delta['operation'][REVOC_REG_DEF_ID] = randomString(30)
    get_revoc_reg_delta['operation'][TO] = get_utc_epoch() + 1000
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg_delta)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    assert reply['result'][DATA][REVOC_REG_DEF_ID] == get_revoc_reg_delta[ 'operation'][REVOC_REG_DEF_ID]
    assert VALUE not in reply['result'][DATA]
    assert REVOC_TYPE not in reply['result'][DATA]
def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet, sdk_node_set_with_node_added_after_some_txns, sdk_new_node_caught_up, allPluginsPath, sdk_wallet_client):
    """
    Node discards 3-phase or ViewChangeDone messages from view nos that it does not know of (view nos before it joined the pool) :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    viewNo = new_node.viewNo
    # Force two view changes: node discards msgs which have viewNo
    # at least two less than node's. Current protocol implementation
    # needs to hold messages from the previous view as well as
    # from the current view.
    for i in range(2):
        ensure_view_change(looper, txnPoolNodeSet)
        waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
        checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
    sender = txnPoolNodeSet[0]
    rid_x_node = sender.nodestack.getRemote(new_node.name).uid
    messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime()
    # 3 pc msg (PrePrepare) needs to be discarded
    _, did = sdk_wallet_client
    primaryRepl = getPrimaryReplica(txnPoolNodeSet)
    three_pc = PrePrepare( 0, viewNo, 10, get_utc_epoch(), ["random request digest"], init_discarded(), "random digest", DOMAIN_LEDGER_ID, primaryRepl.stateRootHash(DOMAIN_LEDGER_ID), primaryRepl.txnRootHash(DOMAIN_LEDGER_ID), 0, True )
    sender.send(three_pc, rid_x_node)
    # The new node must discard the stale-view message with this reason.
    looper.run(eventually(checkDiscardMsg, [new_node, ], three_pc, 'un-acceptable viewNo', retryWait=1, timeout=messageTimeout))
def nonPrimarySeesCorrectNumberOfPREPREPAREs():
    """
    1. no of PRE-PREPARE as seen by processPrePrepare method for non-primaries must be 1; whn zero faulty nodes in system. 2. no of PRE-PREPARE as seen by processPrePrepare method for non-primaries must be greater than or equal to 0; with faults in system.
    """
    expectedPrePrepareRequest = PrePrepare( instId, primary.viewNo, primary.lastPrePrepareSeqNo, get_utc_epoch(), [propagated1.digest], init_discarded(), Replica.batchDigest([propagated1, ]), DOMAIN_LEDGER_ID, primary.stateRootHash(DOMAIN_LEDGER_ID), primary.txnRootHash(DOMAIN_LEDGER_ID), 0, True, primary.stateRootHash(POOL_LEDGER_ID), primary.txnRootHash(AUDIT_LEDGER_ID) )
    passes = 0
    for npr in nonPrimaryReplicas:
        # Compare all PrePrepare fields except index 3 (the timestamp), which
        # cannot match the locally-built expected message exactly.
        actualMsgs = len([param for param in getAllArgs(npr, npr.processPrePrepare) if (param['pre_prepare'][0:3] + param['pre_prepare'][4:], param['sender']) == ( expectedPrePrepareRequest[0:3] + expectedPrePrepareRequest[4:], primary.name)])
        numOfMsgsWithZFN = 1
        numOfMsgsWithFaults = 0
        passes += int(msgCountOK(nodesSize, faultyNodes, actualMsgs, numOfMsgsWithZFN, numOfMsgsWithFaults))
    assert passes >= len(nonPrimaryReplicas) - faultyNodes, \
        'Non-primary sees correct number pre-prepares - {}'.format(passes)
def __init__(self, viewNo, quorums, ledger_ids):
    """Initialize an extended fake node stub: participating mode, no view change, stubbed batch/apply hooks."""
    node_stack = FakeSomething( name="fake stack", connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    # onBatchCreated / applyReq are no-op stubs that always report success.
    super().__init__(name="fake node", ledger_ids=ledger_ids, viewNo=viewNo, quorums=quorums, nodestack=node_stack, utc_epoch=lambda *args: get_utc_epoch(), mode=Mode.participating, view_change_in_progress=False, pre_view_change_in_progress=False, requests=Requests(), onBatchCreated=lambda self, *args, **kwargs: True, applyReq=lambda self, *args, **kwargs: True, primaries_batch_needed=False, primaries=[])
def send_pre_prepare(view_no, pp_seq_no, wallet, nodes, state_root=None, txn_root=None):
    """Broadcast a PrePrepare (built from the wallet's next request id) from the primary and every non-primary."""
    last_req_id = wallet._getIdData().lastReqId or 0
    pre_prepare = PrePrepare(0, view_no, pp_seq_no, get_utc_epoch(), [(wallet.defaultId, last_req_id + 1)], 0, "random digest", DOMAIN_LEDGER_ID, state_root or '0' * 44, txn_root or '0' * 44)
    primary_node = getPrimaryReplica(nodes).node
    non_primary_nodes = set(nodes) - {primary_node}
    sendMessageToAll(nodes, primary_node, pre_prepare)
    for non_primary_node in non_primary_nodes:
        sendMessageToAll(nodes, non_primary_node, pre_prepare)
def nonPrimarySeesCorrectNumberOfPREPREPAREs():
    """
    1. no of PRE-PREPARE as seen by processPrePrepare method for non-primaries must be 1; whn zero faulty nodes in system. 2. no of PRE-PREPARE as seen by processPrePrepare method for non-primaries must be greater than or equal to 0; with faults in system.
    """
    expectedPrePrepareRequest = PrePrepare( instId, primary.viewNo, primary.lastPrePrepareSeqNo, get_utc_epoch(), [propagated1.digest], init_discarded(), Replica.batchDigest([propagated1, ]), DOMAIN_LEDGER_ID, primary.stateRootHash(DOMAIN_LEDGER_ID), primary.txnRootHash(DOMAIN_LEDGER_ID), 0, True, primary.stateRootHash(POOL_LEDGER_ID) )
    passes = 0
    for npr in nonPrimaryReplicas:
        # Slicing [0:3] + [4:] skips index 3, the timestamp field, when comparing.
        actualMsgs = len([param for param in getAllArgs(npr, npr.processPrePrepare) if (param['pre_prepare'][0:3] + param['pre_prepare'][4:], param['sender']) == ( expectedPrePrepareRequest[0:3] + expectedPrePrepareRequest[4:], primary.name)])
        numOfMsgsWithZFN = 1
        numOfMsgsWithFaults = 0
        passes += int(msgCountOK(nodesSize, faultyNodes, actualMsgs, numOfMsgsWithZFN, numOfMsgsWithFaults))
    assert passes >= len(nonPrimaryReplicas) - faultyNodes, \
        'Non-primary sees correct number pre-prepares - {}'.format(passes)
def create_observed_data(seq_no_start=1, seq_no_end=5):
    """Return an ObservedData BATCH wrapping a BatchCommitted for the given seq-no range (inclusive)."""
    how_many = seq_no_end - seq_no_start + 1
    request_dicts = [
        request.as_dict
        for request in sdk_random_request_objects(
            how_many,
            identifier="1" * 16,
            protocol_version=CURRENT_PROTOCOL_VERSION)
    ]
    batch = BatchCommitted(request_dicts,
                           DOMAIN_LEDGER_ID,
                           0,
                           0,
                           1,
                           get_utc_epoch(),
                           generate_state_root(),
                           generate_state_root(),
                           seq_no_start,
                           seq_no_end,
                           generate_state_root(),
                           ['Alpha', 'Beta'])
    return ObservedData(BATCH, batch)
def create_pre_prepare_params(state_root,
                              ledger_id=DOMAIN_LEDGER_ID,
                              txn_root=None,
                              timestamp=None,
                              bls_multi_sig=None,
                              view_no=0,
                              pool_state_root=None):
    """Return the positional argument list for a PrePrepare; optional trailing fields are appended in order."""
    ts = get_utc_epoch() if timestamp is None else timestamp
    args = [
        0,                          # instance id
        view_no,
        0,                          # pp_seq_no
        ts,
        ["random request digest"],
        init_discarded(0),
        "random digest",
        ledger_id,
        state_root,
        txn_root if txn_root is not None else '1' * 32,
        0,
        True,
    ]
    # Optional trailing fields must keep this order: pool root, then BLS sig.
    if pool_state_root is not None:
        args.append(pool_state_root)
    if bls_multi_sig:
        args.append(bls_multi_sig.as_list())
    return args
def from_request(req, three_phase_handler):
    """Build a PrePrepare for the given request using the master replica's current state, rooted in the TOKEN ledger."""
    replica = three_phase_handler.master_replica
    args = [ replica.instId, replica.viewNo, replica.lastPrePrepareSeqNo + 1, get_utc_epoch(), [req.digest], init_discarded(), req.digest, DOMAIN_LEDGER_ID, replica.stateRootHash(TOKEN_LEDGER_ID), replica.txnRootHash(TOKEN_LEDGER_ID), 0, True ]
    return PrePrepare(*args)
def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet, sdk_node_set_with_node_added_after_some_txns, sdk_new_node_caught_up, allPluginsPath, sdk_wallet_client):
    """
    Node discards 3-phase or ViewChangeDone messages from view nos that it does not know of (view nos before it joined the pool) :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    viewNo = new_node.viewNo
    # Force two view changes: node discards msgs which have viewNo
    # at least two less than node's. Current protocol implementation
    # needs to hold messages from the previous view as well as
    # from the current view.
    for i in range(2):
        ensure_view_change(looper, txnPoolNodeSet)
        waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
        checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
    sender = txnPoolNodeSet[0]
    rid_x_node = sender.nodestack.getRemote(new_node.name).uid
    messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime()
    # 3 pc msg (PrePrepare) needs to be discarded
    _, did = sdk_wallet_client
    primaryRepl = getPrimaryReplica(txnPoolNodeSet)
    three_pc = PrePrepare( 0, viewNo, 10, get_utc_epoch(), ["random request digest"], 1, "random digest", DOMAIN_LEDGER_ID, primaryRepl.stateRootHash(DOMAIN_LEDGER_ID), primaryRepl.txnRootHash(DOMAIN_LEDGER_ID), )
    sender.send(three_pc, rid_x_node)
    looper.run(eventually(checkDiscardMsg, [new_node, ], three_pc, 'un-acceptable viewNo', retryWait=1, timeout=messageTimeout))
def test_update_state_one_by_one(txn_author_agreement_handler, taa_request, taa_pp_time, retired_time):
    """Applying the same TAA txn twice (optionally with a retirement timestamp) leaves state keyed to the second txn time."""
    txn, digest, state_data = create_taa_txn(taa_request, taa_pp_time)
    state_value, seq_no, txn_time_first = state_data
    payload = get_payload_data(txn)
    txn_time_second = get_utc_epoch()
    # update state
    txn_author_agreement_handler.update_state(txn, None, None)
    # "without" is a sentinel param meaning no retirement ts is set on re-apply.
    if retired_time and retired_time != "without":
        payload[TXN_AUTHOR_AGREEMENT_RETIREMENT_TS] = retired_time
        state_value[TXN_AUTHOR_AGREEMENT_RETIREMENT_TS] = retired_time
    txn[TXN_METADATA][TXN_METADATA_TIME] = txn_time_second
    txn_author_agreement_handler.update_state(txn, None, None)
    # seq_no is unchanged; only the txn time advances to the second update.
    assert txn_author_agreement_handler.get_from_state( StaticTAAHelper.state_path_taa_digest(digest)) == (state_value, seq_no, txn_time_second)
def test_send_with_only_to_by_demand(looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_entry_by_demand, build_get_revoc_reg_delta):
    """GET_REVOC_REG_DELTA with only TO (no FROM) returns the by-demand entry's accumulator and issued set."""
    rev_entry_req, reg_reply = send_revoc_reg_entry_by_demand
    get_revoc_reg_delta = copy.deepcopy(build_get_revoc_reg_delta)
    del get_revoc_reg_delta['operation'][FROM]
    get_revoc_reg_delta['operation'][REVOC_REG_DEF_ID] = rev_entry_req[ 'operation'][REVOC_REG_DEF_ID]
    get_revoc_reg_delta['operation'][TO] = get_utc_epoch() + 1000
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg_delta)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    assert rev_entry_req['operation'][REVOC_REG_DEF_ID] == reply['result'][ REVOC_REG_DEF_ID]
    assert rev_entry_req['operation'][VALUE][ACCUM] == reply['result'][DATA][ VALUE][ACCUM_TO][VALUE][ACCUM]
    assert rev_entry_req['operation'][VALUE][ISSUED] == reply['result'][DATA][ VALUE][ISSUED]
def test_send_earlier_then_first_entry_by_demand( looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_entry_by_demand, build_get_revoc_reg_delta):
    """GET_REVOC_REG_DELTA with TO before the first entry echoes the id but carries no value, seq_no, or time."""
    rev_entry_req, reg_reply = send_revoc_reg_entry_by_demand
    get_revoc_reg_delta = copy.deepcopy(build_get_revoc_reg_delta)
    del get_revoc_reg_delta['operation'][FROM]
    get_revoc_reg_delta['operation'][REVOC_REG_DEF_ID] = rev_entry_req['operation'][REVOC_REG_DEF_ID]
    # TO predates the entry written by the fixture.
    get_revoc_reg_delta['operation'][TO] = get_utc_epoch() - 1000
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg_delta)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    assert reply['result'][DATA][REVOC_REG_DEF_ID] == rev_entry_req['operation'][REVOC_REG_DEF_ID]
    assert VALUE not in reply['result'][DATA]
    assert REVOC_TYPE not in reply['result'][DATA]
    assert reply['result'][f.SEQ_NO.nm] is None
    assert reply['result'][TXN_TIME] is None
def create_pre_prepare_params(state_root, ledger_id=DOMAIN_LEDGER_ID, txn_root=None,
                              timestamp=None, bls_multi_sig=None):
    """Return the positional argument list for a (legacy-shape) PrePrepare; BLS sig appended last when given."""
    ts = get_utc_epoch() if timestamp is None else timestamp
    args = [
        0,                      # instance id
        0,                      # view no
        0,                      # pp_seq_no
        ts,
        [('1' * 16, 1)],        # (identifier, req id) pairs
        0,
        "random digest",
        ledger_id,
        state_root,
        txn_root if txn_root is not None else '1' * 32,
    ]
    if bls_multi_sig:
        args.append(bls_multi_sig.as_list())
    return args
def test_send_reg_def_and_get_delta_then( looper, txnPoolNodeSet, sdk_pool_handle, send_revoc_reg_def_by_default, build_get_revoc_reg_delta):
    """After only a registry definition (no entries), GET_REVOC_REG_DELTA returns data=None with a state proof."""
    rev_def_req, _ = send_revoc_reg_def_by_default
    get_revoc_reg_delta = copy.deepcopy(build_get_revoc_reg_delta)
    # Derive the registry state path from the definition's identifying fields.
    get_revoc_reg_delta['operation'][REVOC_REG_DEF_ID] = domain.make_state_path_for_revoc_def(authors_did=rev_def_req[f.IDENTIFIER.nm], cred_def_id=rev_def_req[OPERATION][CRED_DEF_ID], revoc_def_type=rev_def_req[OPERATION][REVOC_TYPE], revoc_def_tag=rev_def_req[OPERATION][TAG]).decode()
    get_revoc_reg_delta['operation'][TO] = get_utc_epoch()
    sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg_delta)], looper, txnPoolNodeSet, sdk_pool_handle)
    reply = sdk_reply[0][1]
    assert DATA in reply['result']
    assert reply['result'][DATA] is None
    # Absence of data must still come with a proof of absence.
    assert STATE_PROOF in reply['result']
    assert reply['result'][STATE_PROOF] is not None
def replica(tconf, request):
    """Build a non-master Replica (instId 0) around a FakeSomething node; viewNo comes from request.param."""
    node_stack = FakeSomething(name="fake stack", connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    node = FakeSomething(name="fake node", ledger_ids=[0], viewNo=request.param, quorums=Quorums( getValueFromModule(request, 'nodeCount', default=4)), nodestack=node_stack, utc_epoch=lambda *args: get_utc_epoch())
    # BLS component stubbed out — garbage collection is a no-op.
    bls_bft_replica = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, isMaster=False, config=tconf, bls_bft_replica=bls_bft_replica)
    return replica
def check_result(txnPoolNodeSet, req, client, should_have_proof):
    """On every node, build a read result for a 'buy' txn and check its fields and (optional) state proof."""
    for node in txnPoolNodeSet:
        key = node.reqHandler.prepare_buy_key(req.identifier)
        proof = node.reqHandler.make_proof(key)
        txn_time = get_utc_epoch()
        result = node.reqHandler.make_result(req, {TXN_TYPE: "buy"}, 2, txn_time, proof)
        assert result
        assert result[DATA] == {TXN_TYPE: "buy"}
        assert result[f.IDENTIFIER.nm] == req.identifier
        assert result[f.REQ_ID.nm] == req.reqId
        assert result[f.SEQ_NO.nm] == 2
        assert result[TXN_TIME] == txn_time
        if should_have_proof:
            # The proof must be included verbatim and verify client-side.
            assert result[STATE_PROOF] == proof
            assert client.validate_proof(result)
        else:
            assert STATE_PROOF not in result
def check_result(txnPoolNodeSet, req, should_have_proof):
    """On every node, build a GET_BUY read result via read_manager and check fields and (optional) state proof."""
    for node in txnPoolNodeSet:
        req_handler = node.read_manager.request_handlers[GET_BUY]
        key = BuyHandler.prepare_buy_key(req.identifier, req.reqId)
        _, _, _, proof = req_handler.lookup(key, with_proof=True)
        txn_time = get_utc_epoch()
        result = req_handler.make_result(req, {TXN_TYPE: "buy"}, 2, txn_time, proof)
        assert result
        assert result[DATA] == {TXN_TYPE: "buy"}
        assert result[f.IDENTIFIER.nm] == req.identifier
        assert result[f.REQ_ID.nm] == req.reqId
        assert result[f.SEQ_NO.nm] == 2
        assert result[TXN_TIME] == txn_time
        if should_have_proof:
            assert result[STATE_PROOF] == proof
            assert validate_proof_for_read(result, req)
        else:
            assert STATE_PROOF not in result
def test_choose_ts_from_state(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward):
    """With no last-accepted PrePrepare time, the next txn time is picked from the ts_store state mapping."""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)
    primary_node = get_master_primary_node(txnPoolNodeSet)
    excpected_ts = get_utc_epoch() + 30
    req_handler = primary_node.write_manager.request_handlers[NYM][0]
    # Pre-seed the timestamp store so the current head hash maps to excpected_ts.
    req_handler.database_manager.ts_store.set(excpected_ts, req_handler.state.headHash)
    # Clearing the last PrePrepare time forces the ordering service to
    # consult the ts_store for the next batch's timestamp.
    primary_node.master_replica._ordering_service.last_accepted_pre_prepare_time = None
    reply = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)[0][1]
    assert abs(excpected_ts - int(get_txn_time(reply['result']))) < 3
def test_choose_ts_from_state(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward):
    """Legacy-handler variant: with no last-accepted PrePrepare time, txn time comes from the ts_store."""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)
    primary_node = get_master_primary_node(txnPoolNodeSet)
    excpected_ts = get_utc_epoch() + 30
    req_handler = primary_node.get_req_handler(DOMAIN_LEDGER_ID)
    # Map the current state head to the forced timestamp.
    req_handler.ts_store.set(excpected_ts, req_handler.state.headHash)
    primary_node.master_replica.last_accepted_pre_prepare_time = None
    reply = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)[0][1]
    assert abs(excpected_ts - int(get_txn_time(reply['result']))) < 3
def test_create_txn_author_agreement_succeeds(looper, set_txn_author_agreement_aml, sdk_pool_handle, sdk_wallet_trustee):
    """A trustee can write a TAA; it is then readable both as the latest TAA and by its version."""
    # Write random TAA
    version, text, ratified = randomString(16), randomString(1024), get_utc_epoch() - 600
    sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version, text=text, ratified=ratified)
    # Make sure TAA successfully written as latest TAA
    rep = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee)[1]
    assert rep[OP_FIELD_NAME] == REPLY
    taa = rep['result'][DATA]
    assert taa[TXN_AUTHOR_AGREEMENT_VERSION] == version
    assert taa[TXN_AUTHOR_AGREEMENT_TEXT] == text
    assert taa[TXN_AUTHOR_AGREEMENT_RATIFICATION_TS] == ratified
    # A fresh TAA must not carry a retirement timestamp.
    assert TXN_AUTHOR_AGREEMENT_RETIREMENT_TS not in taa
    # Make sure TAA also available using version
    rep = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version)[1]
    assert rep[OP_FIELD_NAME] == REPLY
    assert rep['result'][DATA] == taa
def check_result(txnPoolNodeSet, req, should_have_proof):
    """On every node, build a domain read result for a 'buy' txn and check fields and (optional) state proof."""
    for node in txnPoolNodeSet:
        req_handler = node.get_req_handler(DOMAIN_LEDGER_ID)
        key = req_handler.prepare_buy_key(req.identifier, req.reqId)
        _, proof = req_handler.get_value_from_state(key, with_proof=True)
        txn_time = get_utc_epoch()
        result = req_handler.make_domain_result(req, {TXN_TYPE: "buy"}, 2, txn_time, proof)
        assert result
        assert result[DATA] == {TXN_TYPE: "buy"}
        assert result[f.IDENTIFIER.nm] == req.identifier
        assert result[f.REQ_ID.nm] == req.reqId
        assert result[f.SEQ_NO.nm] == 2
        assert result[TXN_TIME] == txn_time
        if should_have_proof:
            assert result[STATE_PROOF] == proof
            assert validate_proof_for_read(result, req)
        else:
            assert STATE_PROOF not in result
def test_make_result_bls_enabled(looper, txnPoolNodeSet, client1,
                                 client1Connected, wallet1):
    """With BLS enabled, a made result must carry a state proof that the
    client can validate."""
    requests = sendRandomRequests(wallet1, client1, 1)
    waitForSufficientRepliesForRequests(looper, client1, requests=requests)
    req = requests[0]
    for node in txnPoolNodeSet:
        buy_key = node.reqHandler.prepare_buy_key(req.identifier, req.reqId)
        proof = node.reqHandler.make_proof(buy_key)
        now = get_utc_epoch()
        result = node.reqHandler.make_result(req, {TXN_TYPE: "buy"},
                                             2, now, proof)
        assert result
        assert result[DATA] == {TXN_TYPE: "buy"}
        assert result[f.IDENTIFIER.nm] == req.identifier
        assert result[f.REQ_ID.nm] == req.reqId
        assert result[f.SEQ_NO.nm] == 2
        assert result[TXN_TIME] == now
        # The exact proof we passed in must be attached and verifiable.
        assert result[STATE_PROOF] == proof
        assert client1.validate_proof(result)
def test_make_result_no_protocol_version(looper, txnPoolNodeSet, client1,
                                         client1Connected, wallet1):
    """A request sent without a protocol version must get a result that
    carries no state proof."""
    reqs = sendRandomRequests(wallet1, client1, 1)
    # Strip the protocol version before waiting for replies.
    for req in reqs:
        req.protocolVersion = None
    waitForSufficientRepliesForRequests(looper, client1, requests=reqs)

    req = reqs[0]
    for node in txnPoolNodeSet:
        key = node.reqHandler.prepare_buy_key(req.identifier, req.reqId)
        proof = node.reqHandler.make_proof(key)
        txn_time = get_utc_epoch()
        result = node.reqHandler.make_result(req,
                                             {TXN_TYPE: "buy"},
                                             2,
                                             txn_time,
                                             proof)
        assert result
        assert result[DATA] == {TXN_TYPE: "buy"}
        assert result[f.IDENTIFIER.nm] == req.identifier
        assert result[f.REQ_ID.nm] == req.reqId
        assert result[f.SEQ_NO.nm] == 2
        assert result[TXN_TIME] == txn_time
        # BUG FIX: the old code asserted `STATE_PROOF not in proof`, i.e. a
        # key lookup on the proof object itself, which never tests what this
        # scenario is about. The intent — matching the sibling check_result
        # helpers in this file — is that the *result* built for a
        # version-less request must not include a state proof.
        assert STATE_PROOF not in result
def send_pre_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
    """Broadcast a PrePrepare with the given 3PC coordinates: first from the
    master primary, then from every non-primary node."""
    placeholder_root = '0' * 44
    pp = PrePrepare(
        0, view_no, pp_seq_no, get_utc_epoch(),
        ["requests digest"], 0, "random digest", DOMAIN_LEDGER_ID,
        state_root or placeholder_root,
        txn_root or placeholder_root,
        0, True
    )
    primary = getPrimaryReplica(nodes).node
    others = set(nodes) - {primary}
    sendMessageToAll(nodes, primary, pp)
    for sender in others:
        sendMessageToAll(nodes, sender, pp)
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare built from a maximum-size batch must serialize within
    the configured message length limit."""
    config = getConfig()
    max_batch = config.Max3PCBatchSize
    reqs = [Request(signatures={})] * max_batch
    pp = PrePrepare(
        0, 0, 0, get_utc_epoch(),
        [r.digest for r in reqs],
        max_batch,
        Replica.batchDigest(reqs),
        0,
        Base58Serializer().serialize(BLANK_ROOT),
        Ledger.hashToStr(CompactMerkleTree().root_hash)
    )
    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def create_pre_prepare_params(state_root,
                              ledger_id=DOMAIN_LEDGER_ID,
                              txn_root=None,
                              timestamp=None,
                              bls_multi_sig=None,
                              view_no=0,
                              pool_state_root=None,
                              pp_seq_no=0,
                              inst_id=0,
                              audit_txn_root=None,
                              reqs=None):
    """Assemble the positional argument list for a PrePrepare message,
    filling unspecified fields with placeholders or fresh random roots."""
    if reqs is None:
        digest = "random digest"
        req_idrs = ["random request"]
    else:
        digest = Replica.batchDigest(reqs)
        req_idrs = [req.key for req in reqs]
    params = [
        inst_id,
        view_no,
        pp_seq_no,
        timestamp or get_utc_epoch(),
        req_idrs,
        init_discarded(0),
        digest,
        ledger_id,
        state_root,
        txn_root or '1' * 32,
        0,
        True,
        pool_state_root or generate_state_root(),
        audit_txn_root or generate_state_root(),
    ]
    # The optional BLS multi-signature goes last, only when supplied.
    if bls_multi_sig:
        params.append(bls_multi_sig.as_list())
    return params
def check_result(txnPoolNodeSet, req, should_have_proof):
    """On every node, build a result for `req` via make_result and verify
    its fields; a state proof must be present and valid iff
    `should_have_proof`."""
    for node in txnPoolNodeSet:
        handler = node.get_req_handler(DOMAIN_LEDGER_ID)
        buy_key = handler.prepare_buy_key(req.identifier, req.reqId)
        _, proof = handler.get_value_from_state(buy_key, with_proof=True)
        now = get_utc_epoch()
        result = handler.make_result(req, {TXN_TYPE: "buy"}, 2, now, proof)
        assert result
        assert result[DATA] == {TXN_TYPE: "buy"}
        assert result[f.IDENTIFIER.nm] == req.identifier
        assert result[f.REQ_ID.nm] == req.reqId
        assert result[f.SEQ_NO.nm] == 2
        assert result[TXN_TIME] == now
        if not should_have_proof:
            assert STATE_PROOF not in result
        else:
            assert result[STATE_PROOF] == proof
            assert validate_proof_for_read(result, req)
def create_pre_prepare_params(state_root,
                              ledger_id=DOMAIN_LEDGER_ID,
                              txn_root=None,
                              timestamp=None,
                              bls_multi_sig=None,
                              view_no=0,
                              pool_state_root=None):
    """Assemble the positional argument list for a PrePrepare message with
    placeholder digests; optional trailing fields are appended only when
    supplied."""
    params = [
        0,
        view_no,
        0,
        timestamp or get_utc_epoch(),
        ["random request digest"],
        init_discarded(0),
        "random digest",
        ledger_id,
        state_root,
        txn_root or '1' * 32,
        0,
        True,
    ]
    # Append optional trailing fields in order: pool state root, then the
    # BLS multi-signature.
    if pool_state_root is not None:
        params.append(pool_state_root)
    if bls_multi_sig:
        params.append(bls_multi_sig.as_list())
    return params
# Fixtures for validating BlsMultiSignatureField: one well-formed
# (ledger_id, state_root, pool_state_root, txn_root, timestamp) tuple plus
# variants that each corrupt a single element.
from plenum.common.messages.fields import BlsMultiSignatureField, TimestampField
from plenum.common.util import get_utc_epoch
from plenum.test.input_validation.utils import b58_by_len

validator = BlsMultiSignatureField()

state_root_hash = b58_by_len(32)
pool_state_root_hash = b58_by_len(32)
txn_root_hash = b58_by_len(32)
ledger_id = 1
timestamp = get_utc_epoch()

# Valid multi-signature value.
value = (ledger_id, state_root_hash, pool_state_root_hash, txn_root_hash,
         timestamp)
# Invalid: negative ledger id.
invalid_value1 = (-1, state_root_hash, pool_state_root_hash, txn_root_hash,
                  timestamp)
# Invalid: state root of the wrong length (31 base58 chars instead of 32).
invalid_value2 = (ledger_id, b58_by_len(31), pool_state_root_hash,
                  txn_root_hash, timestamp)
# NOTE(review): the tail of this tuple (presumably an invalid timestamp)
# is outside this chunk — confirm against the full file.
invalid_value3 = (ledger_id, state_root_hash, pool_state_root_hash, txn_root_hash,
def patch_replaying_node_for_time(replaying_node, start_times):
    """Set the replaying node's clock offset to the gap between now and the
    recorded node's very first start time."""
    first_start = start_times[0][0]
    replaying_node._time_diff = get_utc_epoch() - first_start
import pytest
from plenum.common.util import get_utc_epoch
from storage.kv_in_memory import KeyValueStorageInMemory
from indy_node.persistence.idr_cache import IdrCache

# Cache key used throughout these tests.
identifier = "fake_identifier"

# Value tuples stored in the cache; the positional layout visible here is
# (seq_no, txn_time, ta, role, verkey).
committed_items = (0,  # seq_no
                   get_utc_epoch(),  # txn_time
                   "committed_ta_value",
                   "committed_role_value",
                   "committed_verkey_value",)
uncommitted_items = (1,
                     get_utc_epoch(),  # txn_time
                     "uncommitted_ta_value",
                     "uncommitted_role_value",
                     "uncommitted_verkey_value",)


def make_idr_cache():
    # Fresh IdrCache backed by an in-memory key-value store (no disk I/O).
    kvs = KeyValueStorageInMemory()
    cache = IdrCache("TestCache", kvs)
    return cache


def test_committed():
    """
    Check that it is possible to set and get committed items
    """
    cache = make_idr_cache()
    cache.set(identifier, *committed_items)
    # NOTE(review): the assertion comparing real_items against
    # committed_items appears to be outside this chunk.
    real_items = cache.get(identifier)
def utc_epoch(self) -> int:
    """
    Return the UTC epoch in seconds, shifted back by the recorder's
    time offset (self._time_diff).
    """
    offset = self._time_diff
    return get_utc_epoch() - offset
def multi_sig_value(state_root, pool_state_root):
    """Build a MultiSignatureValue for the domain ledger using the given
    roots, a freshly generated txn root, and the current UTC time."""
    return MultiSignatureValue(
        ledger_id=DOMAIN_LEDGER_ID,
        state_root_hash=state_root,
        pool_state_root_hash=pool_state_root,
        txn_root_hash=generate_state_root(),
        timestamp=get_utc_epoch(),
    )
def patched_LedgerStatus():
    # Return a LedgerStatus subclass whose schema drops the last field —
    # used to emulate a message in an older/shorter format.
    class PLedgerStatus(LedgerStatus):
        schema = LedgerStatus.schema[:-1]
    return PLedgerStatus


# Counters of discarded messages; populated elsewhere (not visible here).
discard_counts = {}

# PrePrepare fixture with fixed view_no=1, pp_seq_no=3 and hard-coded
# digest/root values.
pre_prepare_msg = PrePrepare(
    0, 1, 3, get_utc_epoch(),
    ['4AdS22kC7xzb4bcqg9JATuCfAMNcQYcZa1u5eWzs6cSJ',],
    init_discarded(),
    'f99937241d4c891c08e92a3cc25966607315ca66b51827b170d492962d58a9be',
    1,
    'CZecK1m7VYjSNCC7pGHj938DSW2tfbqoJp1bMJEtFqvG',
    '7WrAMboPTcMaQCU1raoj28vnhu2bPMMd2Lr9tEcsXeCJ',
    0, True
)

# NOTE(review): the remainder of this Prepare(...) call lies outside
# this chunk.
prepare_msg = Prepare(
    0, 1, 3, get_utc_epoch(),