def testNodeConnection(allPluginsPath, tconf, tdir,
                       tdir_for_func, tconf_for_func,
                       looper, txnPoolNodeSetNotStarted):
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)

    nodes = txnPoolNodeSetNotStarted[:2]

    for node in nodes:
        tellKeysToOthers(node, nodes)

    A, B = nodes
    looper.add(A)
    looper.runFor(4)
    logger.debug("wait done")
    looper.add(B)
    looper.runFor(4)
    looper.run(checkNodesConnected([A, B]))
    looper.stopall()
    looper.removeProdable(A)
    looper.removeProdable(B)
    A = start_stopped_node(A, looper, tconf, tdir, allPluginsPath)
    looper.runFor(4)
    B = start_stopped_node(B, looper, tconf, tdir, allPluginsPath)
    looper.run(checkNodesConnected([A, B]))

    for node in txnPoolNodeSetNotStarted[2:]:
        looper.add(node)
    all_nodes = [A, B] + txnPoolNodeSetNotStarted[2:]
    looper.run(checkNodesConnected(all_nodes))
    stopNodes(all_nodes, looper)
    for node in all_nodes:
        looper.removeProdable(node)
def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath,
                  after_restart_timeout=None, start_one_by_one=True, wait_for_elections=True):
    for node_to_stop in restart_set:
        node_to_stop.cleanupOnStopping = True
        node_to_stop.stop()
        looper.removeProdable(node_to_stop)

    rest_nodes = [n for n in nodeSet if n not in restart_set]
    for node_to_stop in restart_set:
        ensure_node_disconnected(looper, node_to_stop, nodeSet, timeout=2)

    if after_restart_timeout:
        looper.runFor(after_restart_timeout)

    for node_to_restart in restart_set.copy():
        config_helper = PNodeConfigHelper(node_to_restart.name, tconf, chroot=tdir)
        restarted_node = TestNode(node_to_restart.name, config_helper=config_helper, config=tconf,
                                  pluginPaths=allPluginsPath, ha=node_to_restart.nodestack.ha,
                                  cliha=node_to_restart.clientstack.ha)
        looper.add(restarted_node)

        idx = nodeSet.index(node_to_restart)
        nodeSet[idx] = restarted_node
        idx = restart_set.index(node_to_restart)
        restart_set[idx] = restarted_node

        rest_nodes += [restarted_node]
        if start_one_by_one:
            looper.run(checkNodesConnected(rest_nodes))

    if not start_one_by_one:
        looper.run(checkNodesConnected(nodeSet))

    if wait_for_elections:
        ensureElectionsDone(looper=looper, nodes=nodeSet)
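

# `start_stopped_node` is used throughout these examples but defined elsewhere
# in the test helpers. A minimal sketch of what it presumably does, mirroring
# the PNodeConfigHelper/TestNode pattern in `restart_nodes` above (the exact
# signature and keyword handling here are assumptions):
def start_stopped_node_sketch(stopped_node, looper, tconf, tdir, allPluginsPath):
    config_helper = PNodeConfigHelper(stopped_node.name, tconf, chroot=tdir)
    restarted_node = TestNode(stopped_node.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=stopped_node.nodestack.ha,
                              cliha=stopped_node.clientstack.ha)
    looper.add(restarted_node)
    return restarted_node
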
def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet,
                                         tdir, tconf,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         sdk_wallet_steward):
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     'AnotherSteward' + randomString(4),
                                     'AnotherNode' + randomString(4),
                                     tdir,
                                     tconf)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)

    node_new_ha, client_new_ha = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(new_node, node_new_ha,
                                                   client_new_ha))

    # Sending the txn that changes the node's HA and confirming it succeeded
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         node_new_ha.host, node_new_ha.port,
                         client_new_ha.host, client_new_ha.port)

    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    # Starting nodes again by creating `Node` objects, since that simulates
    # what happens when starting a node with the script
    restartedNodes = []
    for node in txnPoolNodeSet[:-1]:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 config_helper=config_helper,
                                 config=tconf, ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)

    # Starting the node whose HA was changed
    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    node = TestNode(new_node.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=node_new_ha, cliha=client_new_ha)
    looper.add(node)
    restartedNodes.append(node)

    looper.run(checkNodesConnected(restartedNodes))
    waitNodeDataEquality(looper, node, *restartedNodes[:-1])
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, restartedNodes, sdk_wallet_client, sdk_pool_handle)
def testNodesComingUpAtDifferentTimes(allPluginsPath, tconf, tdir,
                                      tdir_for_func, tconf_for_func,
                                      looper, txnPoolNodeSetNotStarted):
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)

    nodes = txnPoolNodeSetNotStarted

    names = list(node.name for node in nodes)

    shuffle(names)
    waits = [randint(1, 10) for _ in names]
    rwaits = [randint(1, 10) for _ in names]

    for node in nodes:
        tellKeysToOthers(node, nodes)

    for i, node in enumerate(nodes):
        looper.add(node)
        looper.runFor(waits[i])
    looper.run(checkNodesConnected(nodes))
    logger.debug("connects")
    logger.debug("node order: {}".format(names))
    logger.debug("waits: {}".format(waits))

    current_node_set = set(nodes)
    for node in nodes:
        disconnect_node_and_ensure_disconnected(looper,
                                                current_node_set,
                                                node,
                                                timeout=len(nodes),
                                                stopNode=True)
        looper.removeProdable(node)
        current_node_set.remove(node)

    for i, node in enumerate(nodes):
        restarted_node = start_stopped_node(node, looper,
                                            tconf, tdir, allPluginsPath)
        current_node_set.add(restarted_node)
        looper.runFor(rwaits[i])

    looper.runFor(3)
    looper.run(checkNodesConnected(current_node_set))

    stopNodes(current_node_set, looper)
    logger.debug("reconnects")
    logger.debug("node order: {}".format(names))
    logger.debug("rwaits: {}".format(rwaits))
    for node in current_node_set:
        looper.removeProdable(node)
def testKeyShareParty(looper, txnPoolNodeSet, tdir_for_func, tconf_for_func):
    """
    Connections to all nodes should be successfully established when key
    sharing is enabled.
    """

    logger.debug("-----sharing keys-----")
    looper.run(checkNodesConnected(txnPoolNodeSet))

    logger.debug("-----key sharing done, connect after key sharing-----")
    looper.run(checkNodesConnected(txnPoolNodeSet),
               msgAll(txnPoolNodeSet))
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)
def testNodeKeysChanged(looper, txnPoolNodeSet, tdirWithPoolTxns,
                        tconf, steward1, nodeThetaAdded,
                        allPluginsPath=None):
    newSteward, newStewardWallet, newNode = nodeThetaAdded

    # The node returned by the fixture `nodeThetaAdded` was abandoned in the
    # previous test, so get node `Theta` from `txnPoolNodeSet` instead
    newNode = getNodeWithName(txnPoolNodeSet, newNode.name)

    newNode.stop()
    nodeHa, nodeCHa = HA(*newNode.nodestack.ha), HA(*newNode.clientstack.ha)
    sigseed = randomString(32).encode()
    verkey = SimpleSigner(seed=sigseed).naclSigner.verhex.decode()
    changeNodeKeys(looper, newSteward, newStewardWallet, newNode, verkey)
    initLocalKeep(newNode.name, tdirWithPoolTxns, sigseed)
    initLocalKeep(newNode.name+CLIENT_STACK_SUFFIX, tdirWithPoolTxns, sigseed)
    looper.removeProdable(name=newNode.name)
    logger.debug("{} starting with HAs {} {}".format(newNode, nodeHa, nodeCHa))
    node = TestNode(newNode.name, basedirpath=tdirWithPoolTxns, config=tconf,
                    ha=nodeHa, cliha=nodeCHa, pluginPaths=allPluginsPath)
    looper.add(node)
    # The last element of `txnPoolNodeSet` is the node Theta that was just
    # stopped
    txnPoolNodeSet[-1] = node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(eventually(checkNodeLedgersForEquality, node,
                          *txnPoolNodeSet[:-1], retryWait=1, timeout=10))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
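

# `HA` as used above is essentially a (host, port) named tuple, which is why
# HA(*newNode.nodestack.ha) simply copies an existing address. A minimal
# self-contained stand-in illustrating the idiom (the real type lives in the
# plenum codebase):
from collections import namedtuple

HASketch = namedtuple('HASketch', ['host', 'port'])

ha = HASketch('127.0.0.1', 9701)
assert HASketch(*ha) == ha  # unpacking round-trips, as with HA(*nodestack.ha)
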
def testNodeRequestingTxns(txnPoolNodeSet, nodeCreatedAfterSomeTxns):
    """
    A newly joined node is catching up and sends catchup requests to other
    nodes, but one of them does not reply, so the newly joined node cannot
    complete the process until the timeout and then requests the missing
    transactions.
    """
    looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns
    # Prevent nodes from telling the clients about the newly joined node so
    # the clients don't send any requests to it
    for node in txnPoolNodeSet:
        node.sendPoolInfoToClients = types.MethodType(lambda x, y: None, node)

    txnPoolNodeSet.append(newNode)

    def ignoreCatchupReq(self, req, frm):
        logger.info("{} being malicious and ignoring catchup request {} "
                    "from {}".format(self, req, frm))

    # One of the nodes does not process catchup requests.
    txnPoolNodeSet[0].nodeMsgRouter.routes[CatchupReq] = types.MethodType(
        ignoreCatchupReq, txnPoolNodeSet[0].ledgerManager)
    sendRandomRequests(wallet, client, 10)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=60))
    looper.run(eventually(checkNodeLedgersForEquality, newNode,
                          *txnPoolNodeSet[:-1], retryWait=1, timeout=90))
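

# The types.MethodType patching above is plain stdlib Python: it binds a
# replacement function to an instance so the message router invokes it like a
# method. A self-contained illustration of the same pattern (names here are
# illustrative only):
import types

class Handler:
    def process(self, msg):
        return "processed: " + msg

def ignore(self, msg):
    return None  # silently drop the message, as ignoreCatchupReq does

handler = Handler()
handler.process = types.MethodType(ignore, handler)
assert handler.process("CatchupReq") is None
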
def test_order_after_demote_and_restart(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client, tdir, tconf, allPluginsPath,
                                        sdk_wallet_stewards):
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 3, 3)

    primary_node = txnPoolNodeSet[0]
    node_to_stop = txnPoolNodeSet[1]
    node_to_demote = txnPoolNodeSet[2]
    txnPoolNodeSet.remove(node_to_demote)

    node_to_stop.cleanupOnStopping = True
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2)

    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote)

    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name, config_helper=config_helper, config=tconf,
                              pluginPaths=allPluginsPath, ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, check_primaries=False)

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 1, 1)

    def get_current_bls_keys(node):
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

    assert get_current_bls_keys(restarted_node) == get_current_bls_keys(primary_node)
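
    # Note: the assert above is the point of the test - a node that was
    # offline during the demotion must, after restart and catch-up, rebuild
    # the same BLS key register (without the demoted node's key) as a node
    # that observed the demotion directly.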
def update_node_data_and_reconnect(looper, txnPoolNodeSet, steward_wallet,
                                   sdk_pool_handle, node, new_node_ip,
                                   new_node_port, new_client_ip,
                                   new_client_port, tdir, tconf):
    node_ha = node.nodestack.ha
    cli_ha = node.clientstack.ha
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet, sdk_pool_handle, node_dest,
                         node.name, new_node_ip, new_node_port, new_client_ip,
                         new_client_port)
    # restart the Node with new HA
    node.stop()
    looper.removeProdable(name=node.name)
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             ha=HA(new_node_ip or node_ha.host, new_node_port
                                   or node_ha.port),
                             cliha=HA(new_client_ip or cli_ha.host,
                                      new_client_port or cli_ha.port))
    looper.add(restartedNode)

    # replace node in txnPoolNodeSet
    try:
        idx = next(i for i, n in enumerate(txnPoolNodeSet)
                   if n.name == node.name)
    except StopIteration:
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, steward_wallet,
                               sdk_pool_handle)
    return restartedNode
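
# Passing None for all four address arguments keeps the node's existing HA,
# thanks to the `or` fallbacks above; the testAddInactiveNodeThenActivate
# examples further down use exactly that to re-send the NODE txn without
# moving the node:
#
#     new_node = update_node_data_and_reconnect(looper, txnPoolNodeSet + [new_node],
#                                               new_steward_wallet, sdk_pool_handle,
#                                               new_node, None, None, None, None,
#                                               tdir, tconf)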
def test_replica_removing_before_vc_with_primary_disconnected(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf,
        tdir, allPluginsPath, chkFreqPatched, view_change):
    """
    1. Remove replica
    2. Reconnect master primary
    3. Check that nodes and replicas are correctly added
    """
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    node = txnPoolNodeSet[0]
    start_replicas_count = node.replicas.num_replicas
    instance_id = start_replicas_count - 1
    node.replicas.remove_replica(instance_id)
    _check_replica_removed(node, start_replicas_count, instance_id)
    assert not node.monitor.isMasterDegraded()
    assert len(node.requests) == 0
    # trigger view change on all nodes
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node)
    txnPoolNodeSet.remove(node)
    looper.removeProdable(node)
    node = start_stopped_node(node, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitForViewChange(looper,
                      txnPoolNodeSet,
                      expectedViewNo=1,
                      customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    assert start_replicas_count == node.replicas.num_replicas
def test_catchup_with_ledger_statuses_in_old_format_from_one_node(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward,
        tconf, tdir, allPluginsPath):
    """
    A node is restarted and during a catch-up receives ledger statuses
    in an old format (without `protocolVersion`) from one of the nodes in the pool.
    The test verifies that the node successfully completes the catch-up and
    participates in ordering of further transactions.
    """
    node_to_restart = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]

    old_node = txnPoolNodeSet[0]

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)

    original_get_ledger_status = old_node.getLedgerStatus

    # Patch the method getLedgerStatus to
    # get_ledger_status_without_protocol_version for sending ledger status
    # in old format (without `protocolVersion`)

    def get_ledger_status_without_protocol_version(ledgerId: int):
        original_ledger_status = original_get_ledger_status(ledgerId)
        return LedgerStatusInOldFormat(original_ledger_status.ledgerId,
                                       original_ledger_status.txnSeqNo,
                                       original_ledger_status.viewNo,
                                       original_ledger_status.ppSeqNo,
                                       original_ledger_status.merkleRoot)

    old_node.getLedgerStatus = get_ledger_status_without_protocol_version

    # restart node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_restart)
    looper.removeProdable(name=node_to_restart.name)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward,
                              2)

    # add `node_to_restart` to pool
    node_to_restart = start_stopped_node(node_to_restart, looper, tconf,
                                         tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_restart
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # Verify that `node_to_restart` successfully completes catch-up
    waitNodeDataEquality(looper, node_to_restart, *other_nodes)

    # check that ledger statuses from `old_node` were discarded for all
    # ledgers (pool, domain and config, hence >= 3)
    assert countDiscarded(node_to_restart,
                          'replied message has invalid structure') >= 3

    # Verify that `node_to_restart` participates in ordering
    # of further transactions
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)
    waitNodeDataEquality(looper, node_to_restart, *other_nodes)
def testClientConnectsToNewNode(looper, txnPoolNodeSet, tdir, client_tdir,
                                tconf, steward1, stewardWallet,
                                allPluginsPath):
    """
    A client should be able to connect to a newly added node
    """
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = "Epsilon"
    oldNodeReg = copy(steward1.nodeReg)
    newSteward, newStewardWallet, newNode = addNewStewardAndNode(
        looper, steward1, stewardWallet, newStewardName, newNodeName, tdir,
        client_tdir, tconf, allPluginsPath)
    txnPoolNodeSet.append(newNode)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    logger.debug("{} connected to the pool".format(newNode))

    def chkNodeRegRecvd():
        assert (len(steward1.nodeReg) - len(oldNodeReg)) == 1
        assert (newNode.name + CLIENT_STACK_SUFFIX) in steward1.nodeReg

    timeout = waits.expectedClientToPoolConnectionTimeout(len(txnPoolNodeSet))
    looper.run(eventually(chkNodeRegRecvd, retryWait=1, timeout=timeout))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
def test_get_last_ordered_timestamp_after_catchup(looper, txnPoolNodeSet,
                                                  sdk_pool_handle,
                                                  sdk_wallet_steward, tconf,
                                                  tdir, allPluginsPath):
    node_to_disconnect = txnPoolNodeSet[-1]
    reply_before = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                             sdk_pool_handle,
                                             sdk_wallet_steward, 1)[0][1]
    looper.runFor(2)
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    reply = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_steward, 1)[0][1]

    node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf,
                                            tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper,
                         node_to_disconnect,
                         *txnPoolNodeSet[:-1],
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    ts_from_state = node_to_disconnect.master_replica._get_last_timestamp_from_state(
        DOMAIN_LEDGER_ID)
    assert ts_from_state == get_txn_time(reply['result'])
    assert ts_from_state != get_txn_time(reply_before['result'])
def testNodeCatchupAfterRestart(newNodeCaughtUp, txnPoolNodeSet,
                                nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """

    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    logger.debug("Stopping node {} with pool ledger size {}".format(
        newNode, newNode.poolManager.txnSeqNo))
    ensureNodeDisconnectedFromPool(looper, txnPoolNodeSet, newNode)
    # for n in txnPoolNodeSet[:4]:
    #     for r in n.nodestack.remotes.values():
    #         if r.name == newNode.name:
    #             r.removeStaleCorrespondents()
    # looper.run(eventually(checkNodeDisconnectedFrom, newNode.name,
    #                       txnPoolNodeSet[:4], retryWait=1, timeout=5))
    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    logger.debug("Starting the stopped node, {}".format(newNode))
    newNode.start(looper.loop)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   newNode,
                   *txnPoolNodeSet[:4],
                   retryWait=1,
                   timeout=15))
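

# `eventually` retries a check until it passes or a timeout expires; these
# tests use it for all convergence assertions. A simplified, synchronous
# sketch of that retry loop (the real helper is asyncio-based):
import time

def eventually_sketch(check, retryWait=1, timeout=10):
    deadline = time.monotonic() + timeout
    while True:
        try:
            return check()
        except Exception:
            if time.monotonic() >= deadline:
                raise
            time.sleep(retryWait)
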
def add_started_node(looper, new_node, txnPoolNodeSet, client_tdir,
                     stewardClient, stewardWallet, sigseed, bls_key):
    '''
    Adds an already created node to the pool, that is, sends the NODE txn.
    Makes sure that the node is actually added and connected to all other
    nodes.
    '''
    newSteward, newStewardWallet = addNewSteward(looper,
                                                 client_tdir,
                                                 stewardClient,
                                                 stewardWallet,
                                                 "Steward" + new_node.name,
                                                 clientClass=TestClient)
    node_name = new_node.name
    send_new_node_txn(sigseed, new_node.poolManager.nodeReg[node_name][0],
                      new_node.poolManager.nodeReg[node_name][1],
                      new_node.poolManager.cliNodeReg[node_name + "C"][0],
                      new_node.poolManager.cliNodeReg[node_name + "C"][1],
                      bls_key, node_name, newSteward, newStewardWallet)

    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)

    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
def test_current_state_propagation(newNodeCaughtUp, txnPoolNodeSet,
                                   nodeSetWithNodeAddedAfterSomeTxns, tconf,
                                   tdir, allPluginsPath):
    """
    Checks that nodes send CurrentState to lagged nodes.
    """

    # 1. Start pool
    looper, new_node, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns

    # 2. Stop one node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            new_node,
                                            stopNode=True)
    looper.removeProdable(new_node)

    # 3. Start it again
    restarted_node = start_stopped_node(new_node, looper, tconf, tdir,
                                        allPluginsPath)
    txnPoolNodeSet[-1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.runFor(5)

    # 4. Check that all nodes sent CurrentState
    for node in txnPoolNodeSet[:-1]:
        sent_times = node.spylog.count(
            node.send_current_state_to_lagging_node.__name__)
        assert sent_times != 0, "{} hasn't sent CurrentState".format(node)
    looper.runFor(5)

    # 5. Check that it received CurrentState messages
    received_times = restarted_node.spylog.count(
        restarted_node.process_current_state_message.__name__)
    assert received_times != 0
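

# `spylog.count` above comes from plenum's test instrumentation and just
# counts how many times a method was invoked. A self-contained stdlib
# equivalent of the idea:
from functools import wraps

def counted(fn, counter):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        counter[fn.__name__] = counter.get(fn.__name__, 0) + 1
        return fn(*args, **kwargs)
    return wrapper

calls = {}

def send_current_state_to_lagging_node():
    pass

send_current_state_to_lagging_node = counted(send_current_state_to_lagging_node, calls)
send_current_state_to_lagging_node()
assert calls['send_current_state_to_lagging_node'] == 1
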
def testClientConnectsToNewNode(looper, txnPoolNodeSet, tdirWithPoolTxns,
                                tconf, steward1, stewardWallet, allPluginsPath):
    """
    A client should be able to connect to a newly added node
    """
    newStewardName = "testClientSteward"+randomString(3)
    newNodeName = "Epsilon"
    oldNodeReg = copy(steward1.nodeReg)
    newSteward, newStewardWallet, newNode = addNewStewardAndNode(looper,
                                                steward1, stewardWallet,
                                                newStewardName, newNodeName,
                                                tdirWithPoolTxns, tconf,
                                                allPluginsPath)
    txnPoolNodeSet.append(newNode)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    logger.debug("{} connected to the pool".format(newNode))

    def chkNodeRegRecvd():
        assert (len(steward1.nodeReg) - len(oldNodeReg)) == 1
        assert (newNode.name + CLIENT_STACK_SUFFIX) in steward1.nodeReg

    looper.run(eventually(chkNodeRegRecvd, retryWait=1, timeout=5))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
def testAddInactiveNodeThenActivate(looper, txnPoolNodeSet, sdk_wallet_steward,
                                    sdk_pool_handle, tdir, tconf,
                                    allPluginsPath):
    new_steward_name = "testClientSteward" + randomString(3)
    new_node_name = "Kappa"

    # adding a new node without the SERVICES field,
    # which means the node is in the inactive state
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     new_steward_name,
                                     new_node_name,
                                     tdir,
                                     tconf,
                                     allPluginsPath,
                                     services=None)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    new_node = update_node_data_and_reconnect(looper,
                                              txnPoolNodeSet + [new_node],
                                              new_steward_wallet,
                                              sdk_pool_handle, new_node, None,
                                              None, None, None, tdir, tconf)
    txnPoolNodeSet.append(new_node)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet,
                               sdk_pool_handle)
def test_state_regenerated_from_ledger(looper,
                                       nodeSet, tconf, tdir,
                                       sdk_pool_handle,
                                       sdk_wallet_trustee,
                                       allPluginsPath):
    """
    A node loses its state database but recreates it from the ledger after
    start. ATTRIB txns are checked too since they store some data off-ledger.
    """
    trust_anchors = []
    for i in range(5):
        trust_anchors.append(sdk_add_new_nym(looper, sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             'TA' + str(i),
                                             TRUST_ANCHOR_STRING))
        sdk_add_raw_attribute(looper, sdk_pool_handle,
                              trust_anchors[-1],
                              randomString(6),
                              randomString(10))

    for wh in trust_anchors:
        for i in range(3):
            sdk_add_new_nym(looper, sdk_pool_handle,
                            wh, 'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(
        node_to_stop.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=node_to_stop.nodestack.ha,
        cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for wh in trust_anchors:
        sdk_add_new_nym(looper, sdk_pool_handle,
                        wh, 'NP--' + randomString(5))

    ensure_all_nodes_have_same_data(looper, nodeSet)
def testPrimaryElectionCase2(case2Setup, looper, keySharedNodes):
    """
    Case 2 - A node making nominations for multiple other nodes. Consider 4
    nodes A, B, C, and D. Let's say node B is malicious and nominates node C
    to all nodes, and then also nominates node D to all nodes.
    """
    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    looper.run(checkNodesConnected(nodeSet))

    # Node B sends multiple NOMINATE msgs but only after A has nominated itself
    looper.run(eventually(checkNomination, A, A.name, retryWait=.25, timeout=1))

    instId = getSelfNominationByNode(A)

    BRep = Replica.generateName(B.name, instId)
    CRep = Replica.generateName(C.name, instId)
    DRep = Replica.generateName(D.name, instId)

    # Node B first sends NOMINATE msgs for Node C to all nodes
    B.send(Nomination(CRep, instId, B.viewNo))
    # Node B sends NOMINATE msgs for Node D to all nodes
    B.send(Nomination(DRep, instId, B.viewNo))

    # Ensure elections are done
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=45)

    # Nodes A, C and D (node B is malicious anyway, so it is not considered)
    # should record node B's nomination as being for node C, since node B
    # nominated node C first
    for node in [A, C, D]:
        assert node.elector.nominations[instId][BRep] == CRep
def test_restarted_node_catches_up_config_ledger_txns(
        looper, some_config_txns_done, txnPoolNodeSet, sdk_wallet_client,
        sdk_pool_handle, sdk_new_node_caught_up, keys, tconf, tdir,
        allPluginsPath):
    """
    A node is stopped, a few config ledger txns happen,
    the stopped node is started and catches up the config ledger
    """
    new_node = sdk_new_node_caught_up
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            new_node,
                                            stopNode=True)
    looper.removeProdable(new_node)

    # Do some config txns while the node is down, reusing the fixture helper
    # as a plain function
    send_some_config_txns(looper, sdk_pool_handle, sdk_wallet_client, keys)

    # Make sure new node got out of sync
    for node in txnPoolNodeSet[:-1]:
        assert new_node.configLedger.size < node.configLedger.size

    restarted_node = start_stopped_node(new_node, looper, tconf, tdir,
                                        allPluginsPath)
    txnPoolNodeSet[-1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))

    waitNodeDataEquality(looper,
                         restarted_node,
                         *txnPoolNodeSet[:-1],
                         exclude_from_check=['check_last_ordered_3pc_backup'])
def testAdd2NewNodes(looper, txnPoolNodeSet, tdirWithPoolTxns, tconf, steward1,
                     stewardWallet, allPluginsPath):
    """
    Add 2 new nodes to trigger replica addition and primary election
    """
    for nodeName in ("Zeta", "Eta"):
        newStewardName = "testClientSteward"+randomString(3)
        newSteward, newStewardWallet, newNode = addNewStewardAndNode(looper,
                                                   steward1,
                                                   stewardWallet,
                                                   newStewardName,
                                                   nodeName,
                                                   tdirWithPoolTxns, tconf,
                                                   allPluginsPath)
        txnPoolNodeSet.append(newNode)
        looper.run(checkNodesConnected(txnPoolNodeSet))
        logger.debug("{} connected to the pool".format(newNode))
        looper.run(eventually(checkNodeLedgersForEquality, newNode,
                              *txnPoolNodeSet[:-1], retryWait=1, timeout=7))

    f = getMaxFailures(len(txnPoolNodeSet))

    def checkFValue():
        for node in txnPoolNodeSet:
            assert node.f == f
            assert len(node.replicas) == (f + 1)

    looper.run(eventually(checkFValue, retryWait=1, timeout=5))
    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1,
                               timeout=5)
def test_propagate_primary_after_primary_restart_view_0(
        looper, txnPoolNodeSet, tconf, sdk_pool_handle, sdk_wallet_steward,
        tdir, allPluginsPath):
    """
    Delay instance change msgs to prevent a view change during primary restart,
    in order to test propagate primary for the primary node.
    ppSeqNo should be > 0 to be able to check that propagate primary restores
    all indices correctly.
    Case: viewNo == 0
    """
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)

    old_ppseqno = _get_ppseqno(txnPoolNodeSet)
    assert (old_ppseqno > 0)

    old_viewNo = checkViewNoForNodes(txnPoolNodeSet)
    old_primary = get_master_primary_node(txnPoolNodeSet)

    delay_instance_change(txnPoolNodeSet, IC_DELAY_SEC)

    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            old_primary,
                                            stopNode=True)

    looper.removeProdable(old_primary)

    logger.info("Restart node {}".format(old_primary))

    restartedNode = start_stopped_node(old_primary,
                                       looper,
                                       tconf,
                                       tdir,
                                       allPluginsPath,
                                       delay_instance_change_msgs=False)
    idx = [
        i for i, n in enumerate(txnPoolNodeSet) if n.name == restartedNode.name
    ][0]
    txnPoolNodeSet[idx] = restartedNode

    restartedNode.nodeIbStasher.delay(icDelay(IC_DELAY_SEC))

    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)

    new_viewNo = checkViewNoForNodes(txnPoolNodeSet)
    assert (new_viewNo == old_viewNo)

    new_primary = get_master_primary_node(txnPoolNodeSet)
    assert (new_primary.name == old_primary.name)

    # check that ppSeqNo is the same across all nodes
    _get_ppseqno(txnPoolNodeSet)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)

    new_ppseqno = _get_ppseqno(txnPoolNodeSet)
    assert (new_ppseqno > old_ppseqno)
def test_removed_replica_restored_on_view_change(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        tconf, tdir, allPluginsPath, chkFreqPatched, view_change):
    """
    1. Remove replica on some node which is not master primary
    2. Reconnect the node which was master primary so far
    3. Check that nodes and replicas are correctly added
    """
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    node = get_last_master_non_primary_node(txnPoolNodeSet)
    start_replicas_count = node.replicas.num_replicas
    instance_id = start_replicas_count - 1

    node.replicas.remove_replica(instance_id)
    check_replica_removed(node, start_replicas_count, instance_id)

    # trigger view change on all nodes
    master_primary = get_master_primary_node(txnPoolNodeSet)
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, master_primary)
    txnPoolNodeSet.remove(master_primary)
    looper.removeProdable(master_primary)
    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)

    restarted_node = start_stopped_node(master_primary, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(restarted_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))

    waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=1,
                      customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)

    assert start_replicas_count == node.replicas.num_replicas
def testReplayLedger(addNymTxn, addedRawAttribute, submittedPublicKeys,
                     nodeSet, looper, tconf, tdirWithPoolTxns, allPluginsPath,
                     txnPoolNodeSet):
    """
    Stop the first node (which will clean up its graph db too),
    then restart it.
    """
    nodeToStop = nodeSet[0]
    nodeToStop.cleanupOnStopping = False
    nodeToStop.stop()
    looper.removeProdable(nodeToStop)
    #client = nodeToStop.graphStore.client
    #client.db_drop(client._connection.db_opened)
    newNode = TestNode(nodeToStop.name,
                       basedirpath=tdirWithPoolTxns,
                       config=tconf,
                       pluginPaths=allPluginsPath,
                       ha=nodeToStop.nodestack.ha,
                       cliha=nodeToStop.clientstack.ha)
    looper.add(newNode)
    nodeSet[0] = newNode
    looper.run(checkNodesConnected(nodeSet, overrideTimeout=30))
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   newNode,
                   *txnPoolNodeSet[1:4],
                   retryWait=1,
                   timeout=15))

    compareGraph("NYM", nodeSet)
    compareGraph("IssuerKey", nodeSet)
    compareGraph("Schema", nodeSet)
def test_fill_ts_store_after_catchup(txnPoolNodeSet, looper, sdk_pool_handle,
                                     sdk_wallet_steward, tconf, tdir,
                                     allPluginsPath):
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
    node_to_disconnect = txnPoolNodeSet[-1]

    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_replies = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                            sdk_pool_handle,
                                            sdk_wallet_steward, 2)

    node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf,
                                            tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))

    waitNodeDataEquality(looper,
                         node_to_disconnect,
                         *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    req_handler = node_to_disconnect.read_manager.request_handlers[GET_BUY]
    for reply in sdk_replies:
        key = BuyHandler.prepare_buy_key(get_from(reply[1]['result']),
                                         get_req_id(reply[1]['result']))
        root_hash = req_handler.database_manager.ts_store.get_equal_or_prev(
            get_txn_time(reply[1]['result']))
        assert root_hash
        from_state = req_handler.state.get_for_root_hash(root_hash=root_hash,
                                                         key=key)
        assert domain_state_serializer.deserialize(from_state)['amount'] == \
               get_payload_data(reply[1]['result'])['amount']
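

# `ts_store.get_equal_or_prev` above maps a txn timestamp to the newest state
# root recorded at or before that time, which is what lets the restarted node
# answer historical reads. A self-contained sketch of that lookup (the real
# store is persistent key-value storage; this only shows the ordering logic):
import bisect

class TsStoreSketch:
    def __init__(self):
        self._times = []  # sorted timestamps
        self._roots = []  # state root hash recorded at each timestamp

    def set(self, ts, root):
        idx = bisect.bisect_left(self._times, ts)
        self._times.insert(idx, ts)
        self._roots.insert(idx, root)

    def get_equal_or_prev(self, ts):
        idx = bisect.bisect_right(self._times, ts)
        return self._roots[idx - 1] if idx else None

store = TsStoreSketch()
store.set(100, b'root-a')
store.set(200, b'root-b')
assert store.get_equal_or_prev(150) == b'root-a'
assert store.get_equal_or_prev(200) == b'root-b'
assert store.get_equal_or_prev(50) is None
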
def testAddInactiveNodeThenActivate(looper, txnPoolNodeSet,
                                    sdk_wallet_steward,
                                    sdk_pool_handle, tdir, tconf, allPluginsPath):
    new_steward_name = "testClientSteward" + randomString(3)
    new_node_name = "Kappa"

    # adding a new node without the SERVICES field,
    # which means the node is in the inactive state
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     new_steward_name,
                                     new_node_name,
                                     tdir,
                                     tconf,
                                     allPluginsPath,
                                     services=None)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    new_node = update_node_data_and_reconnect(looper, txnPoolNodeSet + [new_node],
                                              new_steward_wallet,
                                              sdk_pool_handle,
                                              new_node,
                                              None, None,
                                              None, None,
                                              tdir, tconf)
    txnPoolNodeSet.append(new_node)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle)
def sdk_add_2_nodes(looper, txnPoolNodeSet, sdk_pool_handle,
                    sdk_wallet_steward, tdir, tconf, allPluginsPath):
    names = ("Zeta", "Eta")
    new_nodes = []
    for node_name in names:
        new_steward_name = "testClientSteward" + randomString(3)
        new_steward_wallet, new_node = \
            sdk_add_new_steward_and_node(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_steward,
                                         new_steward_name,
                                         node_name,
                                         tdir,
                                         tconf,
                                         allPluginsPath)
        txnPoolNodeSet.append(new_node)
        looper.run(checkNodesConnected(txnPoolNodeSet))
        waitNodeDataEquality(
            looper,
            new_node,
            *txnPoolNodeSet[:-1],
            exclude_from_check=['check_last_ordered_3pc_backup'])
        sdk_pool_refresh(looper, sdk_pool_handle)
        new_nodes.append(new_node)
    return new_nodes
def test_upper_bound_of_checkpoint_after_catchup_is_divisible_by_chk_freq(
        chkFreqPatched, looper, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, sdk_wallet_client, tdir,
        tconf, allPluginsPath):
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 4)

    _, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        'EpsilonSteward', 'Epsilon', tdir, tconf,
        allPluginsPath=allPluginsPath)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1],
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    # Epsilon did not participate in ordering of the batch with EpsilonSteward
    # NYM transaction and the batch with Epsilon NODE transaction.
    # Epsilon got these transactions via catch-up.

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    for replica in new_node.replicas.values():
        assert len(replica.checkpoints) == 1
        assert next(iter(replica.checkpoints)) == (7, 10)
def test_number_txns_in_catchup_and_vc_queue_valid(looper,
                                                   txnPoolNodeSet,
                                                   tconf,
                                                   sdk_pool_handle,
                                                   sdk_wallet_steward,
                                                   tdir,
                                                   allPluginsPath):
    num_txns = 5
    master_node = get_master_primary_node(txnPoolNodeSet)
    master_node_index = txnPoolNodeSet.index(master_node)
    other_nodes = txnPoolNodeSet.copy()
    other_nodes.remove(master_node)
    old_view = master_node.viewNo
    expected_view_no = old_view + 1
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, master_node, stopNode=True)
    looper.removeProdable(master_node)
    looper.run(eventually(checkViewNoForNodes, other_nodes, expected_view_no, retryWait=1,
                          timeout=tconf.NEW_VIEW_TIMEOUT))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_send_random_and_check(looper, other_nodes, sdk_pool_handle, sdk_wallet_steward, num_txns)
    master_node = start_stopped_node(master_node, looper, tconf,
                                     tdir, allPluginsPath)
    txnPoolNodeSet[master_node_index] = master_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, master_node, *txnPoolNodeSet[-1:],
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    latest_info = master_node._info_tool.info
    assert latest_info['Node_info']['Catchup_status']['Number_txns_in_catchup'][1] == num_txns
    assert latest_info['Node_info']['View_change_status']['View_No'] == expected_view_no
    for n in other_nodes:
        assert n._info_tool.info['Node_info']['View_change_status']['Last_complete_view_no'] == expected_view_no
def testNodeSchedulesUpgradeAfterRestart(upgradeScheduled, looper, nodeSet,
                                         validUpgrade, testNodeClass,
                                         tdirWithPoolTxns, tconf,
                                         allPluginsPath):
    names = []
    while nodeSet:
        node = nodeSet.pop()
        names.append(node.name)
        node.cleanupOnStopping = False
        looper.removeProdable(node)
        node.stop()
        del node

    for nm in names:
        node = testNodeClass(nm,
                             basedirpath=tdirWithPoolTxns,
                             config=tconf,
                             pluginPaths=allPluginsPath)
        looper.add(node)
        nodeSet.append(node)

    looper.run(checkNodesConnected(nodeSet))
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=10)
    looper.run(
        eventually(checkUpgradeScheduled,
                   nodeSet,
                   validUpgrade[VERSION],
                   retryWait=1,
                   timeout=10))
def add_new_node(helpers, looper, node_set, sdk_wallet_steward, current_amount,
                 seq_no, fees_set, sdk_pool_handle, tdir, tconf,
                 allPluginsPath, address, do_post_node_creation):
    new_did, verkey = helpers.wallet.create_did(sdk_wallet=sdk_wallet_steward)
    req = helpers.request.nym(sdk_wallet=sdk_wallet_steward,
                              alias="new_steward",
                              role=STEWARD_STRING,
                              dest=new_did,
                              verkey=verkey)
    utxos = [{ADDRESS: address, AMOUNT: current_amount, f.SEQ_NO.nm: seq_no}]
    req = add_fees_request_with_address(helpers,
                                        fees_set,
                                        req,
                                        address,
                                        utxos=utxos)
    # `addresses` was not defined in this helper; the call presumably expects
    # a list of source addresses, so the single `address` argument is wrapped
    # (an assumed fix)
    current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers,
                                                             fees_set,
                                                             seq_no,
                                                             looper,
                                                             [address],
                                                             current_amount,
                                                             nym_with_fees=req)
    new_steward_wallet_handle = sdk_wallet_steward[0], new_did
    new_node = sdk_add_new_node(looper,
                                sdk_pool_handle,
                                new_steward_wallet_handle,
                                'Epsilon',
                                tdir,
                                tconf,
                                allPluginsPath=allPluginsPath,
                                do_post_node_creation=do_post_node_creation)
    node_set.append(new_node)
    looper.run(checkNodesConnected(node_set))
    waitNodeDataEquality(looper, new_node, *node_set[:-1])
def testNodePortChanged(looper, txnPoolNodeSet, tdirWithPoolTxns,
                        tconf, steward1, stewardWallet, nodeThetaAdded):
    """
    A running node's port is changed
    """
    newSteward, newStewardWallet, newNode = nodeThetaAdded
    nodeNewHa, clientNewHa = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(newNode, nodeNewHa,
                                                   clientNewHa))
    changeNodeHa(looper, newSteward, newStewardWallet, newNode,
                 nodeHa=nodeNewHa, clientHa=clientNewHa)
    newNode.stop()
    looper.removeProdable(name=newNode.name)
    logger.debug("{} starting with HAs {} {}".format(newNode, nodeNewHa,
                                                     clientNewHa))
    node = TestNode(newNode.name, basedirpath=tdirWithPoolTxns, config=tconf,
                    ha=nodeNewHa, cliha=clientNewHa)
    looper.add(node)
    # The last element of `txnPoolNodeSet` is the node Theta that was just
    # stopped
    txnPoolNodeSet[-1] = node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(eventually(checkNodeLedgersForEquality, node,
                          *txnPoolNodeSet[:-1], retryWait=1, timeout=10))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
def nodeThetaAdded(looper,
                   txnPoolNodeSet,
                   tdirWithPoolTxns,
                   tconf,
                   steward1,
                   stewardWallet,
                   allPluginsPath,
                   testNodeClass=None,
                   testClientClass=None,
                   name=None):
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = name or "Theta"
    newSteward, newStewardWallet, newNode = addNewStewardAndNode(
        looper,
        steward1,
        stewardWallet,
        newStewardName,
        newNodeName,
        tdirWithPoolTxns,
        tconf,
        allPluginsPath,
        nodeClass=testNodeClass,
        clientClass=testClientClass)
    txnPoolNodeSet.append(newNode)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
    return newSteward, newStewardWallet, newNode
def testNodeRejectingInvalidTxns(conf, txnPoolNodeSet, patched_node,
                                 nodeCreatedAfterSomeTxns):
    """
    A newly joined node is catching up and sends catchup requests to other
    nodes, but one of the nodes replies with incorrect transactions. The newly
    joined node detects that, rejects the transactions and blacklists that
    node. It thus cannot complete the process until the timeout and then
    requests the missing transactions.
    """
    looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns
    bad_node = patched_node

    do_not_tell_clients_about_newly_joined_node(txnPoolNodeSet)

    logger.debug('Catchup request processor of {} patched'.format(bad_node))

    looper.run(checkNodesConnected(txnPoolNodeSet))

    # catchup #1 -> CatchupTransactionsTimeout -> catchup #2
    catchup_timeout = waits.expectedPoolCatchupTime(len(txnPoolNodeSet) + 1)
    timeout = 2 * catchup_timeout + conf.CatchupTransactionsTimeout

    # have to skip seqno_db check because the txns are not executed
    # on the new node
    waitNodeDataEquality(looper,
                         newNode,
                         *txnPoolNodeSet[:-1],
                         customTimeout=timeout)

    assert newNode.isNodeBlacklisted(bad_node.name)
def setupNodesAndClient(looper: Looper,
                        nodes: Sequence[TestNode],
                        nodeReg=None,
                        tmpdir=None):
    looper.run(checkNodesConnected(nodes))
    ensureElectionsDone(looper=looper, nodes=nodes)
    return setupClient(looper, nodes, nodeReg=nodeReg, tmpdir=tmpdir)
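
# A typical call shape for the helper above (a sketch; the return value is
# whatever `setupClient` builds, and the fixture names are assumptions):
#
#     client = setupNodesAndClient(looper, nodes, tmpdir=tdir)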
def test_integration_setup_last_ordered_after_catchup(looper, txnPoolNodeSet,
                                                      sdk_wallet_steward,
                                                      sdk_wallet_client,
                                                      sdk_pool_handle, tdir,
                                                      tconf, allPluginsPath):
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    _, new_node = sdk_add_new_steward_and_node(looper,
                                               sdk_pool_handle,
                                               sdk_wallet_steward,
                                               'EpsilonSteward',
                                               'Epsilon',
                                               tdir,
                                               tconf,
                                               allPluginsPath=allPluginsPath)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper,
                         new_node,
                         *txnPoolNodeSet[:-1],
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.run(eventually(replicas_synced, new_node))
    for node in txnPoolNodeSet:
        for replica in node.replicas.values():
            assert replica.last_ordered_3pc == (0, 4)
            if not replica.isMaster:
                assert get_count(replica,
                                 replica._request_three_phase_msg) == 0
def testAddInactiveNodeThenActivate(looper, txnPoolNodeSet, tdirWithPoolTxns,
                                    tconf, steward1, stewardWallet,
                                    allPluginsPath):
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = "Kappa"

    # adding a new node without the SERVICES field,
    # which means the node is in the inactive state
    def del_services(op):
        del op[DATA][SERVICES]

    newSteward, newStewardWallet, newNode = \
        addNewStewardAndNode(looper,
                             steward1, stewardWallet,
                             newStewardName, newNodeName,
                             tdirWithPoolTxns, tconf, allPluginsPath,
                             transformNodeOpFunc=del_services)
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # turn the new node on
    node_data = {ALIAS: newNode.name, SERVICES: [VALIDATOR]}

    updateNodeDataAndReconnect(looper, newSteward, newStewardWallet, newNode,
                               node_data, tdirWithPoolTxns, tconf,
                               txnPoolNodeSet + [newNode])
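The SERVICES field is what toggles a node between inactive and validating. A sketch of the two data shapes involved, using constants this test already references; the exact operation layout is an assumption:

inactive_data = {ALIAS: 'Kappa'}                        # no SERVICES: inactive
active_data = {ALIAS: 'Kappa', SERVICES: [VALIDATOR]}   # promoted to validator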
Example n. 39
def sdk_node_theta_added(looper,
                         txnPoolNodeSet,
                         tdir,
                         tconf,
                         sdk_pool_handle,
                         sdk_wallet_steward,
                         allPluginsPath,
                         testNodeClass=None,
                         name=None):
    new_steward_name = "testClientSteward" + randomString(3)
    new_node_name = name or "Theta"
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     new_steward_name,
                                     new_node_name,
                                     tdir,
                                     tconf,
                                     allPluginsPath,
                                     nodeClass=testNodeClass)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_steward_wallet, new_node
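Later examples on this page consume this helper as a pytest fixture (see testNodeKeysChanged below); called directly, it would look roughly like this hypothetical sketch:

new_steward_wallet, new_node = sdk_node_theta_added(
    looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle,
    sdk_wallet_steward, allPluginsPath, name='Theta1')  # name is optional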
def test_order_after_demote_and_restart(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client,
                                        tdir, tconf, allPluginsPath,
                                        sdk_wallet_stewards):
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 3, 3)

    primary_node = txnPoolNodeSet[0]
    node_to_stop = txnPoolNodeSet[1]
    node_to_demote = txnPoolNodeSet[2]
    txnPoolNodeSet.remove(node_to_demote)

    node_to_stop.cleanupOnStopping = True
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2)

    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote)

    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name, config_helper=config_helper, config=tconf,
                              pluginPaths=allPluginsPath, ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 1, 1)

    def get_current_bls_keys(node):
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

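    # The restarted node rebuilds its BLS key register from the pool ledger
    # during catchup; matching the primary's registry suggests the demotion
    # was applied consistently (a reading of what this check verifies)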
    assert get_current_bls_keys(restarted_node) == get_current_bls_keys(primary_node)
Example n. 41
def add_new_node(looper,
                 nodes,
                 sdk_pool_handle,
                 sdk_wallet_steward,
                 tdir,
                 tconf,
                 all_plugins_path,
                 name=None):
    node_name = name or "Psi"
    new_steward_name = "testClientSteward" + randomString(3)
    _, new_node = sdk_add_new_steward_and_node(looper,
                                               sdk_pool_handle,
                                               sdk_wallet_steward,
                                               new_steward_name,
                                               node_name,
                                               tdir,
                                               tconf,
                                               allPluginsPath=all_plugins_path)
    nodes.append(new_node)
    looper.run(checkNodesConnected(nodes))
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(nodes))
    waitNodeDataEquality(looper,
                         new_node,
                         *nodes[:-1],
                         customTimeout=timeout,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_node
Example n. 42
def test_cancel_request_cp_and_ls_after_catchup(txnPoolNodeSet, looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward, tconf,
                                                tdir, allPluginsPath):
    '''Test that the scheduled re-requests for ledger statuses and
    consistency proofs are cancelled after catchup.'''
    node_to_disconnect = txnPoolNodeSet[-1]
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    # restart node
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2)
    # add node_to_disconnect to pool
    node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf,
                                            tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet)

    # check that the scheduled re-requests for ledger statuses and
    # consistency proofs were cancelled
    for event in node_to_disconnect.timer._events:
        name = event.callback.__name__
        assert name != '_reask_for_ledger_status'
        assert name != '_reask_for_last_consistency_proof'
def test_removed_replica_restored_on_view_change(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        tconf, tdir, allPluginsPath, chkFreqPatched, view_change):
    """
    1. Remove a replica on some node which is not the master primary
    2. Reconnect the node which was the master primary so far
    3. Check that the nodes and replicas are correctly added
    """
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    node = get_last_master_non_primary_node(txnPoolNodeSet)
    start_replicas_count = node.replicas.num_replicas
    instance_id = start_replicas_count - 1

    node.replicas.remove_replica(instance_id)
    check_replica_removed(node, start_replicas_count, instance_id)

    # trigger view change on all nodes
    master_primary = get_master_primary_node(txnPoolNodeSet)
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, master_primary)
    txnPoolNodeSet.remove(master_primary)
    looper.removeProdable(master_primary)
    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)

    restarted_node = start_stopped_node(master_primary, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(restarted_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))

    waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=1,
                      customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)

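    # The view change re-creates the node's full set of replicas, so the
    # replica removed earlier is expected to be restored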
    assert start_replicas_count == node.replicas.num_replicas
Example n. 44
def sdk_node_theta_added(looper,
                         txnPoolNodeSet,
                         tdir,
                         tconf,
                         sdk_pool_handle,
                         sdk_wallet_steward,
                         allPluginsPath,
                         testNodeClass=TestNode,
                         name=None):
    new_steward_name = "testClientSteward" + randomString(3)
    new_node_name = name or "Theta"
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     new_steward_name,
                                     new_node_name,
                                     tdir,
                                     tconf,
                                     allPluginsPath,
                                     nodeClass=testNodeClass)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_steward_wallet, new_node
Example n. 45
def updateNodeDataAndReconnect(looper, steward, stewardWallet, node, node_data,
                               tdirWithPoolTxns, tconf, txnPoolNodeSet):
    updateNodeData(looper, steward, stewardWallet, node, node_data)
    # restart the Node with new HA
    node.stop()
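    # each HA field comes from node_data, falling back to the node's current
    # value when the field is absent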
    node_alias = node_data.get(ALIAS, None) or node.name
    node_ip = node_data.get(NODE_IP, None) or node.nodestack.ha.host
    node_port = node_data.get(NODE_PORT, None) or node.nodestack.ha.port
    client_ip = node_data.get(CLIENT_IP, None) or node.clientstack.ha.host
    client_port = node_data.get(CLIENT_PORT, None) or node.clientstack.ha.port
    looper.removeProdable(name=node.name)
    restartedNode = TestNode(node_alias,
                             basedirpath=tdirWithPoolTxns,
                             base_data_dir=tdirWithPoolTxns,
                             config=tconf,
                             ha=HA(node_ip, node_port),
                             cliha=HA(client_ip, client_port))
    looper.add(restartedNode)

    # replace node in txnPoolNodeSet
    try:
        idx = next(i for i, n in enumerate(txnPoolNodeSet)
                   if n.name == node.name)
    except StopIteration:
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    return restartedNode
def testNodeRemoveUnknownRemote(allPluginsPath, tdir_for_func, tconf_for_func,
                                looper,
                                txnPoolNodeSetNotStarted):
    """
    The nodes Alpha and Beta know about each other so they should connect but
    they should remove remote for C when it tries to connect to them
    """
    nodes = txnPoolNodeSetNotStarted[:2]

    for node in nodes:
        tellKeysToOthers(node, nodes)

    A, B = nodes
    for node in nodes:
        looper.add(node)
    looper.run(checkNodesConnected(nodes))

    C = txnPoolNodeSetNotStarted[2]
    for node in nodes:
        tellKeysToOthers(node, [C, ])

    looper.add(C)
    looper.runFor(5)

    stopNodes([C, ], looper)
    stopNodes([A, B], looper)
    for node in [A, B, C]:
        looper.removeProdable(node)
def testNodeKeysChanged(looper, txnPoolNodeSet, tdir,
                        tconf, sdk_node_theta_added,
                        sdk_pool_handle,
                        allPluginsPath=None):
    new_steward_wallet, new_node = sdk_node_theta_added

    new_node.stop()
    looper.removeProdable(name=new_node.name)
    nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha)
    sigseed = randomString(32).encode()
    verkey = base58.b58encode(SimpleSigner(seed=sigseed).naclSigner.verraw).decode("utf-8")
    sdk_change_node_keys(looper, new_node, new_steward_wallet, sdk_pool_handle, verkey)

    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    initNodeKeysForBothStacks(new_node.name, config_helper.keys_dir, sigseed,
                              override=True)

    logger.debug("{} starting with HAs {} {}".format(new_node, nodeHa, nodeCHa))

    node = TestNode(new_node.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=nodeHa, cliha=nodeCHa, pluginPaths=allPluginsPath)
    looper.add(node)
    # The last element of `txnPoolNodeSet` is the node Theta that was just
    # stopped
    txnPoolNodeSet[-1] = node

    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node, *txnPoolNodeSet[:-1])
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle)
def test_restarted_node_catches_up_config_ledger_txns(looper,
                                                      some_config_txns_done,
                                                      txnPoolNodeSet,
                                                      sdk_wallet_client,
                                                      sdk_pool_handle,
                                                      sdk_new_node_caught_up,
                                                      keys,
                                                      tconf,
                                                      tdir,
                                                      allPluginsPath):
    """
    A node is stopped, a few config ledger txns happen,
    the stopped node is started and catches up the config ledger
    """
    new_node = sdk_new_node_caught_up
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, new_node, stopNode=True)
    looper.removeProdable(new_node)

    # Do some config txns, reusing a fixture as a plain function; some
    # arguments are passed as None since they only matter when it runs as a
    # fixture (pre-requisites)
    send_some_config_txns(looper, sdk_pool_handle, sdk_wallet_client, keys)

    # Make sure new node got out of sync
    for node in txnPoolNodeSet[:-1]:
        assert new_node.configLedger.size < node.configLedger.size

    restarted_node = start_stopped_node(new_node, looper, tconf, tdir,
                                        allPluginsPath)
    txnPoolNodeSet[-1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))

    waitNodeDataEquality(looper, restarted_node, *txnPoolNodeSet[:-1])
Example n. 49
def add_started_node(looper, new_node, node_ha, client_ha, txnPoolNodeSet,
                     sdk_pool_handle, sdk_wallet_steward, bls_key, key_proof):
    '''
    Adds an already created node to the pool, that is, sends the NODE txn.
    Makes sure that the node is actually added and connected to all other
    nodes.
    '''
    new_steward_wallet_handle = sdk_add_new_nym(looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward,
                                                "Steward" + new_node.name,
                                                role=STEWARD_STRING)
    node_name = new_node.name
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper,
                         new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest,
                         node_name,
                         node_ha[0],
                         node_ha[1],
                         client_ha[0],
                         client_ha[1],
                         services=[VALIDATOR],
                         bls_key=bls_key,
                         key_proof=key_proof)

    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)

    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
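The update sent through sdk_send_update_node above corresponds to NODE txn data along these lines (field constants as used elsewhere on this page; BLS fields omitted, and the exact layout is an assumption):

node_txn_data = {
    ALIAS: node_name,
    NODE_IP: node_ha[0], NODE_PORT: node_ha[1],
    CLIENT_IP: client_ha[0], CLIENT_PORT: client_ha[1],
    SERVICES: [VALIDATOR],
}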
def testPrimaryElectionCase5(case5Setup, looper, keySharedNodes):
    """
    Case 5 - A node making primary declarations for multiple other nodes.
    Consider 4 nodes A, B, C, and D. Let's say node B is malicious and
    declares node C as primary to all nodes.
    Node B then also declares node D as primary to all nodes.
    """
    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    looper.run(checkNodesConnected(nodeSet))

    BRep = Replica.generateName(B.name, 0)
    CRep = Replica.generateName(C.name, 0)
    DRep = Replica.generateName(D.name, 0)

    # Node B first sends PRIMARY msgs for Node C to all nodes
    B.send(Primary(CRep, 0, B.viewNo))
    # Node B sends PRIMARY msgs for Node D to all nodes
    B.send(Primary(DRep, 0, B.viewNo))

    # Ensure elections are done
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=45)

    # All of nodes A, C, and D (node B is malicious anyway, so it is not
    # considered) should have the primary declaration for node C from node B,
    # since node B nominated node C first
    for node in [A, C, D]:
        logger.debug("node {} should have primary declaration for C from node B".format(node))
        assert node.elector.primaryDeclarations[0][BRep] == CRep
def test_state_regenerated_from_ledger(looper, tdirWithPoolTxns,
                                       tdirWithDomainTxnsUpdated,
                                       nodeSet, tconf,
                                       trustee, trusteeWallet,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from ledger after start.
    Checking ATTRIB txns too since they store some data off ledger too
    """
    trust_anchors = []
    for i in range(5):
        trust_anchors.append(getClientAddedWithRole(nodeSet,
                                                    tdirWithPoolTxns, looper,
                                                    trustee, trusteeWallet,
                                                    'TA' + str(i),
                                                    role=TRUST_ANCHOR))
        addRawAttribute(looper, *trust_anchors[-1], randomString(6),
                        randomString(10), dest=trust_anchors[-1][1].defaultId)

    for tc, tw in trust_anchors:
        for i in range(3):
            getClientAddedWithRole(nodeSet,
                                   tdirWithPoolTxns,
                                   looper,
                                   tc, tw,
                                   'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv._dbPath
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop.name, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    restarted_node = TestNode(node_to_stop.name, basedirpath=tdirWithPoolTxns,
                              config=tconf, pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for tc, tw in trust_anchors:
        getClientAddedWithRole(nodeSet,
                               tdirWithPoolTxns,
                               looper,
                               tc, tw,
                               'NP--{}'.format(tc.name))

    ensure_all_nodes_have_same_data(looper, nodeSet)
def elections_done(case_6_setup, looper, txnPoolNodeSet):
    # Make sure elections are done successfully
    A, B, C, D = txnPoolNodeSet
    looper.run(checkNodesConnected(txnPoolNodeSet))

    inst_ids = (0, 1)

    def chk():
        # Check that each Primary is received by A before A has sent any
        # Primary
        primary_recv_times = {
            i: [entry.starttime for entry in A.elector.spylog.getAll(
                A.elector.processPrimary) if entry.params['prim'].instId == i]
            for i in inst_ids
        }
        primary_send_times = {
            i: [entry.starttime for entry in A.elector.spylog.getAll(
                A.elector.sendPrimary) if entry.params['instId'] == i]
            for i in inst_ids
        }

        for i in inst_ids:
            assert primary_send_times[i][0] > max(primary_recv_times[i])

    looper.run(eventually(chk, retryWait=1, timeout=15))
    checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1)

    # Make sure no Nominations or Primary are received by A from B
    for i in inst_ids:
        assert B.replicas[i].name not in A.elector.nominations[i]
        assert B.replicas[i].name not in A.elector.primaryDeclarations[i]
def test_get_last_ordered_timestamp_after_catchup(looper,
                                                  txnPoolNodeSet,
                                                  sdk_pool_handle,
                                                  sdk_wallet_steward,
                                                  tconf,
                                                  tdir,
                                                  allPluginsPath):
    node_to_disconnect = txnPoolNodeSet[-1]
    reply_before = sdk_send_random_and_check(looper,
                                             txnPoolNodeSet,
                                             sdk_pool_handle,
                                             sdk_wallet_steward,
                                             1)[0][1]
    looper.runFor(2)
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    reply = sdk_send_random_and_check(looper,
                                      txnPoolNodeSet,
                                      sdk_pool_handle,
                                      sdk_wallet_steward,
                                      1)[0][1]

    node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf,
                                            tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet[:-1])
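    # The timestamp recovered from state must match the txn ordered while the
    # node was offline (received via catchup), not the pre-restart txn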
    ts_from_state = node_to_disconnect.master_replica._get_last_timestamp_from_state(DOMAIN_LEDGER_ID)
    assert ts_from_state == get_txn_time(reply['result'])
    assert ts_from_state != get_txn_time(reply_before['result'])
def testPrimaryElectionCase4(case4Setup, looper):
    """
    Case 4 - A node making multiple primary declarations for a particular node.
    Consider 4 nodes A, B, C and D. Let's say node B is malicious and
    repeatedly declares node D as primary
    """
    allNodes = case4Setup
    A, B, C, D = allNodes

    looper.run(checkNodesConnected(allNodes))

    # Node B sends multiple declarations of node D's 0th protocol instance as
    # primary to all nodes
    for i in range(5):
        B.send(Primary(D.name, 0, B.viewNo))

    # None of nodes A, C, and D (node B is malicious anyway, so it is not
    # considered) should have more than one primary declaration for node D,
    # since node D is slow. The single primary declaration for node D that
    # nodes A, C and D might hold would come from node B
    def x():
        primDecs = list(node.elector.primaryDeclarations[0].values())
        assert primDecs.count(D.name) <= 1

    for node in (A, C, D):
        looper.run(eventually(x, retryWait=.5, timeout=2))

    ensureElectionsDone(looper=looper, nodes=allNodes,
                        retryWait=1, timeout=45)

    # Node D should not have any primary replica
    assert not D.hasPrimary
Example n. 55
def nodeSetWithNodeAddedAfterSomeTxns(txnPoolNodeSet, nodeCreatedAfterSomeTxns):
    looper, newNode, client, wallet, newStewardClient, newStewardWallet = nodeCreatedAfterSomeTxns
    txnPoolNodeSet.append(newNode)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=10))
    looper.run(newStewardClient.ensureConnectedToNodes())
    looper.run(client.ensureConnectedToNodes())
    return looper, newNode, client, wallet, newStewardClient, newStewardWallet
Example n. 56
def testNodeCatchupAfterRestart(newNodeCaughtUp, txnPoolNodeSet,
                                nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """

    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    logger.debug("Stopping node {} with pool ledger size {}".
                 format(newNode, newNode.poolManager.txnSeqNo))
    ensureNodeDisconnectedFromPool(looper, txnPoolNodeSet, newNode)
    # for n in txnPoolNodeSet[:4]:
    #     for r in n.nodestack.remotes.values():
    #         if r.name == newNode.name:
    #             r.removeStaleCorrespondents()
    # looper.run(eventually(checkNodeDisconnectedFrom, newNode.name,
    #                       txnPoolNodeSet[:4], retryWait=1, timeout=5))
    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    logger.debug("Starting the stopped node, {}".format(newNode))
    newNode.start(looper.loop)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(eventually(checkNodeLedgersForEquality, newNode,
                          *txnPoolNodeSet[:4], retryWait=1, timeout=15))
Example n. 57
def sdk_node_set_with_node_added_after_some_txns(
        txnPoolNodeSet, sdk_node_created_after_some_txns):
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_created_after_some_txns
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    return looper, new_node, sdk_pool_handle, new_steward_wallet_handle
def test_steward_suspends_node_and_promote_with_new_ha(
        looper, txnPoolNodeSet,
        tdir, tconf,
        sdk_pool_handle,
        sdk_wallet_steward,
        sdk_node_theta_added,
        poolTxnStewardData,
        allPluginsPath):
    new_steward_wallet, new_node = sdk_node_theta_added
    looper.run(checkNodesConnected(txnPoolNodeSet + [new_node]))
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check that the suspended node does not exist in any nodeReg or in the
    # remotes of nodes or clients

    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))

    new_node.stop()
    looper.removeProdable(new_node)

    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    node_ha, client_ha = genHa(2)
    node_nym = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet,
                         sdk_pool_handle, node_nym, new_node.name,
                         node_ha.host, node_ha.port,
                         client_ha.host, client_ha.port,
                         services=[VALIDATOR])
    new_node.nodestack.ha = node_ha
    new_node.clientstack.ha = client_ha
    nodeTheta = start_stopped_node(new_node, looper, tconf,
                                   tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    assert all(node.nodestack.remotes[new_node.name].ha == node_ha for node in txnPoolNodeSet)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle)
Example n. 59
def testKeyShareParty(tdir_for_func):
    """
    Connections to all nodes should be established successfully when key
    sharing is enabled.
    """
    nodeReg = genNodeReg(5)

    logger.debug("-----sharing keys-----")
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            looper.run(checkNodesConnected(nodeSet))

    logger.debug("-----key sharing done, connect after key sharing-----")
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as loop:
            loop.run(checkNodesConnected(nodeSet), msgAll(nodeSet))