def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet, tdir, tconf,
                                         sdk_pool_handle, sdk_wallet_client,
                                         sdk_wallet_steward):
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     'AnotherSteward' + randomString(4),
                                     'AnotherNode' + randomString(4),
                                     tdir,
                                     tconf)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)

    node_new_ha, client_new_ha = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(new_node, node_new_ha,
                                                   client_new_ha))

    # Making the change HA txn and confirming it succeeded
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name, node_new_ha.host,
                         node_new_ha.port, client_new_ha.host,
                         client_new_ha.port)

    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    # Starting the nodes again by creating `Node` objects, since that
    # simulates what happens when starting a node with the startup script
    restartedNodes = []
    for node in txnPoolNodeSet[:-1]:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 config_helper=config_helper,
                                 config=tconf,
                                 ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)

    # Starting the node whose HA was changed
    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    node = TestNode(new_node.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=node_new_ha,
                    cliha=client_new_ha)
    looper.add(node)
    restartedNodes.append(node)

    looper.run(checkNodesConnected(restartedNodes))
    waitNodeDataEquality(looper, node, *restartedNodes[:-1])
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, restartedNodes, sdk_wallet_client,
                               sdk_pool_handle)
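
Every example on this page repeats the same restart idiom: stop the node, remove it from the looper, rebuild a TestNode from a fresh PNodeConfigHelper, and hand the new object back to the looper. A minimal sketch of just that idiom, assuming plenum's standard pytest fixtures (looper, tconf, tdir) and a node taken from txnPoolNodeSet:

# Sketch of the shared restart idiom (not a complete test; fixture names
# are plenum's usual ones).
node.stop()
looper.removeProdable(node)

config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
restarted = TestNode(node.name,
                     config_helper=config_helper,
                     config=tconf,
                     ha=node.nodestack.ha,       # reuse the old node HA
                     cliha=node.clientstack.ha)  # reuse the old client HA
looper.add(restarted)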
def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet, tdir, tdirWithPoolTxns,
                                         tdirWithClientPoolTxns, tconf, steward1,
                                         stewardWallet, nodeThetaAdded,
                                         poolTxnClientData):
    newSteward, newStewardWallet, newNode = nodeThetaAdded
    nodeNewHa, clientNewHa = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(newNode, nodeNewHa,
                                                   clientNewHa))

    # Making the change HA txn and confirming it succeeded
    op = {
        ALIAS: newNode.name,
        NODE_IP: nodeNewHa.host,
        NODE_PORT: nodeNewHa.port,
        CLIENT_IP: clientNewHa.host,
        CLIENT_PORT: clientNewHa.port,
    }
    updateNodeData(looper, newSteward, newStewardWallet, newNode,
                   op)

    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    # Starting the nodes again by creating `Node` objects, since that
    # simulates what happens when starting a node with the startup script
    restartedNodes = []
    for node in txnPoolNodeSet[:-1]:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 config_helper=config_helper,
                                 config=tconf, ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)

    # Starting the node whose HA was changed
    config_helper = PNodeConfigHelper(newNode.name, tconf, chroot=tdir)
    node = TestNode(newNode.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=nodeNewHa, cliha=clientNewHa)
    looper.add(node)
    restartedNodes.append(node)

    looper.run(checkNodesConnected(restartedNodes))
    waitNodeDataEquality(looper, node, *restartedNodes[:-1])

    # Building a new client that reads from the genesis txn file
    # but is able to connect to all nodes
    client, wallet = buildPoolClientAndWallet(poolTxnClientData,
                                              tdirWithClientPoolTxns)
    looper.add(client)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, client,
                                                  *restartedNodes)
def testNodeKeysChanged(looper, txnPoolNodeSet, tdir,
                        tconf, sdk_node_theta_added,
                        sdk_pool_handle,
                        allPluginsPath=None):
    new_steward_wallet, new_node = sdk_node_theta_added

    new_node.stop()
    looper.removeProdable(name=new_node.name)
    nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha)
    sigseed = randomString(32).encode()
    verkey = base58.b58encode(SimpleSigner(seed=sigseed).naclSigner.verraw).decode("utf-8")
    sdk_change_node_keys(looper, new_node, new_steward_wallet, sdk_pool_handle, verkey)

    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    initNodeKeysForBothStacks(new_node.name, config_helper.keys_dir, sigseed,
                              override=True)

    logger.debug("{} starting with HAs {} {}".format(new_node, nodeHa, nodeCHa))

    node = TestNode(new_node.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=nodeHa, cliha=nodeCHa, pluginPaths=allPluginsPath)
    looper.add(node)
    # The last element of `txnPoolNodeSet` is the node Theta that was just
    # stopped
    txnPoolNodeSet[-1] = node

    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node, *txnPoolNodeSet[:-1])
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle)
Example #4
    def addNode(self, name: str) -> TestNode:
        if name in self.nodes:
            error("{} already added".format(name))
        assert name in self.nodeReg
        ha, cliname, cliha = self.nodeReg[name]

        config_helper = PNodeConfigHelper(name,
                                          self.config,
                                          chroot=self.tmpdir)

        seed = randomSeed()
        if self.keyshare:
            learnKeysFromOthers(config_helper.keys_dir, name,
                                self.nodes.values())

        testNodeClass = self.testNodeClass
        node = self.enter_context(
            testNodeClass(name=name,
                          ha=ha,
                          cliname=cliname,
                          cliha=cliha,
                          config_helper=config_helper,
                          primaryDecider=self.primaryDecider,
                          pluginPaths=self.pluginPaths,
                          seed=seed))

        if self.keyshare:
            tellKeysToOthers(node, self.nodes.values())

        self.nodes[name] = node
        self.__dict__[name] = node
        return node
Example #5
def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath,
                  after_restart_timeout=None, start_one_by_one=True, wait_for_elections=True):
    for node_to_stop in restart_set:
        node_to_stop.cleanupOnStopping = True
        node_to_stop.stop()
        looper.removeProdable(node_to_stop)

    rest_nodes = [n for n in nodeSet if n not in restart_set]
    for node_to_stop in restart_set:
        ensure_node_disconnected(looper, node_to_stop, nodeSet, timeout=2)

    if after_restart_timeout:
        looper.runFor(after_restart_timeout)

    for node_to_restart in restart_set.copy():
        config_helper = PNodeConfigHelper(node_to_restart.name, tconf, chroot=tdir)
        restarted_node = TestNode(node_to_restart.name, config_helper=config_helper, config=tconf,
                                  pluginPaths=allPluginsPath, ha=node_to_restart.nodestack.ha,
                                  cliha=node_to_restart.clientstack.ha)
        looper.add(restarted_node)

        idx = nodeSet.index(node_to_restart)
        nodeSet[idx] = restarted_node
        restart_set[idx] = restarted_node

        rest_nodes += [restarted_node]
        if start_one_by_one:
            looper.run(checkNodesConnected(rest_nodes))

    if not start_one_by_one:
        looper.run(checkNodesConnected(nodeSet))

    if wait_for_elections:
        ensureElectionsDone(looper=looper, nodes=nodeSet)
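
A hedged usage sketch for the restart_nodes helper above. Note that the helper replaces entries of restart_set by their index in nodeSet, so the sketch passes a slice whose positions line up (the first two nodes):

# Sketch, assuming plenum's standard fixtures (looper, txnPoolNodeSet,
# tconf, tdir, allPluginsPath).
restart_nodes(looper, txnPoolNodeSet,
              restart_set=txnPoolNodeSet[:2],
              tconf=tconf, tdir=tdir,
              allPluginsPath=allPluginsPath,
              start_one_by_one=True,
              wait_for_elections=True)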
Example #6
def update_node_data_and_reconnect(looper, txnPoolNodeSet, steward_wallet,
                                   sdk_pool_handle, node, new_node_ip,
                                   new_node_port, new_client_ip,
                                   new_client_port, tdir, tconf):
    node_ha = node.nodestack.ha
    cli_ha = node.clientstack.ha
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet, sdk_pool_handle, node_dest,
                         node.name, new_node_ip, new_node_port, new_client_ip,
                         new_client_port)
    # restart the Node with new HA
    node.stop()
    looper.removeProdable(name=node.name)
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             ha=HA(new_node_ip or node_ha.host, new_node_port
                                   or node_ha.port),
                             cliha=HA(new_client_ip or cli_ha.host,
                                      new_client_port or cli_ha.port))
    looper.add(restartedNode)

    # replace node in txnPoolNodeSet
    try:
        idx = next(i for i, n in enumerate(txnPoolNodeSet)
                   if n.name == node.name)
    except StopIteration:
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, steward_wallet,
                               sdk_pool_handle)
    return restartedNode
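
A hedged usage sketch for update_node_data_and_reconnect: generate brand-new node and client HAs with genHa (as the other examples here do) and move the first pool node onto them:

# Sketch, assuming indy-node's sdk fixtures (sdk_wallet_steward,
# sdk_pool_handle) alongside the usual looper/tconf/tdir ones.
(new_node_ip, new_node_port), (new_client_ip, new_client_port) = genHa(2)
restarted = update_node_data_and_reconnect(looper, txnPoolNodeSet,
                                           sdk_wallet_steward, sdk_pool_handle,
                                           txnPoolNodeSet[0],
                                           new_node_ip, new_node_port,
                                           new_client_ip, new_client_port,
                                           tdir, tconf)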
Example #7
def test_state_regenerated_from_ledger(looper, txnPoolNodeSet, client1,
                                       wallet1, client1Connected, tdir, tconf,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from ledger after start
    """
    sent_batches = 10
    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1,
                                           5 * sent_batches, sent_batches)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    node_to_stop = txnPoolNodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    nodeHa, nodeCHa = HA(*node_to_stop.nodestack.ha), HA(
        *node_to_stop.clientstack.ha)

    node_to_stop.stop()
    looper.removeProdable(node_to_stop)

    shutil.rmtree(state_db_path)

    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              ha=nodeHa,
                              cliha=nodeCHa,
                              pluginPaths=allPluginsPath)
    looper.add(restarted_node)
    txnPoolNodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, restarted_node, *txnPoolNodeSet[:-1])
Example #8
def updateNodeDataAndReconnect(looper, steward, stewardWallet, node, node_data,
                               tdir, tconf, txnPoolNodeSet):
    updateNodeData(looper, steward, stewardWallet, node, node_data)
    # restart the Node with new HA
    node.stop()
    node_alias = node_data.get(ALIAS, None) or node.name
    node_ip = node_data.get(NODE_IP, None) or node.nodestack.ha.host
    node_port = node_data.get(NODE_PORT, None) or node.nodestack.ha.port
    client_ip = node_data.get(CLIENT_IP, None) or node.clientstack.ha.host
    client_port = node_data.get(CLIENT_PORT, None) or node.clientstack.ha.port
    looper.removeProdable(name=node.name)
    config_helper = PNodeConfigHelper(node_alias, tconf, chroot=tdir)
    restartedNode = TestNode(node_alias,
                             config_helper=config_helper,
                             config=tconf,
                             ha=HA(node_ip, node_port),
                             cliha=HA(client_ip, client_port))
    looper.add(restartedNode)

    # replace node in txnPoolNodeSet
    try:
        idx = next(i for i, n in enumerate(txnPoolNodeSet)
                   if n.name == node.name)
    except StopIteration:
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    return restartedNode
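
A hedged usage sketch for updateNodeDataAndReconnect (the older client-based API): every field omitted from node_data falls back to the node's current value on restart, so changing only the node port is a short call:

# Sketch: change only the node port; the other HA fields keep their
# current values via the `or` fallbacks above.
node = txnPoolNodeSet[0]
restarted = updateNodeDataAndReconnect(looper, steward, stewardWallet, node,
                                       {ALIAS: node.name,
                                        NODE_PORT: genHa(1).port},
                                       tdir, tconf, txnPoolNodeSet)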
Example #9
def testNodesConnectWhenTheyAllStartAtOnce(allPluginsPath, tdir_for_func,
                                           tconf_for_func,
                                           looper_without_nodeset_for_func,
                                           nodeReg):
    looper = looper_without_nodeset_for_func
    nodes = []

    initLocalKeys(tdir_for_func, tconf_for_func, nodeReg)

    for name in nodeReg:
        config_helper = PNodeConfigHelper(name,
                                          tconf_for_func,
                                          chroot=tdir_for_func)
        node = TestNode(name,
                        nodeReg,
                        config_helper=config_helper,
                        config=tconf_for_func,
                        pluginPaths=allPluginsPath)
        nodes.append(node)

    for node in nodes:
        tellKeysToOthers(node, nodes)

    for node in nodes:
        looper.add(node)

    looper.run(checkNodesConnected(nodes))
    stopNodes(nodes, looper)
def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath,
                  after_restart_timeout=None, per_add_timeout=None):
    for node_to_stop in restart_set:
        node_to_stop.cleanupOnStopping = True
        node_to_stop.stop()
        looper.removeProdable(node_to_stop)

    rest_nodes = [n for n in nodeSet if n not in restart_set]
    for node_to_stop in restart_set:
        ensure_node_disconnected(looper, node_to_stop, nodeSet, timeout=2)

    if after_restart_timeout:
        looper.runFor(after_restart_timeout)

    for node_to_restart in restart_set:
        config_helper = PNodeConfigHelper(node_to_restart.name, tconf, chroot=tdir)
        restarted_node = TestNode(node_to_restart.name, config_helper=config_helper, config=tconf,
                                  pluginPaths=allPluginsPath, ha=node_to_restart.nodestack.ha,
                                  cliha=node_to_restart.clientstack.ha)
        looper.add(restarted_node)
        idx = nodeSet.index(node_to_restart)
        nodeSet[idx] = restarted_node
        if per_add_timeout:
            looper.run(checkNodesConnected(rest_nodes + [restarted_node], customTimeout=per_add_timeout))
        rest_nodes += [restarted_node]

    if not per_add_timeout:
        looper.run(checkNodesConnected(nodeSet, customTimeout=after_restart_timeout))
Example #11
def test_order_after_demote_and_restart(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client, tdir, tconf, allPluginsPath,
                                        sdk_wallet_stewards):
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 3, 3)

    primary_node = txnPoolNodeSet[0]
    node_to_stop = txnPoolNodeSet[1]
    node_to_demote = txnPoolNodeSet[2]
    txnPoolNodeSet.remove(node_to_demote)

    node_to_stop.cleanupOnStopping = True
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2)

    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote)

    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name, config_helper=config_helper, config=tconf,
                              pluginPaths=allPluginsPath, ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, check_primaries=False)

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 1, 1)

    def get_current_bls_keys(node):
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

    assert get_current_bls_keys(restarted_node) == get_current_bls_keys(primary_node)
def testClientConnectToRestartedNodes(looper, txnPoolNodeSet,
                                      tdir, tconf,
                                      poolTxnNodeNames, allPluginsPath,
                                      sdk_wallet_new_client,
                                      sdk_pool_handle):
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_new_client, 1)
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    txnPoolNodeSet = []
    for nm in poolTxnNodeNames:
        config_helper = PNodeConfigHelper(nm, tconf, chroot=tdir)
        node = TestNode(nm,
                        config_helper=config_helper,
                        config=tconf, pluginPaths=allPluginsPath)
        looper.add(node)
        txnPoolNodeSet.append(node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)

    def chk():
        for node in txnPoolNodeSet:
            assert node.isParticipating

    timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))

    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_new_client, sdk_pool_handle)
Example #13
def testNodeCatchupFPlusOne(looper, txnPoolNodeSet, sdk_pool_handle,
                            sdk_wallet_steward, tconf, tdir, tdirWithPoolTxns,
                            allPluginsPath, testNodeClass):
    """
    Check that f+1 nodes are enough for catchup
    """

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".format(
        node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node0,
                                            stopNode=True)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    logger.debug("Stopping node1 with pool ledger size {}".format(
        node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node1,
                                            stopNode=True)
    looper.removeProdable(node1)

    # Make sure the stopped node got out of sync
    # Exclude the state check since the node is stopped, hence its state DB is closed
    waitNodeDataInequality(looper,
                           node0,
                           *txnPoolNodeSet[:-2],
                           exclude_from_check=['check_state'])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    nodeHa, nodeCHa = HA(*node0.nodestack.ha), HA(*node0.clientstack.ha)
    config_helper = PNodeConfigHelper(node0.name, tconf, chroot=tdir)
    node0 = testNodeClass(node0.name,
                          config_helper=config_helper,
                          ha=nodeHa,
                          cliha=nodeCHa,
                          config=tconf,
                          pluginPaths=allPluginsPath)
    looper.add(node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
Example #14
def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                 tconf, shouldBePrimary, tdir,
                 sdk_pool_handle, sdk_wallet_stewards):
    # prepare new ha for node and client stack
    subjectedNode = None
    node_index = None

    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            node_index = nodeIndex
            break

    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))

    # change HA
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    node_dest = hexToFriendly(subjectedNode.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle,
                         node_dest, subjectedNode.name,
                         nodeStackNewHA[0], nodeStackNewHA[1],
                         clientStackNewHA[0], clientStackNewHA[1],
                         services=[VALIDATOR])

    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)

    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))
    sdk_pool_refresh(looper, sdk_pool_handle)

    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)

    # start client and check the node HA
    anotherClient, _ = genTestClient(tmpdir=tdirWithClientPoolTxns,
                                     usePoolLedger=True)
    looper.add(anotherClient)
    looper.run(eventually(anotherClient.ensureConnectedToNodes))
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_steward,
                              8)
Example #15
def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                 poolTxnData, poolTxnStewardNames, tconf, shouldBePrimary, tdir):
    # prepare new ha for node and client stack
    subjectedNode = None
    stewardName = None
    stewardsSeed = None

    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            stewardName = poolTxnStewardNames[nodeIndex]
            stewardsSeed = poolTxnData["seeds"][stewardName].encode()
            break

    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))

    nodeSeed = poolTxnData["seeds"][subjectedNode.name].encode()

    # change HA
    stewardClient, req = changeHA(looper, tconf, subjectedNode.name, nodeSeed,
                                  nodeStackNewHA, stewardName, stewardsSeed,
                                  basedir=tdirWithClientPoolTxns)

    waitForSufficientRepliesForRequests(looper, stewardClient,
                                        requests=[req])

    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)

    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))

    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)

    # start client and check the node HA
    anotherClient, _ = genTestClient(tmpdir=tdirWithClientPoolTxns,
                                     usePoolLedger=True)
    looper.add(anotherClient)
    looper.run(eventually(anotherClient.ensureConnectedToNodes))
    stewardWallet = Wallet(stewardName)
    stewardWallet.addIdentifier(signer=DidSigner(seed=stewardsSeed))
    sendReqsToNodesAndVerifySuffReplies(
        looper, stewardWallet, stewardClient, 8)
Example #16
def prepare_new_node_data(tconf, tdir,
                          newNodeName):
    sigseed = randomString(32).encode()
    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)
    config_helper = PNodeConfigHelper(newNodeName, tconf, chroot=tdir)
    _, verkey, bls_key = initNodeKeysForBothStacks(newNodeName, config_helper.keys_dir,
                                                   sigseed, override=True)
    return sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort
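
prepare_new_node_data only generates keys and addresses; a hedged sketch of consuming its result with the start_newly_added_node helper from Example #30 below (in a full test the corresponding NODE txn carrying verkey and bls_key would also be sent, which is omitted here):

# Sketch, assuming plenum's standard fixtures; 'Epsilon' is an
# illustrative node name.
sigseed, verkey, bls_key, node_ip, node_port, client_ip, client_port = \
    prepare_new_node_data(tconf, tdir, 'Epsilon')
new_node = start_newly_added_node(looper, 'Epsilon', tdir, sigseed,
                                  HA(node_ip, node_port),
                                  HA(client_ip, client_port),
                                  tconf, auto_start=True,
                                  plugin_path=allPluginsPath,
                                  nodeClass=TestNode)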
def create(name):
    # nested helper: closes over tconf_for_func, tdir_for_func, nodeReg,
    # allPluginsPath and the enclosing test's `nodes` list
    config_helper = PNodeConfigHelper(name, tconf_for_func, chroot=tdir_for_func)
    node = TestNode(name, nodeReg,
                    config_helper=config_helper,
                    config=tconf_for_func,
                    pluginPaths=allPluginsPath)
    nodes.append(node)
    return node
Example #18
def test_valid_txn_with_fees(helpers, mint_tokens, fees_set,
                             nodeSetWithIntegratedTokenPlugin, looper,
                             address_main, addresses, tdir, tconf):
    seq_no = get_seq_no(mint_tokens)
    remaining = 1000
    last_node = nodeSetWithIntegratedTokenPlugin[-1]
    last_node.stop()
    looper.removeProdable(last_node)
    token_req_handler = last_node.get_req_handler(TOKEN_LEDGER_ID)
    token_req_handler.utxo_cache._store.close()

    nodeSetWithIntegratedTokenPlugin = nodeSetWithIntegratedTokenPlugin[:-1]

    for address in addresses:
        inputs = [
            {
                ADDRESS: address_main,
                SEQNO: seq_no
            },
        ]
        outputs = [
            {
                ADDRESS: address,
                AMOUNT: 1
            },
            {
                ADDRESS: address_main,
                AMOUNT: remaining - 2
            },  # XFER fee is 1
        ]
        request = helpers.request.transfer(inputs, outputs)
        response = helpers.sdk.send_and_check_request_objects([request])
        result = helpers.sdk.get_first_result(response)
        seq_no = get_seq_no(result)
        remaining -= 2

    for _ in range(5):
        pay_fees(helpers, fees_set, address_main)

    config_helper = PNodeConfigHelper(last_node.name, tconf, chroot=tdir)
    restarted_node = TestNode(last_node.name,
                              config_helper=config_helper,
                              config=tconf,
                              ha=last_node.nodestack.ha,
                              cliha=last_node.clientstack.ha)

    integrate_token_plugin_in_node(restarted_node)
    integrate_fees_plugin_in_node(restarted_node)

    tl = restarted_node.getLedger(TOKEN_LEDGER_ID)
    for node in nodeSetWithIntegratedTokenPlugin:
        token_ledger = node.getLedger(TOKEN_LEDGER_ID)
        assert token_ledger.size > tl.size

    looper.add(restarted_node)
    nodeSetWithIntegratedTokenPlugin.append(restarted_node)

    ensure_all_nodes_have_same_data(looper, nodeSetWithIntegratedTokenPlugin)
Example #19
def new_node(node_name, tdir, node_ha, client_ha, tconf, plugin_path,
             nodeClass):
    config_helper = PNodeConfigHelper(node_name, tconf, chroot=tdir)
    node = nodeClass(node_name,
                     config_helper=config_helper,
                     config=tconf,
                     ha=node_ha,
                     cliha=client_ha,
                     pluginPaths=plugin_path)
    return node
Example #20
def testChangeNodeHaBack(looper, txnPoolNodeSet, tdir, tconf,
                         steward1, stewardWallet, nodeThetaAdded):
    """
    The case:
        The Node HA is updated with some HA (let's name it 'correct' HA).
        Then the Steward makes a mistake and sends the NODE txn with other HA
        ('wrong' HA). The Steward replaces back 'wrong' HA by 'correct' HA sending
        yet another one NODE txn.
    """

    steward, stewardWallet, theta = nodeThetaAdded
    clientHa = theta.cliNodeReg['ThetaC']  # use the same client HA
    # perform all the steps while the node is stopped
    theta.stop()
    looper.removeProdable(name=theta.name)

    # step 1: set 'correct' HA
    correctNodeHa = genHa(1)
    op = {
        ALIAS: theta.name,
        NODE_IP: correctNodeHa.host,
        NODE_PORT: correctNodeHa.port,
        CLIENT_IP: clientHa.host,
        CLIENT_PORT: clientHa.port,
    }
    updateNodeData(looper, steward, stewardWallet, theta,
                   op)

    # step 2: set 'wrong' HA
    wrongNodeHa = genHa(1)
    op.update({NODE_IP: wrongNodeHa.host, NODE_PORT: wrongNodeHa.port})
    updateNodeData(looper, steward, stewardWallet, theta,
                   op)

    # step 3: set 'correct' HA back
    op.update({NODE_IP: correctNodeHa.host, NODE_PORT: correctNodeHa.port})
    updateNodeData(looper, steward, stewardWallet, theta,
                   op)

    # To save time, the pool connection is not maintained during these steps;
    # only the final result is checked.
    config_helper = PNodeConfigHelper(theta.name, tconf, chroot=tdir)
    restartedNode = TestNode(theta.name,
                             ledger_dir=config_helper.ledger_dir,
                             keys_dir=config_helper.keys_dir,
                             genesis_dir=config_helper.genesis_dir,
                             plugins_dir=config_helper.plugins_dir,
                             config=tconf, ha=correctNodeHa, cliha=clientHa)
    looper.add(restartedNode)
    txnPoolNodeSet[-1] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    # check Theta HA
    for n in txnPoolNodeSet:
        assert n.nodeReg['Theta'] == correctNodeHa
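
Note that the example above passes the four directories (ledger_dir, keys_dir, genesis_dir, plugins_dir) to TestNode individually, while every other example on this page passes the whole helper via config_helper=; the two spellings appear interchangeable since the helper supplies those same directories. A sketch of the equivalent construction:

# Sketch: equivalent TestNode construction via config_helper (assumes the
# variables from the example above).
restartedNode = TestNode(theta.name,
                         config_helper=config_helper,
                         config=tconf, ha=correctNodeHa, cliha=clientHa)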
def test_catchup_with_lost_first_consistency_proofs(txnPoolNodeSet,
                                                    looper,
                                                    sdk_pool_handle,
                                                    sdk_wallet_steward,
                                                    tconf,
                                                    tdir,
                                                    allPluginsPath,
                                                    monkeypatch,
                                                    lost_count):
    '''Skip processing of the first lost_count CONSISTENCY_PROOFs during
    catchup. The catching-up node then has no f+1 quorum of CONSISTENCY_PROOFs
    for the longer transaction list and has to request CONSISTENCY_PROOFs
    again to finish catchup.
    The test makes sure that the node eventually finishes catchup.'''
    node_to_disconnect = txnPoolNodeSet[-1]

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)

    # restart node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward,
                              2)

    nodeHa, nodeCHa = HA(*node_to_disconnect.nodestack.ha), HA(
        *node_to_disconnect.clientstack.ha)
    config_helper = PNodeConfigHelper(node_to_disconnect.name, tconf,
                                      chroot=tdir)
    node_to_disconnect = TestNode(node_to_disconnect.name,
                                  config_helper=config_helper,
                                  config=tconf,
                                  ha=nodeHa, cliha=nodeCHa,
                                  pluginPaths=allPluginsPath)

    def unpatch_after_call(proof, frm):
        # `call_count` is a module-level counter, assumed to be initialised to
        # 0 at module scope in the original test file
        global call_count
        call_count += 1
        if call_count >= lost_count:
            # unpatch processConsistencyProof after lost_count calls
            node_to_disconnect.nodeMsgRouter.add((ConsistencyProof,
                                                  node_to_disconnect.ledgerManager.processConsistencyProof))
            call_count = 0

    # patch processConsistencyProof
    node_to_disconnect.nodeMsgRouter.add((ConsistencyProof, unpatch_after_call))
    # add node_to_disconnect to pool
    looper.add(node_to_disconnect)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
Example #22
def initLocalKeys(tdir_for_func, tconf_for_func, nodeReg):
    for nName in nodeReg.keys():
        sigseed = randomString(32).encode()
        config_helper = PNodeConfigHelper(nName,
                                          tconf_for_func,
                                          chroot=tdir_for_func)
        initNodeKeysForBothStacks(nName,
                                  config_helper.keys_dir,
                                  sigseed,
                                  override=True)
        logger.debug('Created keys for {}'.format(nName))
def test_primary_selection_after_primary_demotion_and_pool_restart(
        looper, txnPoolNodeSet, stewardAndWalletForMasterNode,
        txnPoolMasterNodes, tdir, tconf):
    """
    Demote primary and restart the pool.
    Pool should select new primary and have viewNo=0 after restart.
    """

    logger.info(
        "1. turn off the node which has the primary replica for the master instance")
    master_node = txnPoolMasterNodes[0]
    client, wallet = stewardAndWalletForMasterNode

    node_data = {ALIAS: master_node.name, SERVICES: []}
    updateNodeData(looper, client, wallet, master_node, node_data)

    restNodes = [
        node for node in txnPoolNodeSet if node.name != master_node.name
    ]
    ensureElectionsDone(looper, restNodes)

    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)

    logger.info("2. restart pool")
    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    # Starting the nodes again by creating `Node` objects, since that
    # simulates what happens when starting a node with the startup script
    restartedNodes = []
    for node in txnPoolNodeSet:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 config_helper=config_helper,
                                 config=tconf,
                                 ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)

    restNodes = [
        node for node in restartedNodes if node.name != master_node.name
    ]

    looper.run(checkNodesConnected(restNodes))
    ensureElectionsDone(looper, restNodes)
    checkViewNoForNodes(restNodes, 0)
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)

    primariesIdxs = getPrimaryNodesIdxs(restNodes)
    assert restNodes[primariesIdxs[0]].name != master_node.name
def test_node(tdirWithPoolTxns, tdirWithDomainTxns, poolTxnNodeNames,
              tdirWithNodeKeepInited, tdir, tconf, allPluginsPath):
    node_name = poolTxnNodeNames[0]
    config_helper = PNodeConfigHelper(node_name, tconf, chroot=tdir)
    node = TestNode(node_name,
                    config_helper=config_helper,
                    config=tconf,
                    pluginPaths=allPluginsPath)
    node.view_changer = FakeSomething(view_change_in_progress=False, view_no=0)
    yield node
    node.onStopping()
Example #25
def testNodesComingUpAtDifferentTimes(allPluginsPath, tdir_for_func,
                                      tconf_for_func,
                                      looper_without_nodeset_for_func,
                                      nodeReg):
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    looper = looper_without_nodeset_for_func

    initLocalKeys(tdir_for_func, tconf_for_func, nodeReg)

    nodes = []

    names = list(nodeReg.keys())

    shuffle(names)
    waits = [randint(1, 10) for _ in names]
    rwaits = [randint(1, 10) for _ in names]

    for name in names:
        config_helper = PNodeConfigHelper(name,
                                          tconf_for_func,
                                          chroot=tdir_for_func)
        node = TestNode(name,
                        nodeReg,
                        config_helper=config_helper,
                        config=tconf_for_func,
                        pluginPaths=allPluginsPath)
        nodes.append(node)

    for node in nodes:
        tellKeysToOthers(node, nodes)

    for i, node in enumerate(nodes):
        looper.add(node)
        looper.runFor(waits[i])
    looper.run(checkNodesConnected(nodes))
    logger.debug("connects")
    logger.debug("node order: {}".format(names))
    logger.debug("waits: {}".format(waits))

    stopNodes(nodes, looper)

    # # Giving some time for sockets to close, use eventually
    # time.sleep(1)

    for i, n in enumerate(nodes):
        n.start(looper.loop)
        looper.runFor(rwaits[i])
    looper.runFor(3)
    looper.run(checkNodesConnected(nodes))
    stopNodes(nodes, looper)
    logger.debug("reconnects")
    logger.debug("node order: {}".format(names))
    logger.debug("rwaits: {}".format(rwaits))
Example #26
def test_node(tdirWithPoolTxns, tdirWithDomainTxns, poolTxnNodeNames,
              tdirWithNodeKeepInited, tdir, tconf, allPluginsPath):
    node_name = poolTxnNodeNames[0]
    config_helper = PNodeConfigHelper(node_name, tconf, chroot=tdir)
    node = TestNode(node_name,
                    config_helper=config_helper,
                    config=tconf,
                    pluginPaths=allPluginsPath)
    yield node
    node.onStopping()  # TODO: stop won't call onStopping as we are in Stopped state
def test_catchup_with_lost_ledger_status(txnPoolNodeSet,
                                         looper,
                                         sdk_pool_handle,
                                         sdk_wallet_steward,
                                         tconf,
                                         tdir,
                                         allPluginsPath,
                                         monkeypatch,
                                         lost_count):
    '''Skip processing of lost_count message responses with LEDGER STATUS
    during catchup; the test makes sure that the node eventually finishes
    catchup.'''

    node_to_disconnect = txnPoolNodeSet[-1]

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)

    # restart node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward,
                              2)

    nodeHa, nodeCHa = HA(*node_to_disconnect.nodestack.ha), HA(
        *node_to_disconnect.clientstack.ha)
    config_helper = PNodeConfigHelper(node_to_disconnect.name, tconf,
                                      chroot=tdir)
    node_to_disconnect = TestNode(node_to_disconnect.name,
                                  config_helper=config_helper,
                                  config=tconf,
                                  ha=nodeHa, cliha=nodeCHa,
                                  pluginPaths=allPluginsPath)

    def unpatch_after_call(status, frm):
        # `call_count` is a module-level counter, assumed to be initialised to
        # 0 at module scope in the original test file
        global call_count
        call_count += 1
        if call_count >= lost_count:
            # unpatch processLedgerStatus after lost_count calls
            node_to_disconnect.nodeMsgRouter.add((LedgerStatus, node_to_disconnect.ledgerManager.processLedgerStatus))
            call_count = 0

    # patch processLedgerStatus
    node_to_disconnect.nodeMsgRouter.add((LedgerStatus, unpatch_after_call))

    # add node_to_disconnect to pool
    looper.add(node_to_disconnect)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
Example #28
def testNodeCatchupFPlusOne(txnPoolNodeSet, poolAfterSomeTxns, tconf, tdir,
                            tdirWithPoolTxns, allPluginsPath, testNodeClass):
    """
    Check that f+1 nodes are enough for catchup
    """
    looper, client, wallet = poolAfterSomeTxns

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".format(
        node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node0,
                                            stopNode=True)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)

    logger.debug("Stopping node1 with pool ledger size {}".format(
        node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node1,
                                            stopNode=True)
    looper.removeProdable(node1)

    # Make sure the stopped node got out of sync
    waitNodeDataInequality(looper, node0, *txnPoolNodeSet[:-2])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    nodeHa, nodeCHa = HA(*node0.nodestack.ha), HA(*node0.clientstack.ha)
    config_helper = PNodeConfigHelper(node0.name, tconf, chroot=tdir)
    node0 = testNodeClass(node0.name,
                          config_helper=config_helper,
                          ha=nodeHa,
                          cliha=nodeCHa,
                          config=tconf,
                          pluginPaths=allPluginsPath)
    looper.add(node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
Example #29
def addNodeBack(node_set, looper: Looper, node: Node, tconf, tdir) -> TestNode:
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             ha=node.nodestack.ha,
                             cliha=node.clientstack.ha)
    # reconnect every other node to the restarted node's new stack
    for n in node_set:
        if n.name != restartedNode.name:
            n.nodestack.reconnectRemoteWithName(restartedNode.name)
    node_set.append(restartedNode)
    looper.add(restartedNode)
    return restartedNode
Example #30
def start_newly_added_node(looper, node_name, tdir, sigseed, node_ha,
                           client_ha, tconf, auto_start, plugin_path,
                           nodeClass):
    config_helper = PNodeConfigHelper(node_name, tconf, chroot=tdir)
    node = nodeClass(node_name,
                     config_helper=config_helper,
                     config=tconf,
                     ha=node_ha,
                     cliha=client_ha,
                     pluginPaths=plugin_path)
    if auto_start:
        looper.add(node)
    return node