Example #1
def test_primary_selection_after_demoted_primary_node_promotion(
        looper, txnPoolNodeSet, stewardAndWalletForMasterNode,
        txnPoolMasterNodes):
    """
    Demote primary of master instance, wait for view change and promote it back.
    Check primaries for instances.
    """
    assert len(txnPoolNodeSet) == 4

    # Check primaries after test setup.
    primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet)
    assert len(primariesIdxs) == 2
    assert primariesIdxs[0] == 0
    assert primariesIdxs[1] == 1

    master_node = txnPoolMasterNodes[0]
    client, wallet = stewardAndWalletForMasterNode

    # Demote primary of master instance.
    node_data = {ALIAS: master_node.name, SERVICES: []}
    updateNodeData(looper, client, wallet, master_node, node_data)

    restNodes = [
        node for node in txnPoolNodeSet if node.name != master_node.name
    ]
    ensureElectionsDone(looper, restNodes)

    # Check that there is only one instance now; check its primary.
    primariesIdxs = getPrimaryNodesIdxs(restNodes)
    assert len(primariesIdxs) == 1
    assert primariesIdxs[0] == 1

    # Ensure pool is working properly.
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)

    # Promote demoted node back.
    node_data = {ALIAS: master_node.name, SERVICES: [VALIDATOR]}
    updateNodeData(looper, client, wallet, master_node, node_data)

    # Ensure pool is working properly.
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)

    # Check that there are two instances again, check their primaries.
    primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet)
    assert len(primariesIdxs) == 2
    assert primariesIdxs[0] == 2
    assert primariesIdxs[1] == 3
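
For reference, a minimal sketch of what a helper like getPrimaryNodesIdxs could look like, given the assertions above. It assumes each node exposes its replicas and that a replica knows whether it is primary; the real helper in the test suite may differ.

def getPrimaryNodesIdxs_sketch(nodes):
    # Sketch only, not the suite's actual implementation.
    # For each protocol instance, find the index (within `nodes`) of the
    # node whose replica for that instance is the primary.
    primariesIdxs = []
    for inst_id in range(len(nodes[0].replicas)):
        for idx, node in enumerate(nodes):
            if node.replicas[inst_id].isPrimary:
                primariesIdxs.append(idx)
                break
    return primariesIdxs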
Example #3
def change_bls_key(looper,
                   txnPoolNodeSet,
                   node,
                   steward_client,
                   steward_wallet,
                   add_wrong=False):
    new_blspk = init_bls_keys(node.keys_dir, node.name)

    key_in_txn = (new_blspk if not add_wrong
                  else ''.join(random_from_alphabet(32, base58.alphabet)))

    node_data = {ALIAS: node.name, BLS_KEY: key_in_txn}

    updateNodeData(looper, steward_client, steward_wallet, node, node_data)
    waitNodeDataEquality(looper, node, *txnPoolNodeSet[:-1])
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward_client,
                                                  *txnPoolNodeSet)
    return new_blspk
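
A possible usage of the helper above: rotate the key on one node, then confirm the pool still reaches consensus. This is a hypothetical test body assuming the same fixture names used throughout this page.

def test_change_bls_key_sketch(looper, txnPoolNodeSet,
                               steward_client, steward_wallet):
    # Hypothetical test; fixtures are assumed, not taken from the suite.
    node = txnPoolNodeSet[0]
    change_bls_key(looper, txnPoolNodeSet, node,
                   steward_client, steward_wallet)
    # The pool should keep processing requests with the rotated key.
    sendReqsToNodesAndVerifySuffReplies(looper, steward_wallet,
                                        steward_client, numReqs=3)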
Example #4
def test_update_with_demoted_node(looper, nodeSet, validUpgrade,
                                  stewards_and_wallets, trustee,
                                  trusteeWallet):
    # demote one node
    node_steward_cl, steward_wallet = stewards_and_wallets[3]
    node_data = {ALIAS: nodeSet[3].name, SERVICES: []}
    updateNodeData(looper, node_steward_cl, steward_wallet, nodeSet[3],
                   node_data)

    # remove demoted node from upgrade schedule
    upgr = validUpgrade
    del upgr[SCHEDULE][nodeSet[3].id]

    # send upgrade
    ensureUpgradeSent(looper, trustee, trusteeWallet, upgr)

    # check upg scheduled
    looper.run(
        eventually(checkUpgradeScheduled,
                   nodeSet[:3],
                   upgr[VERSION],
                   retryWait=1,
                   timeout=waits.expectedUpgradeScheduled()))
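
The test above relies on validUpgrade having a VERSION field and a SCHEDULE dict keyed by node id. A sketch of how such a payload could be built; the field names come from the code above, but the helper name and concrete values are illustrative assumptions, not the actual fixture (which carries additional fields).

from datetime import datetime, timedelta

def make_upgrade_schedule_sketch(nodes, version='1.2.3', start_in_minutes=5):
    # Sketch only; the real validUpgrade fixture may differ.
    start = datetime.utcnow() + timedelta(minutes=start_in_minutes)
    return {
        VERSION: version,
        # One scheduled time per validator, keyed by node id, staggered
        # so the nodes do not all upgrade at once.
        SCHEDULE: {
            node.id: (start + timedelta(minutes=i)).isoformat()
            for i, node in enumerate(nodes)
        },
    }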
Example #5
def testChangeNodeHaBack(looper, txnPoolNodeSet, tdir, tconf,
                         steward1, stewardWallet, nodeThetaAdded):
    """
    The case:
        The Node HA is updated with some HA (let's name it 'correct' HA).
        Then the Steward makes a mistake and sends the NODE txn with other HA
        ('wrong' HA). The Steward replaces back 'wrong' HA by 'correct' HA sending
        yet another one NODE txn.
    """

    steward, stewardWallet, theta = nodeThetaAdded
    clientHa = theta.cliNodeReg['ThetaC']  # use the same client HA
    # perform all the steps with the Node stopped
    theta.stop()
    looper.removeProdable(name=theta.name)

    # step 1: set 'correct' HA
    correctNodeHa = genHa(1)
    op = {
        ALIAS: theta.name,
        NODE_IP: correctNodeHa.host,
        NODE_PORT: correctNodeHa.port,
        CLIENT_IP: clientHa.host,
        CLIENT_PORT: clientHa.port,
    }
    updateNodeData(looper, steward, stewardWallet, theta, op)

    # step 2: set 'wrong' HA
    wrongNodeHa = genHa(1)
    op.update({NODE_IP: wrongNodeHa.host, NODE_PORT: wrongNodeHa.port})
    updateNodeData(looper, steward, stewardWallet, theta, op)

    # step 3: set 'correct' HA back
    op.update({NODE_IP: correctNodeHa.host, NODE_PORT: correctNodeHa.port})
    updateNodeData(looper, steward, stewardWallet, theta, op)

    # To save time, the pool connection is not maintained during these
    # steps; only the final result is checked.
    config_helper = PNodeConfigHelper(theta.name, tconf, chroot=tdir)
    restartedNode = TestNode(theta.name,
                             ledger_dir=config_helper.ledger_dir,
                             keys_dir=config_helper.keys_dir,
                             genesis_dir=config_helper.genesis_dir,
                             plugins_dir=config_helper.plugins_dir,
                             config=tconf, ha=correctNodeHa, cliha=clientHa)
    looper.add(restartedNode)
    txnPoolNodeSet[-1] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    # check Theta HA
    for n in txnPoolNodeSet:
        assert n.nodeReg['Theta'] == correctNodeHa
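
genHa above hands out node/client addresses: genHa(1) returns a single HA and genHa(2) returns two (as in the restart example further down). A minimal sketch of such a helper, assuming HA is a (host, port) namedtuple and free localhost ports are acceptable; the real implementation lives in the transport layer and may differ.

import socket
from collections import namedtuple

HA = namedtuple('HA', ['host', 'port'])

def gen_ha_sketch(count=1):
    # Sketch only; the real genHa may allocate addresses differently.
    has = []
    for _ in range(count):
        with socket.socket() as sock:
            sock.bind(('127.0.0.1', 0))  # let the OS pick a free port
            has.append(HA('127.0.0.1', sock.getsockname()[1]))
    return has[0] if count == 1 else has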
Example #6
def change_bls_keys(new_bls_key, node, looper, client, wallet):
    node_data = {ALIAS: node.name, BLS_KEY: new_bls_key}
    updateNodeData(looper, client, wallet, node, node_data)
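
Every example on this page goes through updateNodeData. A sketch of its likely shape, inferred from how it is called here: it wraps node_data in a NODE txn targeting the node's verkey and waits for sufficient replies. The constants and wait helper are standard plenum names, but the exact body is an assumption.

def updateNodeData_sketch(looper, steward_client, steward_wallet,
                          node, node_data):
    # Sketch of the helper's likely body; may differ from the real one.
    op = {
        TXN_TYPE: NODE,
        TARGET_NYM: hexToFriendly(node.nodestack.verhex),
        DATA: node_data,
    }
    req = steward_wallet.signOp(op)
    steward_client.submitReqs(req)
    waitForSufficientRepliesForRequests(looper, steward_client,
                                        requests=[req])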
Example #7
def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet, tdir,
                                         tdirWithPoolTxns,
                                         tdirWithClientPoolTxns, tconf,
                                         steward1, stewardWallet,
                                         nodeThetaAdded, poolTxnClientData):
    newSteward, newStewardWallet, newNode = nodeThetaAdded
    nodeNewHa, clientNewHa = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(newNode, nodeNewHa,
                                                   clientNewHa))

    # Sending the change-HA txn and confirming it succeeded
    op = {
        ALIAS: newNode.name,
        NODE_IP: nodeNewHa.host,
        NODE_PORT: nodeNewHa.port,
        CLIENT_IP: clientNewHa.host,
        CLIENT_PORT: clientNewHa.port,
    }
    updateNodeData(looper, newSteward, newStewardWallet, newNode, op)

    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    # Starting the nodes again by creating `TestNode` objects, since that
    # simulates what happens when starting a node with the script
    restartedNodes = []
    for node in txnPoolNodeSet[:-1]:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 ledger_dir=config_helper.ledger_dir,
                                 keys_dir=config_helper.keys_dir,
                                 genesis_dir=config_helper.genesis_dir,
                                 plugins_dir=config_helper.plugins_dir,
                                 config=tconf,
                                 ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)

    # Starting the node whose HA was changed
    config_helper = PNodeConfigHelper(newNode.name, tconf, chroot=tdir)
    node = TestNode(newNode.name,
                    ledger_dir=config_helper.ledger_dir,
                    keys_dir=config_helper.keys_dir,
                    genesis_dir=config_helper.genesis_dir,
                    plugins_dir=config_helper.plugins_dir,
                    config=tconf,
                    ha=nodeNewHa,
                    cliha=clientNewHa)
    looper.add(node)
    restartedNodes.append(node)

    looper.run(checkNodesConnected(restartedNodes))
    waitNodeDataEquality(looper, node, *restartedNodes[:-1])

    # Building a new client that reads from the genesis txn file
    # but is able to connect to all nodes
    client, wallet = buildPoolClientAndWallet(poolTxnClientData,
                                              tdirWithClientPoolTxns)
    looper.add(client)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, client,
                                                  *restartedNodes)
Example #8
def demote_node(node, looper, client, wallet):
    node_data = {ALIAS: node.name, SERVICES: []}
    updateNodeData(looper, client, wallet, node, node_data)
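
demote_node has a natural counterpart. A sketch of the inverse helper (hypothetical name), mirroring the promotion steps in Examples #1 and #9 where SERVICES is set back to [VALIDATOR]:

def promote_node(node, looper, client, wallet):
    # Hypothetical helper; mirrors the promotion steps used in the tests.
    # Re-adding VALIDATOR to SERVICES promotes the node back into the pool.
    node_data = {ALIAS: node.name, SERVICES: [VALIDATOR]}
    updateNodeData(looper, client, wallet, node, node_data)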
Example #9
def test_primary_selection_after_demoted_node_promotion(
        looper, txnPoolNodeSet, nodeThetaAdded,
        tconf, tdirWithPoolTxns, allPluginsPath):
    """
    Demote non-primary node
    Promote it again
    Restart one node to get the following difference with others:
        - not restarted - node registry and related pool parameters are kept
          in memory in some state which is expected as the same as
          in the pool ledger
        - restarted one - loaded node registry and pool parameters from
          the pool ledger at startup
    Do several view changes and check that all nodes will choose previously
        demoted / promoted node as a primary for some instanse
    """

    nodeThetaSteward, nodeThetaStewardWallet, nodeTheta = nodeThetaAdded

    # viewNo0 = checkViewNoForNodes(txnPoolNodeSet)
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    logger.info("1. Demote node Theta")

    node_data = {
        ALIAS: nodeTheta.name,
        SERVICES: []
    }
    updateNodeData(looper, nodeThetaSteward,
                   nodeThetaStewardWallet, nodeTheta, node_data)
    remainingNodes = list(set(txnPoolNodeSet) - {nodeTheta})

    check_all_nodes_the_same_pool_list(remainingNodes)
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # TODO: a view change might happen unexpectedly for an unknown reason
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    logger.info("2. Promote node Theta back")

    node_data = {
        ALIAS: nodeTheta.name,
        SERVICES: [VALIDATOR]
    }
    updateNodeData(looper, nodeThetaSteward,
                   nodeThetaStewardWallet, nodeTheta, node_data)

    check_all_nodes_the_same_pool_list(txnPoolNodeSet)
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("3. Restart one node")
    stopped_node = txnPoolNodeSet[0]

    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            stopped_node, stopNode=True)
    looper.removeProdable(stopped_node)
    remainingNodes = list(set(txnPoolNodeSet) - {stopped_node})
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    # start node
    restartedNode = start_stopped_node(stopped_node, looper, tconf,
                                       tdirWithPoolTxns, allPluginsPath)
    txnPoolNodeSet = remainingNodes + [restartedNode]
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("4. Do view changes to check that nodeTheta will be chosen "
                "as a primary for some instance by all nodes after some rounds")
    while txnPoolNodeSet[0].viewNo < 4:
        ensure_view_change_complete(looper, txnPoolNodeSet)
        # ensure pool is working properly
        sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                            nodeThetaSteward, numReqs=3)
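
Finally, the test above leans on check_all_nodes_the_same_pool_list. A plausible sketch, assuming each node's view of the pool can be read from its nodeReg (as Example #5 does); the real helper may compare richer pool state.

def check_all_nodes_the_same_pool_list_sketch(nodes):
    # Sketch only: every node should agree on the set of pool members.
    reference = sorted(nodes[0].nodeReg.keys())
    for node in nodes[1:]:
        assert sorted(node.nodeReg.keys()) == reference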