Code Example #1
File: pool_manager.py Project: mhdhaque/indy-plenum
    def getNodeStackParams(name,
                           nodeRegistry: Dict[str, HA],
                           ha: HA = None,
                           keys_dir: str = None) -> Tuple[dict, dict, dict]:
        """
        Return tuple(nodeStack params, nodeReg)
        """
        me = nodeRegistry[name]
        if isinstance(me, NodeDetail):
            sha = me.ha
            nodeReg = {k: v.ha for k, v in nodeRegistry.items()}
        else:
            sha = me if isinstance(me, HA) else HA(*me[0])
            nodeReg = {
                k: v if isinstance(v, HA) else HA(*v[0])
                for k, v in nodeRegistry.items()
            }
        if not ha:  # pull it from the registry
            ha = sha

        cliNodeReg = {r.cliname: r.cliha for r in nodeRegistry.values()}

        nstack = dict(name=name,
                      ha=ha,
                      main=True,
                      auth_mode=AuthMode.RESTRICTED.value)

        if keys_dir:
            nstack['basedirpath'] = keys_dir

        return nstack, nodeReg, cliNodeReg
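
A note on the HA type these examples revolve around: it is a simple
(host, port) named tuple, so it supports both attribute and index access.
A minimal stand-in, assuming stp_core's field names, for illustration only:

from collections import namedtuple

HA = namedtuple('HA', ['host', 'port'])  # stand-in for stp_core.types.HA (assumed)

ha = HA('127.0.0.1', 9701)
assert ha.host == '127.0.0.1' and ha[1] == 9701  # attribute and index access
client_ha = HA(ha.host, ha.port + 1)  # client stacks conventionally sit on port + 1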
Code Example #2
def testNodeKeysChanged(looper, txnPoolNodeSet, tdir,
                        tconf, sdk_node_theta_added,
                        sdk_pool_handle,
                        allPluginsPath=None):
    new_steward_wallet, new_node = sdk_node_theta_added

    new_node.stop()
    looper.removeProdable(name=new_node.name)
    nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha)
    sigseed = randomString(32).encode()
    verkey = base58.b58encode(SimpleSigner(seed=sigseed).naclSigner.verraw).decode("utf-8")
    sdk_change_node_keys(looper, new_node, new_steward_wallet, sdk_pool_handle, verkey)

    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    initNodeKeysForBothStacks(new_node.name, config_helper.keys_dir, sigseed,
                              override=True)

    logger.debug("{} starting with HAs {} {}".format(new_node, nodeHa, nodeCHa))

    node = TestNode(new_node.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=nodeHa, cliha=nodeCHa, pluginPaths=allPluginsPath)
    looper.add(node)
    # The last element of `txnPoolNodeSet` is the node Theta that was just
    # stopped
    txnPoolNodeSet[-1] = node

    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node, *txnPoolNodeSet[:-1])
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle)
Code Example #3
    def getStackParamsAndNodeReg(self,
                                 name,
                                 keys_dir,
                                 nodeRegistry=None,
                                 ha=None,
                                 cliname=None,
                                 cliha=None):
        nodeReg, cliNodeReg, nodeKeys = self.parseLedgerForHaAndKeys(
            self.ledger)

        self.addRemoteKeysFromLedger(nodeKeys)

        # If no HA was passed in, fall back to the one found in the pool
        # transactions file
        if not ha:
            ha = nodeReg[name]

        nstack = dict(name=name,
                      ha=HA('0.0.0.0', ha[1]),
                      main=True,
                      auth_mode=AuthMode.RESTRICTED.value)

        cliname = cliname or (name + CLIENT_STACK_SUFFIX)
        if not cliha:
            cliha = cliNodeReg[cliname]
        cstack = dict(name=cliname,
                      ha=HA('0.0.0.0', cliha[1]),
                      main=True,
                      auth_mode=AuthMode.ALLOW_ANY.value)

        if keys_dir:
            nstack['basedirpath'] = keys_dir
            cstack['basedirpath'] = keys_dir

        return nstack, cstack, nodeReg, cliNodeReg
Code Example #4
def update_node_data_and_reconnect(looper, txnPoolNodeSet, steward_wallet,
                                   sdk_pool_handle, node, new_node_ip,
                                   new_node_port, new_client_ip,
                                   new_client_port, tdir, tconf):
    node_ha = node.nodestack.ha
    cli_ha = node.clientstack.ha
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet, sdk_pool_handle, node_dest,
                         node.name, new_node_ip, new_node_port, new_client_ip,
                         new_client_port)
    # restart the Node with new HA
    node.stop()
    looper.removeProdable(name=node.name)
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             ha=HA(new_node_ip or node_ha.host, new_node_port
                                   or node_ha.port),
                             cliha=HA(new_client_ip or cli_ha.host,
                                      new_client_port or cli_ha.port))
    looper.add(restartedNode)

    # replace node in txnPoolNodeSet
    try:
        idx = next(i for i, n in enumerate(txnPoolNodeSet)
                   if n.name == node.name)
    except StopIteration:
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, steward_wallet,
                               sdk_pool_handle)
    return restartedNode
Code Example #5
File: pool_manager.py Project: mhdhaque/indy-plenum
    def getClientStackParams(name, nodeRegistry: Dict[str, HA], cliname, cliha,
                             keys_dir) -> dict:
        """
        Return clientStack params
        """
        me = nodeRegistry[name]
        if isinstance(me, NodeDetail):
            sha = me.ha
            scliname = me.cliname
            scliha = me.cliha
        else:
            sha = me if isinstance(me, HA) else HA(*me[0])
            scliname = None
            scliha = None

        if not cliname:  # default to the name plus the suffix
            cliname = scliname if scliname else name + CLIENT_STACK_SUFFIX
        if not cliha:  # default to same ip, port + 1
            cliha = scliha if scliha else HA(sha[0], sha[1] + 1)

        cstack = dict(name=cliname,
                      ha=cliha,
                      main=True,
                      auth_mode=AuthMode.ALLOW_ANY.value)

        if keys_dir:
            cstack['basedirpath'] = keys_dir

        return cstack
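
The defaulting rules above (client stack name is the node name plus
CLIENT_STACK_SUFFIX; client HA is the same host on the node port + 1) can be
sketched in isolation. The suffix value and the HA stand-in below are
assumptions for illustration:

from collections import namedtuple

HA = namedtuple('HA', ['host', 'port'])  # stand-in for stp_core.types.HA
CLIENT_STACK_SUFFIX = 'C'  # assumed value of plenum's suffix constant

def default_client_params(name, node_ha, cliname=None, cliha=None):
    # mirror getClientStackParams' fallbacks for a registry-based node
    cliname = cliname or name + CLIENT_STACK_SUFFIX
    cliha = cliha or HA(node_ha.host, node_ha.port + 1)
    return cliname, cliha

assert default_client_params('Alpha', HA('10.0.0.5', 9701)) == \
    ('AlphaC', HA('10.0.0.5', 9702))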
Code Example #6
File: stack_manager.py Project: Artemkaaas/plenum
    def connectNewRemote(self, txn, remoteName, nodeOrClientObj,
                         addRemote=True):
        # TODO: Need to handle abbreviated verkey
        verkey = cryptonymToHex(txn[TARGET_NYM])

        nodeHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT])
        cliHa = (txn[DATA][CLIENT_IP], txn[DATA][CLIENT_PORT])

        if addRemote:
            try:
                # Override any existing keys; this covers the scenario where
                # the other node tries to connect to this node before this
                # node has learned about it.
                initRemoteKeys(self.name, remoteName, self.basedirpath,
                               verkey, override=True)
            except Exception as ex:
                logger.error("Exception while initializing keys for remote "
                             "{}: {}".format(remoteName, ex))

        if self.isNode:
            nodeOrClientObj.nodeReg[remoteName] = HA(*nodeHa)
            nodeOrClientObj.cliNodeReg[remoteName + CLIENT_STACK_SUFFIX] = HA(*cliHa)
            logger.debug("{} adding new node {} with HA {}".format(self.name,
                                                                   remoteName,
                                                                   nodeHa))
        else:
            nodeOrClientObj.nodeReg[remoteName] = HA(*cliHa)
            logger.debug("{} adding new node {} with HA {}".format(self.name,
                                                                   remoteName,
                                                                   cliHa))
        nodeOrClientObj.nodestack.maintainConnections(force=True)
Code Example #7
def test_state_regenerated_from_ledger(looper, txnPoolNodeSet, client1,
                                       wallet1, client1Connected, tconf,
                                       tdirWithPoolTxns, allPluginsPath):
    """
    Node loses its state database but recreates it from ledger after start
    """
    sent_batches = 10
    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1,
                                           5 * sent_batches, sent_batches)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    node_to_stop = txnPoolNodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    nodeHa, nodeCHa = HA(*node_to_stop.nodestack.ha), HA(
        *node_to_stop.clientstack.ha)

    node_to_stop.stop()
    looper.removeProdable(node_to_stop)

    shutil.rmtree(state_db_path)

    restarted_node = TestNode(node_to_stop.name,
                              basedirpath=tdirWithPoolTxns,
                              config=tconf,
                              ha=nodeHa,
                              cliha=nodeCHa,
                              pluginPaths=allPluginsPath)
    looper.add(restarted_node)
    txnPoolNodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, restarted_node, *txnPoolNodeSet[:-1])
Code Example #8
    def onPoolMembershipChange(self, txn):
        if txn[TXN_TYPE] == NODE:
            nodeName = txn[DATA][ALIAS]
            nodeNym = txn[TARGET_NYM]

            def _updateNode(txn):
                if {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT} & \
                        set(txn[DATA].keys()):
                    self.nodeHaChanged(txn)
                if VERKEY in txn:
                    self.nodeKeysChanged(txn)
                if SERVICES in txn[DATA]:
                    self.nodeServicesChanged(txn)

            if nodeName in self.nodeReg:
                # The node was already part of the pool so update
                _updateNode(txn)
            else:
                seqNos, info = self.getNodeInfoFromLedger(nodeNym)
                if len(seqNos) == 1:
                    # Since only one transaction has been made, this is a new
                    # node transaction
                    if VALIDATOR in txn[DATA].get(SERVICES, []):
                        self.addNewNodeAndConnect(txn)
                else:
                    self.node.nodeReg[nodeName] = HA(info[DATA][NODE_IP],
                                                     info[DATA][NODE_PORT])
                    self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = HA(
                        info[DATA][CLIENT_IP], info[DATA][CLIENT_PORT])
                    _updateNode(txn)

            self.node.sendPoolInfoToClients(txn)
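
The _updateNode dispatch above simply checks which fields a NODE txn carries.
A self-contained sketch of that check, with all constant values assumed for
illustration:

DATA, VERKEY = 'data', 'verkey'
NODE_IP, NODE_PORT = 'node_ip', 'node_port'
CLIENT_IP, CLIENT_PORT, SERVICES = 'client_ip', 'client_port', 'services'

def classify_update(txn):
    changes = []
    if {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT} & set(txn[DATA]):
        changes.append('ha')        # some address field changed
    if VERKEY in txn:
        changes.append('keys')      # verification key changed
    if SERVICES in txn[DATA]:
        changes.append('services')  # promotion/demotion
    return changes

assert classify_update({DATA: {NODE_IP: '10.0.0.1'}, VERKEY: 'x'}) == ['ha', 'keys']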
Code Example #9
def testNodeKeysChanged(looper, txnPoolNodeSet, tdirWithPoolTxns,
                        tconf, steward1, nodeThetaAdded,
                        allPluginsPath=None):
    newSteward, newStewardWallet, newNode = nodeThetaAdded

    newNode.stop()
    looper.removeProdable(name=newNode.name)
    nodeHa, nodeCHa = HA(*newNode.nodestack.ha), HA(*newNode.clientstack.ha)
    sigseed = randomString(32).encode()
    verkey = base58.b58encode(SimpleSigner(seed=sigseed).naclSigner.verraw)
    changeNodeKeys(looper, newSteward, newStewardWallet, newNode, verkey)
    initNodeKeysForBothStacks(newNode.name, tdirWithPoolTxns, sigseed,
                              override=True)

    logger.debug("{} starting with HAs {} {}".format(newNode, nodeHa, nodeCHa))
    node = TestNode(newNode.name, basedirpath=tdirWithPoolTxns, config=tconf,
                    ha=nodeHa, cliha=nodeCHa, pluginPaths=allPluginsPath)
    looper.add(node)
    # The last element of `txnPoolNodeSet` is the node Theta that was just
    # stopped
    txnPoolNodeSet[-1] = node

    looper.run(checkNodesConnected(stacks=txnPoolNodeSet))
    waitNodeDataEquality(looper, node, *txnPoolNodeSet[:-1])
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
Code Example #10
def run_node(config, name, node_ip, node_port, client_ip, client_port):
    node_ha = HA(node_ip, node_port)
    client_ha = HA(client_ip, client_port)

    node_config_helper = NodeConfigHelper(name, config)

    logFileName = os.path.join(node_config_helper.log_dir, name + ".log")

    logger = getlogger()
    Logger().apply_config(config)
    Logger().enableFileLogging(logFileName)

    logger.setLevel(config.logLevel)
    logger.debug("You can find logs in {}".format(logFileName))

    indy_vars = [var for var in os.environ.keys() if var.startswith("INDY")]
    logger.debug("Indy related env vars: {}".format(indy_vars))

    with Looper(debug=config.LOOPER_DEBUG) as looper:
        node = Node(name,
                    config_helper=node_config_helper,
                    ha=node_ha, cliha=client_ha)
        node = integrate(node_config_helper, node, logger)
        looper.add(node)
        looper.run()
Code Example #11
File: node_runner.py Project: kc-diabeat/indy-node
def run_node(config, name, node_port, client_port):
    node_ha = HA("0.0.0.0", node_port)
    client_ha = HA("0.0.0.0", client_port)

    logFileName = os.path.join(config.baseDir, name + ".log")

    Logger(config)
    Logger().enableFileLogging(logFileName)

    logger = getlogger()
    logger.setLevel(config.logLevel)
    logger.debug("You can find logs in {}".format(logFileName))

    indy_vars = [var for var in os.environ.keys() if var.startswith("INDY")]
    logger.debug("Indy related env vars: {}".format(indy_vars))

    from stp_core.loop.looper import Looper
    from indy_node.server.node import Node
    with Looper(debug=config.LOOPER_DEBUG) as looper:
        node = Node(name,
                    nodeRegistry=None,
                    basedirpath=config.baseDir,
                    ha=node_ha,
                    cliha=client_ha)
        looper.add(node)
        looper.run()
Code Example #12
def updateNodeDataAndReconnect(looper, steward, stewardWallet, node,
                               node_data,
                               tdirWithPoolTxns, tconf, txnPoolNodeSet):
    updateNodeData(looper, steward, stewardWallet, node, node_data)
    # restart the Node with new HA
    node.stop()
    node_alias = node_data.get(ALIAS, None) or node.name
    node_ip = node_data.get(NODE_IP, None) or node.nodestack.ha.host
    node_port = node_data.get(NODE_PORT, None) or node.nodestack.ha.port
    client_ip = node_data.get(CLIENT_IP, None) or node.clientstack.ha.host
    client_port = node_data.get(CLIENT_PORT, None) or node.clientstack.ha.port
    looper.removeProdable(name=node.name)
    restartedNode = TestNode(node_alias, basedirpath=tdirWithPoolTxns, base_data_dir=tdirWithPoolTxns,
                             config=tconf,
                             ha=HA(node_ip, node_port),
                             cliha=HA(client_ip, client_port))
    looper.add(restartedNode)

    # replace node in txnPoolNodeSet
    try:
        idx = next(i for i, n in enumerate(txnPoolNodeSet)
                   if n.name == node.name)
    except StopIteration:
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode

    looper.run(checkNodesConnected(txnPoolNodeSet))
    return restartedNode
Code Example #13
def testNodeCatchupFPlusOne(looper, txnPoolNodeSet, sdk_pool_handle,
                            sdk_wallet_steward, tconf, tdir, tdirWithPoolTxns,
                            allPluginsPath, testNodeClass):
    """
    Check that f+1 nodes are enough for catchup
    """

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".format(
        node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node0,
                                            stopNode=True)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    logger.debug("Stopping node1 with pool ledger size {}".format(
        node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node1,
                                            stopNode=True)
    looper.removeProdable(node1)

    # Make sure the stopped node got out of sync
    # Excluding the state check since the node is stopped, hence its state db
    # is closed
    waitNodeDataInequality(looper,
                           node0,
                           *txnPoolNodeSet[:-2],
                           exclude_from_check=['check_state'])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    nodeHa, nodeCHa = HA(*node0.nodestack.ha), HA(*node0.clientstack.ha)
    config_helper = PNodeConfigHelper(node0.name, tconf, chroot=tdir)
    node0 = testNodeClass(node0.name,
                          config_helper=config_helper,
                          ha=nodeHa,
                          cliha=nodeCHa,
                          config=tconf,
                          pluginPaths=allPluginsPath)
    looper.add(node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
Code Example #14
def overlapNodePorts(nodeReg):
    """
    From the given node registry, make Alpha and Beta run on the same port.
    """
    A = nodeReg['Alpha']
    betaPort = nodeReg['Beta'].ha.port
    nodeReg['Alpha'] = NodeDetail(HA(A.ha.host, betaPort), A.cliname,
                                  HA(A.cliha.host, A.cliha.port))
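
NodeDetail above bundles a node's HA, its client stack name and its client
HA. With stand-in named tuples (field order assumed), the deliberate port
collision is easy to see:

from collections import namedtuple

HA = namedtuple('HA', ['host', 'port'])
NodeDetail = namedtuple('NodeDetail', ['ha', 'cliname', 'cliha'])  # assumed fields

nodeReg = {
    'Alpha': NodeDetail(HA('127.0.0.1', 9701), 'AlphaC', HA('127.0.0.1', 9702)),
    'Beta': NodeDetail(HA('127.0.0.1', 9703), 'BetaC', HA('127.0.0.1', 9704)),
}
A = nodeReg['Alpha']
nodeReg['Alpha'] = NodeDetail(HA(A.ha.host, nodeReg['Beta'].ha.port),
                              A.cliname, A.cliha)
assert nodeReg['Alpha'].ha.port == nodeReg['Beta'].ha.port  # ports now collide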
Code Example #15

def test_catchup_with_lost_first_consistency_proofs(txnPoolNodeSet,
                                                    looper,
                                                    sdk_pool_handle,
                                                    sdk_wallet_steward,
                                                    tconf,
                                                    tdir,
                                                    allPluginsPath,
                                                    monkeypatch,
                                                    lost_count):
    '''Skip processing of the first lost_count CONSISTENCY_PROOFs during
    catchup. The catching-up node then lacks an f+1 quorum of
    CONSISTENCY_PROOFs for the longer transaction list, so it has to request
    CONSISTENCY_PROOFs again before it can finish catchup.
    The test makes sure that the node eventually finishes catchup.'''
    node_to_disconnect = txnPoolNodeSet[-1]

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)

    # restart node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward,
                              2)

    nodeHa, nodeCHa = HA(*node_to_disconnect.nodestack.ha), HA(
        *node_to_disconnect.clientstack.ha)
    config_helper = PNodeConfigHelper(node_to_disconnect.name, tconf,
                                      chroot=tdir)
    node_to_disconnect = TestNode(node_to_disconnect.name,
                                  config_helper=config_helper,
                                  config=tconf,
                                  ha=nodeHa, cliha=nodeCHa,
                                  pluginPaths=allPluginsPath)

    def unpatch_after_call(proof, frm):
        global call_count
        call_count += 1
        if call_count >= lost_count:
            # unpatch processConsistencyProof after lost_count calls
            node_to_disconnect.nodeMsgRouter.add((ConsistencyProof,
                                                  node_to_disconnect.ledgerManager.processConsistencyProof))
            call_count = 0

    # patch processConsistencyProof
    node_to_disconnect.nodeMsgRouter.add((ConsistencyProof, unpatch_after_call))
    # add node_to_disconnect to pool
    looper.add(node_to_disconnect)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
Code Example #16

def test_catchup_with_lost_ledger_status(txnPoolNodeSet,
                                         looper,
                                         sdk_pool_handle,
                                         sdk_wallet_steward,
                                         tconf,
                                         tdir,
                                         allPluginsPath,
                                         monkeypatch,
                                         lost_count):
    '''Skip processing of lost_count message responses carrying LEDGER_STATUS
    during catchup; the test makes sure that the node eventually finishes
    catchup.'''

    node_to_disconnect = txnPoolNodeSet[-1]

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)

    # restart node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_disconnect)
    looper.removeProdable(name=node_to_disconnect.name)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward,
                              2)

    nodeHa, nodeCHa = HA(*node_to_disconnect.nodestack.ha), HA(
        *node_to_disconnect.clientstack.ha)
    config_helper = PNodeConfigHelper(node_to_disconnect.name, tconf,
                                      chroot=tdir)
    node_to_disconnect = TestNode(node_to_disconnect.name,
                                  config_helper=config_helper,
                                  config=tconf,
                                  ha=nodeHa, cliha=nodeCHa,
                                  pluginPaths=allPluginsPath)

    def unpatch_after_call(status, frm):
        global call_count
        call_count += 1
        if call_count >= lost_count:
            # unpatch processLedgerStatus after lost_count calls
            node_to_disconnect.nodeMsgRouter.add((LedgerStatus, node_to_disconnect.ledgerManager.processLedgerStatus))
            call_count = 0

    # patch processLedgerStatus
    node_to_disconnect.nodeMsgRouter.add((LedgerStatus, unpatch_after_call))

    # add node_to_disconnect to pool
    looper.add(node_to_disconnect)
    txnPoolNodeSet[-1] = node_to_disconnect
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, node_to_disconnect, *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
Code Example #17
 def addNewNodeAndConnect(self, txn_data):
     nodeName = txn_data[DATA][ALIAS]
     if nodeName == self.name:
         logger.debug("{} adding itself to node registry".format(self.name))
         self.node.nodeReg[nodeName] = HA(txn_data[DATA][NODE_IP],
                                          txn_data[DATA][NODE_PORT])
         self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = \
             HA(txn_data[DATA][CLIENT_IP],
                txn_data[DATA][CLIENT_PORT])
     else:
         self.connectNewRemote(txn_data, nodeName, self.node,
                               nodeName != self.name)
Code Example #18
def testNodeCatchupFPlusOne(txnPoolNodeSet, poolAfterSomeTxns, tconf, tdir,
                            tdirWithPoolTxns, allPluginsPath, testNodeClass):
    """
    Check that f+1 nodes are enough for catchup
    """
    looper, client, wallet = poolAfterSomeTxns

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".format(
        node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node0,
                                            stopNode=True)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)

    logger.debug("Stopping node1 with pool ledger size {}".format(
        node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node1,
                                            stopNode=True)
    looper.removeProdable(node1)

    # Make sure the stopped node got out of sync
    waitNodeDataInequality(looper, node0, *txnPoolNodeSet[:-2])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    nodeHa, nodeCHa = HA(*node0.nodestack.ha), HA(*node0.clientstack.ha)
    config_helper = PNodeConfigHelper(node0.name, tconf, chroot=tdir)
    node0 = testNodeClass(node0.name,
                          config_helper=config_helper,
                          ha=nodeHa,
                          cliha=nodeCHa,
                          config=tconf,
                          pluginPaths=allPluginsPath)
    looper.add(node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
Code Example #19
File: stack_manager.py Project: Artemkaaas/plenum
    def parseLedgerForHaAndKeys(ledger, returnActive=True):
        """
        Return validator IPs, ports and keys
        :param ledger:
        :param returnActive: If returnActive is True, return only those
        validators which are not out of service
        :return:
        """
        nodeReg = OrderedDict()
        cliNodeReg = OrderedDict()
        nodeKeys = {}
        activeValidators = set()
        for _, txn in ledger.getAllTxn():
            if txn[TXN_TYPE] == NODE:
                nodeName = txn[DATA][ALIAS]
                clientStackName = nodeName + CLIENT_STACK_SUFFIX
                nHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT]) \
                    if (NODE_IP in txn[DATA] and NODE_PORT in txn[DATA]) \
                    else None
                cHa = (txn[DATA][CLIENT_IP], txn[DATA][CLIENT_PORT]) \
                    if (CLIENT_IP in txn[DATA] and CLIENT_PORT in txn[DATA]) \
                    else None
                if nHa:
                    nodeReg[nodeName] = HA(*nHa)
                if cHa:
                    cliNodeReg[clientStackName] = HA(*cHa)

                try:
                    # TODO: Need to handle abbreviated verkey
                    verkey = cryptonymToHex(txn[TARGET_NYM])
                except ValueError as ex:
                    raise ValueError("Invalid verkey. Rebuild pool transactions.")

                nodeKeys[nodeName] = verkey

                services = txn[DATA].get(SERVICES)
                if isinstance(services, list):
                    if VALIDATOR in services:
                        activeValidators.add(nodeName)
                    else:
                        activeValidators.discard(nodeName)

        if returnActive:
            allNodes = tuple(nodeReg.keys())
            for nodeName in allNodes:
                if nodeName not in activeValidators:
                    nodeReg.pop(nodeName, None)
                    cliNodeReg.pop(nodeName + CLIENT_STACK_SUFFIX, None)
                    nodeKeys.pop(nodeName, None)

            return nodeReg, cliNodeReg, nodeKeys
        else:
            return nodeReg, cliNodeReg, nodeKeys, activeValidators
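
The ledger walk above reduces to: for every NODE txn, record the HAs that are
present and track VALIDATOR membership with a set, letting later txns override
earlier ones. A self-contained sketch over plain dicts (all constant values
assumed for illustration):

from collections import OrderedDict, namedtuple

HA = namedtuple('HA', ['host', 'port'])
TXN_TYPE, NODE, DATA, ALIAS = 'type', '0', 'data', 'alias'
NODE_IP, NODE_PORT = 'node_ip', 'node_port'
SERVICES, VALIDATOR = 'services', 'VALIDATOR'

def parse_txns(txns):
    nodeReg, active = OrderedDict(), set()
    for txn in txns:
        if txn[TXN_TYPE] != NODE:
            continue
        name = txn[DATA][ALIAS]
        if NODE_IP in txn[DATA] and NODE_PORT in txn[DATA]:
            nodeReg[name] = HA(txn[DATA][NODE_IP], txn[DATA][NODE_PORT])
        services = txn[DATA].get(SERVICES)
        if isinstance(services, list):
            (active.add if VALIDATOR in services else active.discard)(name)
    # keep only validators that are still in service
    return OrderedDict((n, ha) for n, ha in nodeReg.items() if n in active)

txns = [
    {TXN_TYPE: NODE, DATA: {ALIAS: 'Alpha', NODE_IP: '10.0.0.1',
                            NODE_PORT: 9701, SERVICES: [VALIDATOR]}},
    {TXN_TYPE: NODE, DATA: {ALIAS: 'Alpha', SERVICES: []}},  # demotes Alpha
]
assert parse_txns(txns) == OrderedDict()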
Code Example #20
File: helper.py Project: zukobronja/indy-node
def start_stopped_node(stopped_node, looper, tconf, tdir, allPluginsPath):
    nodeHa, nodeCHa = HA(*stopped_node.nodestack.ha), HA(
        *stopped_node.clientstack.ha)
    config_helper = NodeConfigHelper(stopped_node.name, tconf, chroot=tdir)
    restarted_node = TestNode(stopped_node.name,
                              config_helper=config_helper,
                              config=tconf,
                              ha=nodeHa,
                              cliha=nodeCHa,
                              pluginPaths=allPluginsPath)
    looper.add(restarted_node)
    return restarted_node
Code Example #21
    def nodeServicesChanged(self, txn_data) -> bool:
        nodeNym = txn_data[TARGET_NYM]
        nodeName = self.getNodeName(nodeNym)
        oldServices = set(self._ordered_node_services.get(nodeNym, []))
        newServices = set(txn_data[DATA].get(SERVICES, []))
        if oldServices == newServices:
            logger.info(
                "Node {} not changing {} since they are the same as the "
                "existing ones".format(nodeNym, SERVICES))
            return False

        node_count_changed = False
        if VALIDATOR in newServices.difference(oldServices):
            node_count_changed = True
            # If validator service is enabled
            node_info = self.write_manager.get_node_data(nodeNym)
            self.node.nodeReg[nodeName] = HA(node_info[NODE_IP],
                                             node_info[NODE_PORT])
            self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = HA(
                node_info[CLIENT_IP], node_info[CLIENT_PORT])

            self.updateNodeTxns({
                DATA: node_info,
            }, txn_data)

            if self.name != nodeName:
                self.connectNewRemote({
                    DATA: node_info,
                    TARGET_NYM: nodeNym
                }, nodeName, self.node)
            else:
                logger.debug("{} adding itself to node registry".format(
                    self.name))

        if VALIDATOR in oldServices.difference(newServices):
            node_count_changed = True
            # If validator service is disabled
            del self.node.nodeReg[nodeName]
            del self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX]

            if self.name != nodeName:
                try:
                    rid = TxnStackManager.removeRemote(self.node.nodestack,
                                                       nodeName)
                    if rid:
                        self.node.nodestack.outBoxes.pop(rid, None)
                except RemoteNotFound:
                    logger.info('{} did not find remote {} to remove'.format(
                        self, nodeName))
                self.node_about_to_be_disconnected(nodeName)

        return node_count_changed
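
The promotion/demotion branches above hinge on two set differences between
the old and new SERVICES lists. A tiny self-contained illustration (the
VALIDATOR value is assumed):

VALIDATOR = 'VALIDATOR'

old_services, new_services = set(), {VALIDATOR}
promoted = VALIDATOR in new_services.difference(old_services)  # enabled: add to nodeReg
demoted = VALIDATOR in old_services.difference(new_services)   # disabled: remove from nodeReg
assert promoted and not demoted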
Code Example #22
File: stack_manager.py Project: Artemkaaas/plenum
    def stackHaChanged(self, txn, remoteName, nodeOrClientObj):
        nodeHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT])
        cliHa = (txn[DATA][CLIENT_IP], txn[DATA][CLIENT_PORT])
        rid = self.removeRemote(nodeOrClientObj.nodestack, remoteName)
        if self.isNode:
            nodeOrClientObj.nodeReg[remoteName] = HA(*nodeHa)
            nodeOrClientObj.cliNodeReg[remoteName + CLIENT_STACK_SUFFIX] = HA(*cliHa)
        else:
            nodeOrClientObj.nodeReg[remoteName] = HA(*cliHa)

        # Attempt connection at the new HA
        nodeOrClientObj.nodestack.maintainConnections(force=True)

        return rid
Code Example #23
    def stackHaChanged(self, txn_data, remoteName, nodeOrClientObj):
        nodeHa = None
        cliHa = None
        if self.isNode:
            node_ha_changed = False
            (ip, port) = nodeOrClientObj.nodeReg[remoteName]
            if NODE_IP in txn_data[DATA] and ip != txn_data[DATA][NODE_IP]:
                ip = txn_data[DATA][NODE_IP]
                node_ha_changed = True

            if NODE_PORT in txn_data[DATA] and \
                    port != txn_data[DATA][NODE_PORT]:
                port = txn_data[DATA][NODE_PORT]
                node_ha_changed = True

            if node_ha_changed:
                nodeHa = (ip, port)

        cli_ha_changed = False
        (ip, port) = nodeOrClientObj.cliNodeReg[remoteName + CLIENT_STACK_SUFFIX] \
            if self.isNode \
            else nodeOrClientObj.nodeReg[remoteName]

        if CLIENT_IP in txn_data[DATA] and ip != txn_data[DATA][CLIENT_IP]:
            ip = txn_data[DATA][CLIENT_IP]
            cli_ha_changed = True

        if CLIENT_PORT in txn_data[DATA] and \
                port != txn_data[DATA][CLIENT_PORT]:
            port = txn_data[DATA][CLIENT_PORT]
            cli_ha_changed = True

        if cli_ha_changed:
            cliHa = (ip, port)

        rid = self.removeRemote(nodeOrClientObj.nodestack, remoteName)
        if self.isNode:
            if nodeHa:
                nodeOrClientObj.nodeReg[remoteName] = HA(*nodeHa)
            if cliHa:
                nodeOrClientObj.cliNodeReg[remoteName +
                                           CLIENT_STACK_SUFFIX] = HA(*cliHa)
        elif cliHa:
            nodeOrClientObj.nodeReg[remoteName] = HA(*cliHa)

        # Attempt connection at the new HA
        nodeOrClientObj.nodestack.maintainConnections(force=True)

        return rid
Code Example #24
 def __init__(self, tmpdir):
     self.basedirpath = tmpdir
     self.name = 'Node1'
     self.f = 1
     self.replicas = []
     self.rank = None
     self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
     self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
     self.totalNodes = len(self.allNodeNames)
     self.mode = Mode.starting
     self.replicas = [
         Replica(node=self, instId=0, isMaster=True),
         Replica(node=self, instId=1, isMaster=False),
         Replica(node=self, instId=2, isMaster=False),
     ]
     self._found = False
     self.ledgerManager = LedgerManager(self, ownedByNode=True)
     ledger0 = FakeLedger(0, 10)
     ledger1 = FakeLedger(1, 5)
     self.ledgerManager.addLedger(0, ledger0)
     self.ledgerManager.addLedger(1, ledger1)
     self.quorums = Quorums(self.totalNodes)
     self.config = getConfig()  # TODO do we need fake object here?
     self.view_changer = ViewChanger(self)
     self.elector = PrimarySelector(self)
Code Example #25
    def __init__(self,
                 name: str = None,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 peerHA: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None,
                 sighex: str = None):
        config = config or getConfig()
        super().__init__(name, nodeReg, ha, basedirpath, config, sighex)
        self.autoDiscloseAttributes = False
        self.requestedPendingTxns = False
        self.hasAnonCreds = bool(peerHA)
        if self.hasAnonCreds:
            self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)

            stackargs = dict(name=self.stackName,
                             ha=self.peerHA,
                             main=True,
                             auth_mode=AuthMode.ALLOW_ANY.value)

            self.peerMsgRoutes = []
            self.peerMsgRouter = Router(*self.peerMsgRoutes)
            self.peerStack = self.peerStackClass(
                stackargs, msgHandler=self.handlePeerMessage)
            self.peerStack.sign = self.sign
            self.peerInbox = deque()
        self._observers = {}  # type: Dict[str, Callable]
        self._observerSet = set()  # makes it easier to guard against duplicates
Code Example #26
 def getNext(self, count: int = 1, ip=None):
     ip = ip or self.ip
     has = [HA(ip, port) for port in self.get(count)]
     if len(has) == 1:
         return has[0]
     else:
         return has
Code Example #27
    def __init__(self,
                 port: int,
                 msgHandler: Callable,
                 name: str = None,
                 basedirpath: str = None,
                 seed=None,
                 onlyListener=False,
                 msgRejectHandler=None):
        stackParams = {
            "name": name or randomString(8),
            "ha": HA("0.0.0.0", port),
            "auth_mode": AuthMode.ALLOW_ANY.value
        }
        if basedirpath:
            stackParams["basedirpath"] = os.path.join(basedirpath, "keys")

        seed = seed or randomSeed()
        SimpleZStack.__init__(self,
                              stackParams,
                              self.tracedMsgHandler,
                              seed=seed,
                              onlyListener=onlyListener,
                              msgRejectHandler=msgRejectHandler)

        self.msgHandler = msgHandler
Code Example #28
File: script_helper.py Project: aigoncharov/plenum
def changeHA(looper, config, nodeName, nodeSeed, newNodeHA,
             stewardName, stewardsSeed, newClientHA=None):
    if not newClientHA:
        newClientHA = HA(newNodeHA.host, newNodeHA.port + 1)

    # prepare steward wallet
    stewardSigner = SimpleSigner(seed=stewardsSeed)
    stewardWallet = Wallet(stewardName)
    stewardWallet.addIdentifier(signer=stewardSigner)

    # prepare client to submit change ha request
    _, randomClientPort = genHa()
    client = Client(stewardName,
                    ha=('0.0.0.0', randomClientPort), config=config)
    looper.add(client)
    timeout = waits.expectedClientToPoolConnectionTimeout(4)
    looper.run(eventually(__checkClientConnected, client,
                          retryWait=1, timeout=timeout))

    nodeVerKey = SimpleSigner(seed=nodeSeed).verkey

    # send request
    req = submitNodeIpChange(client, stewardWallet, nodeName, nodeVerKey,
                             newNodeHA, newClientHA)
    return client, req
Code Example #29
File: load.py Project: aigoncharov/plenum
def load():
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    wallet.addIdentifier(signer=SimpleSigner(
        seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            reqs = requests[i:i + numReqs // splits + 1]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                fVal=2,
                                                customTimeoutPerReq=3)
            print('>>> Got replies for {} requests << in {}'.format(
                numReqs // splits,
                perf_counter() - s))
        end = perf_counter()
        print('>>>{}<<<'.format(end - start))
        exit(0)
Code Example #30
 def __init__(self, tmpdir, config=None):
     self.basedirpath = tmpdir
     self.name = 'Node1'
     self.f = 1
     self.replicas = dict()
     self.requests = []
     self.rank = None
     self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
     self.nodeReg = {
         name: HA("127.0.0.1", 0) for name in self.allNodeNames
     }
     self.totalNodes = len(self.allNodeNames)
     self.mode = Mode.starting
     self.config = config or getConfigOnce()
     self.replicas = {
         0: Replica(node=self, instId=0, isMaster=True, config=self.config),
         1: Replica(node=self, instId=1, isMaster=False, config=self.config),
         2: Replica(node=self, instId=2, isMaster=False, config=self.config),
     }
     self._found = False
     self.ledgerManager = LedgerManager(self, ownedByNode=True)
     ledger0 = FakeLedger(0, 10)
     ledger1 = FakeLedger(1, 5)
     self.ledgerManager.addLedger(0, ledger0)
     self.ledgerManager.addLedger(1, ledger1)
     self.quorums = Quorums(self.totalNodes)
     self.view_changer = ViewChanger(self)
     self.elector = PrimarySelector(self)
     self.metrics = NullMetricsCollector()