Code example #1
def test_node_load_after_disconnect(looper, txnPoolNodeSet, tconf,
                                    allPluginsPath,
                                    tdirWithPoolTxns,
                                    sdk_pool_handle,
                                    sdk_wallet_client,
                                    capsys):
    nodes = txnPoolNodeSet
    x = nodes[-1]

    with capsys.disabled():
        print("Stopping node {} with pool ledger size {}".
              format(x, x.poolManager.txnSeqNo))

    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, x)
    looper.removeProdable(x)

    client_batches = 80
    txns_per_batch = 10
    for i in range(client_batches):
        s = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, txns_per_batch)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(i + 1, txns_per_batch, perf_counter() - s))

    nodeHa, nodeCHa = HA(*x.nodestack.ha), HA(*x.clientstack.ha)
    newNode = TestNode(x.name, basedirpath=tdirWithPoolTxns, base_data_dir=tdirWithPoolTxns, config=tconf,
                       ha=nodeHa, cliha=nodeCHa, pluginPaths=allPluginsPath)
    looper.add(newNode)
    txnPoolNodeSet[-1] = newNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
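All of these snippets revolve around the HA address type, which the code treats as a named (host, port) pair (see HA(*x.nodestack.ha) above and the ha.port / ha.host accesses in later examples). A minimal stand-in, useful for running the snippets outside the codebase; this is an assumption about HA's shape, not the project's actual definition:

from typing import NamedTuple

# Hypothetical stand-in: the examples use HA as a named (host, port) tuple.
class HA(NamedTuple):
    host: str
    port: int

ha = HA('0.0.0.0', 9701)
assert ha.host == '0.0.0.0' and ha.port == 9701
assert ha == HA(*('0.0.0.0', 9701))  # the HA(*tuple) unpacking seen above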
Code example #2
def testCreateAgentDoesNotAllocatePort(tdirWithPoolTxns):
    for i in range(2):
        checkPortAvailable(HA("0.0.0.0", agentPort))
        agent = getNewAgent("Agent0", tdirWithPoolTxns, agentPort,
                            agentWallet())
        checkPortAvailable(HA("0.0.0.0", agentPort))
        agent.stop()
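A sketch of what a checkPortAvailable-style helper could look like, assuming it simply tries to bind the address and lets the error propagate if the port is taken; the project's real helper may behave differently:

import socket

def check_port_available(ha):
    # Binding raises OSError (e.g. EADDRINUSE) when the port is occupied.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((ha.host, ha.port))
    finally:
        s.close()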
Code example #3
File: pool_manager.py Project: loxadim/plenum
    def onPoolMembershipChange(self, txn):
        if txn[TXN_TYPE] == NODE:
            nodeName = txn[DATA][ALIAS]
            nodeNym = txn[TARGET_NYM]

            def _updateNode(txn):
                if {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT}. \
                        intersection(set(txn[DATA].keys())):
                    self.nodeHaChanged(txn)
                if VERKEY in txn:
                    self.nodeKeysChanged(txn)
                if SERVICES in txn[DATA]:
                    self.nodeServicesChanged(txn)

            if nodeName in self.nodeReg:
                # The node was already part of the pool so update
                _updateNode(txn)
            else:
                seqNos, info = self.getNodeInfoFromLedger(nodeNym)
                if len(seqNos) == 1:
                    # Since only one transaction has been made, this is a new
                    # node transaction
                    self.addNewNodeAndConnect(txn)
                else:
                    self.node.nodeReg[nodeName] = HA(info[DATA][NODE_IP],
                                                     info[DATA][NODE_PORT])
                    self.node.cliNodeReg[nodeName] = HA(
                        info[DATA][CLIENT_IP], info[DATA][CLIENT_PORT])
                    _updateNode(txn)

            self.node.sendPoolInfoToClients(txn)
            if self.config.UpdateGenesisPoolTxnFile:
                updateGenesisPoolTxnFile(self.config.baseDir,
                                         self.config.poolTransactionsFile, txn)
Code example #4
    def connectNewRemote(self,
                         txn,
                         remoteName,
                         nodeOrClientObj,
                         addRemote=True):
        verkey = cryptonymToHex(txn[TARGET_NYM])

        nodeHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT])
        cliHa = (txn[DATA][CLIENT_IP], txn[DATA][CLIENT_PORT])

        if addRemote:
            try:
                # Override any existing keys; this covers the scenario where
                # the other node tries to connect before this node has
                # learned about it.
                initRemoteKeep(self.name,
                               remoteName,
                               self.basedirpath,
                               verkey,
                               override=True)
            except Exception as ex:
                logger.error(
                    "Exception while initializing keep for remote {}: {}"
                    .format(remoteName, ex))

        if self.isNode:
            nodeOrClientObj.nodeReg[remoteName] = HA(*nodeHa)
            nodeOrClientObj.cliNodeReg[remoteName +
                                       CLIENT_STACK_SUFFIX] = HA(*cliHa)
            logger.debug("{} adding new node {} with HA {}".format(
                self.name, remoteName, nodeHa))
        else:
            nodeOrClientObj.nodeReg[remoteName] = HA(*cliHa)
            logger.debug("{} adding new node {} with HA {}".format(
                self.name, remoteName, cliHa))
Code example #5
File: pool_manager.py Project: qyynuaa/plenum
    def getNodeStackParams(name,
                           nodeRegistry: Dict[str, HA],
                           ha: HA = None,
                           basedirpath: str = None) -> Tuple[dict, dict, dict]:
        """
        Return tuple(nodeStack params, nodeReg)
        """
        me = nodeRegistry[name]
        if isinstance(me, NodeDetail):
            sha = me.ha
            nodeReg = {k: v.ha for k, v in nodeRegistry.items()}
        else:
            sha = me if isinstance(me, HA) else HA(*me[0])
            nodeReg = {
                k: v if isinstance(v, HA) else HA(*v[0])
                for k, v in nodeRegistry.items()
            }
        if not ha:  # pull it from the registry
            ha = sha

        cliNodeReg = {r.cliname: r.cliha for r in nodeRegistry.values()}

        nstack = dict(name=name, ha=ha, main=True, auto=AutoMode.never)

        if basedirpath:
            nstack['basedirpath'] = basedirpath

        return nstack, nodeReg, cliNodeReg
Code example #6
File: pool_manager.py Project: qyynuaa/plenum
    def getClientStackParams(name, nodeRegistry: Dict[str, HA], cliname, cliha,
                             basedirpath) -> dict:
        """
        Return clientStack params
        """
        me = nodeRegistry[name]
        if isinstance(me, NodeDetail):
            sha = me.ha
            scliname = me.cliname
            scliha = me.cliha
        else:
            sha = me if isinstance(me, HA) else HA(*me[0])
            scliname = None
            scliha = None

        if not cliname:  # default to the name plus the suffix
            cliname = scliname if scliname else name + CLIENT_STACK_SUFFIX
        if not cliha:  # default to same ip, port + 1
            cliha = scliha if scliha else HA(sha[0], sha[1] + 1)

        cstack = dict(name=cliname, ha=cliha, main=True, auto=AutoMode.always)

        if basedirpath:
            cstack['basedirpath'] = basedirpath

        return cstack
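The defaults above encode the convention used throughout these examples: the client stack reuses the node's host and listens on the node port plus one. Illustrative values:

node_ha = HA('10.20.30.40', 9701)        # node-to-node listener
cli_ha = HA(node_ha[0], node_ha[1] + 1)  # client listener defaults to port + 1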
Code example #7
File: pool_manager.py Project: qyynuaa/plenum
    def getStackParamsAndNodeReg(self,
                                 name,
                                 basedirpath,
                                 nodeRegistry=None,
                                 ha=None,
                                 cliname=None,
                                 cliha=None):
        nodeReg, cliNodeReg, nodeKeys = self.parseLedgerForHaAndKeys()

        self.addRemoteKeysFromLedger(nodeKeys)

        # If no HA was given, use the one derived from the pool ledger
        if not ha:
            ha = nodeReg[name]

        nstack = dict(name=name,
                      ha=HA('0.0.0.0', ha[1]),
                      main=True,
                      auto=AutoMode.never)
        nodeReg[name] = HA(*ha)

        cliname = cliname or (name + CLIENT_STACK_SUFFIX)
        if not cliha:
            cliha = cliNodeReg[cliname]
        cstack = dict(name=cliname,
                      ha=HA('0.0.0.0', cliha[1]),
                      main=True,
                      auto=AutoMode.always)
        cliNodeReg[cliname] = HA(*cliha)

        if basedirpath:
            nstack['basedirpath'] = basedirpath
            cstack['basedirpath'] = basedirpath

        return nstack, cstack, nodeReg, cliNodeReg
Code example #8
def test_node_load_after_add_then_disconnect(sdk_new_node_caught_up, txnPoolNodeSet,
                                             tconf, looper, sdk_pool_handle,
                                             sdk_wallet_client,
                                             tdirWithPoolTxns, allPluginsPath,
                                             capsys):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """
    new_node = sdk_new_node_caught_up
    with capsys.disabled():
        print("Stopping node {} with pool ledger size {}".
              format(new_node, new_node.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, new_node)
    looper.removeProdable(new_node)

    client_batches = 80
    txns_per_batch = 10
    for i in range(client_batches):
        s = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, txns_per_batch)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(i + 1, txns_per_batch, perf_counter() - s))

    with capsys.disabled():
        print("Starting the stopped node, {}".format(new_node))
    nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha)
    new_node = TestNode(
        new_node.name,
        basedirpath=tdirWithPoolTxns,
        base_data_dir=tdirWithPoolTxns,
        config=tconf,
        ha=nodeHa,
        cliha=nodeCHa,
        pluginPaths=allPluginsPath)
    looper.add(new_node)
    txnPoolNodeSet[-1] = new_node

    # Delay catchup reply processing so LedgerState does not change
    delay_catchup_reply = 5
    new_node.nodeIbStasher.delay(cr_delay(delay_catchup_reply))
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # Make sure ledger starts syncing (sufficient consistency proofs received)
    looper.run(eventually(check_ledger_state, new_node, DOMAIN_LEDGER_ID,
                          LedgerState.syncing, retryWait=.5, timeout=5))

    # Not an accurate timeout, but a conservative one
    timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) + \
              2 * delay_catchup_reply
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4],
                         customTimeout=timeout)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4])
Code example #9
def overlapNodePorts(nodeReg):
    """
    From the given node registry, make Alpha and Beta run on the same port.
    """
    A = nodeReg['Alpha']
    betaPort = nodeReg['Beta'].ha.port
    nodeReg['Alpha'] = NodeDetail(HA(A.ha.host, betaPort), A.cliname,
                                  HA(A.cliha.host, A.cliha.port))
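A usage sketch, assuming a nodeReg of NodeDetail entries as in the function above; after the call, Alpha's node HA collides with Beta's while its client HA is untouched:

overlapNodePorts(nodeReg)
assert nodeReg['Alpha'].ha.port == nodeReg['Beta'].ha.port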
Code example #10
    def stackHaChanged(self, txn, remoteName, nodeOrClientObj):
        nodeHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT])
        cliHa = (txn[DATA][CLIENT_IP], txn[DATA][CLIENT_PORT])
        rid = self.removeRemote(nodeOrClientObj.nodestack, remoteName)
        if self.isNode:
            nodeOrClientObj.nodeReg[remoteName] = HA(*nodeHa)
            nodeOrClientObj.cliNodeReg[remoteName +
                                       CLIENT_STACK_SUFFIX] = HA(*cliHa)
        else:
            nodeOrClientObj.nodeReg[remoteName] = HA(*cliHa)
        return rid
Code example #11
def test_node_catchup_after_restart_no_txns(
        newNodeCaughtUp,
        txnPoolNodeSet,
        tdir,
        tconf,
        nodeSetWithNodeAddedAfterSomeTxns,
        tdirWithPoolTxns,
        allPluginsPath):
    """
    A node restarts but no transactions have happened while it was down.
    It would then use the `LedgerStatus` to catchup
    """
    looper, new_node, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])

    logger.debug("Stopping node {} with pool ledger size {}".
                 format(new_node, new_node.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, new_node)
    looper.removeProdable(name=new_node.name)

    logger.debug("Starting the stopped node, {}".format(new_node))
    nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha)
    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    new_node = TestNode(
        new_node.name,
        ledger_dir=config_helper.ledger_dir,
        keys_dir=config_helper.keys_dir,
        genesis_dir=config_helper.genesis_dir,
        plugins_dir=config_helper.plugins_dir,
        config=tconf,
        ha=nodeHa,
        cliha=nodeCHa,
        pluginPaths=allPluginsPath)
    looper.add(new_node)
    txnPoolNodeSet[-1] = new_node
    looper.run(checkNodesConnected(txnPoolNodeSet))

    def chk():
        for node in txnPoolNodeSet[:-1]:
            check_last_ordered_3pc(new_node, node)

    looper.run(eventually(chk, retryWait=1))

    # sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
    # Did not receive any consistency proofs
    assert get_count(new_node.ledgerManager,
                     new_node.ledgerManager.processConsistencyProof) == 0
Code example #12
    def __init__(self,
                 name: str = None,
                 basedirpath: str = None,
                 client: Client = None,
                 port: int = None,
                 loop=None,
                 config=None,
                 endpointArgs=None):

        self.endpoint = None
        if port:
            checkPortAvailable(HA("0.0.0.0", port))
        Motor.__init__(self)
        self.loop = loop or asyncio.get_event_loop()
        self._eventListeners = {}  # Dict[str, set(Callable)]
        self._name = name or 'Agent'
        self._port = port

        self.config = config or getConfig()
        self.basedirpath = basedirpath or os.path.expanduser(
            self.config.baseDir)
        self.endpointArgs = endpointArgs

        # Client used to connect to Sovrin and forward on owner's txns
        self._client = client  # type: Client

        # known identifiers of this agent's owner
        self.ownerIdentifiers = {}  # type: Dict[Identifier, Identity]
Code example #13
File: client.py Project: mzk-vct/sovrin-client
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 peerHA: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None,
                 sighex: str = None):
        config = config or getConfig()
        super().__init__(name, nodeReg, ha, basedirpath, config, sighex)
        self.graphStore = self.getGraphStore()
        self.autoDiscloseAttributes = False
        self.requestedPendingTxns = False
        self.hasAnonCreds = bool(peerHA)
        if self.hasAnonCreds:
            # Normalize to HA and use the normalized value for the stack
            self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)
            stackargs = dict(name=self.stackName,
                             ha=self.peerHA,
                             main=True,
                             auto=AutoMode.always)
            self.peerMsgRoutes = []
            self.peerMsgRouter = Router(*self.peerMsgRoutes)
            self.peerStack = SimpleStack(stackargs,
                                         msgHandler=self.handlePeerMessage)
            self.peerStack.sign = self.sign
            self.peerInbox = deque()
        self._observers = {}  # type: Dict[str, Callable]
        self._observerSet = set()  # makes it easier to guard against duplicates
Code example #14
File: script_helper.py Project: loxadim/plenum
def changeHA(looper,
             config,
             nodeName,
             nodeSeed,
             newNodeHA,
             stewardName,
             stewardsSeed,
             newClientHA=None):

    if not newClientHA:
        newClientHA = HA(newNodeHA.host, newNodeHA.port + 1)

    # prepare steward wallet
    stewardSigner = SimpleSigner(seed=stewardsSeed)
    stewardWallet = Wallet(stewardName)
    stewardWallet.addIdentifier(signer=stewardSigner)

    # prepare client to submit change ha request to sovrin
    randomClientPort = random.randint(9700, 9799)
    client = Client(stewardName,
                    ha=('0.0.0.0', randomClientPort),
                    config=config)
    looper.add(client)
    looper.run(
        eventually(__checkClientConnected, client, retryWait=1, timeout=5))

    nodeVerKey = SimpleSigner(seed=nodeSeed).verkey

    # send request
    req = submitNodeIpChange(client, stewardWallet, nodeName, nodeVerKey,
                             newNodeHA, newClientHA)
    return client, req
Code example #15
    def parseLedgerForHaAndKeys(ledger, returnActive=True):
        """
        Returns validator ip, ports and keys
        :param ledger:
        :param returnActive: If returnActive is True, return only those
        validators which are not out of service
        :return:
        """
        nodeReg = OrderedDict()
        cliNodeReg = OrderedDict()
        nodeKeys = {}
        activeValidators = set()
        for _, txn in ledger.getAllTxn().items():
            if txn[TXN_TYPE] == NODE:
                nodeName = txn[DATA][ALIAS]
                clientStackName = nodeName + CLIENT_STACK_SUFFIX
                nHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT]) \
                    if (NODE_IP in txn[DATA] and NODE_PORT in txn[DATA]) \
                    else None
                cHa = (txn[DATA][CLIENT_IP], txn[DATA][CLIENT_PORT]) \
                    if (CLIENT_IP in txn[DATA] and CLIENT_PORT in txn[DATA]) \
                    else None
                if nHa:
                    nodeReg[nodeName] = HA(*nHa)
                if cHa:
                    cliNodeReg[clientStackName] = HA(*cHa)
                verkey = cryptonymToHex(txn[TARGET_NYM])
                nodeKeys[nodeName] = verkey

                services = txn[DATA].get(SERVICES)
                if isinstance(services, list):
                    if VALIDATOR in services:
                        activeValidators.add(nodeName)
                    else:
                        activeValidators.discard(nodeName)

        if returnActive:
            allNodes = tuple(nodeReg.keys())
            for nodeName in allNodes:
                if nodeName not in activeValidators:
                    nodeReg.pop(nodeName, None)
                    cliNodeReg.pop(nodeName + CLIENT_STACK_SUFFIX, None)
                    nodeKeys.pop(nodeName, None)

            return nodeReg, cliNodeReg, nodeKeys
        else:
            return nodeReg, cliNodeReg, nodeKeys, activeValidators
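The shape of the transactions this parser expects can be read off the key accesses above. A minimal, hypothetical NODE txn for illustration; the constants are whatever the project defines them as, and the nym value is made up:

txn = {
    TXN_TYPE: NODE,
    TARGET_NYM: 'FYmoFw55GeQH7SRFa37dkx1d2dZ3zUF8ckg7wmL7ofN4',
    DATA: {
        ALIAS: 'Alpha',
        NODE_IP: '10.0.0.1', NODE_PORT: 9701,
        CLIENT_IP: '10.0.0.1', CLIENT_PORT: 9702,
        SERVICES: [VALIDATOR],  # present => counted as an active validator
    },
}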
Code example #16
def txnPoolCliNodeReg(poolTxnData):
    cliNodeReg = {}
    for txn in poolTxnData["txns"]:
        if txn[TXN_TYPE] == NEW_NODE:
            data = txn[DATA]
            cliNodeReg[data[ALIAS] + CLIENT_STACK_SUFFIX] = HA(
                data[CLIENT_IP], data[CLIENT_PORT])
    return cliNodeReg
Code example #17
def test_node_load_after_disconnect(looper, txnPoolNodeSet, tconf,
                                    tdirWithPoolTxns, allPluginsPath,
                                    poolTxnStewardData, capsys):

    client, wallet = buildPoolClientAndWallet(poolTxnStewardData,
                                              tdirWithPoolTxns,
                                              clientClass=TestClient)
    looper.add(client)
    looper.run(client.ensureConnectedToNodes())

    nodes = txnPoolNodeSet
    x = nodes[-1]

    with capsys.disabled():
        print("Stopping node {} with pool ledger size {}".format(
            x, x.poolManager.txnSeqNo))

    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, x)
    looper.removeProdable(x)

    client_batches = 80
    txns_per_batch = 10
    for i in range(client_batches):
        s = perf_counter()
        sendReqsToNodesAndVerifySuffReplies(looper,
                                            wallet,
                                            client,
                                            txns_per_batch,
                                            override_timeout_limit=True)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.format(
                i + 1, txns_per_batch,
                perf_counter() - s))

    nodeHa, nodeCHa = HA(*x.nodestack.ha), HA(*x.clientstack.ha)
    newNode = TestNode(x.name,
                       basedirpath=tdirWithPoolTxns,
                       base_data_dir=tdirWithPoolTxns,
                       config=tconf,
                       ha=nodeHa,
                       cliha=nodeCHa,
                       pluginPaths=allPluginsPath)
    looper.add(newNode)
    txnPoolNodeSet[-1] = newNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
Code example #18
def testNodeKeysChanged(looper,
                        txnPoolNodeSet,
                        tdirWithPoolTxns,
                        tconf,
                        steward1,
                        nodeThetaAdded,
                        allPluginsPath=None):
    newSteward, newStewardWallet, newNode = nodeThetaAdded

    # The node returned by the fixture `nodeThetaAdded` was abandoned in the
    # previous test, so get node `Theta` from `txnPoolNodeSet` instead
    newNode = getNodeWithName(txnPoolNodeSet, newNode.name)

    newNode.stop()
    nodeHa, nodeCHa = HA(*newNode.nodestack.ha), HA(*newNode.clientstack.ha)
    sigseed = randomString(32).encode()
    verkey = SimpleSigner(seed=sigseed).naclSigner.verhex.decode()
    changeNodeKeys(looper, newSteward, newStewardWallet, newNode, verkey)
    initLocalKeep(newNode.name, tdirWithPoolTxns, sigseed)
    initLocalKeep(newNode.name + CLIENT_STACK_SUFFIX, tdirWithPoolTxns,
                  sigseed)
    looper.removeProdable(name=newNode.name)
    logger.debug("{} starting with HAs {} {}".format(newNode, nodeHa, nodeCHa))
    node = TestNode(newNode.name,
                    basedirpath=tdirWithPoolTxns,
                    config=tconf,
                    ha=nodeHa,
                    cliha=nodeCHa,
                    pluginPaths=allPluginsPath)
    looper.add(node)
    # The last element of `txnPoolNodeSet` is the node Theta that was just
    # stopped
    txnPoolNodeSet[-1] = node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   node,
                   *txnPoolNodeSet[:-1],
                   retryWait=1,
                   timeout=10))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
Code example #19
def createClientAndWalletWithSeed(name, seed, ha=None):
    if isinstance(seed, str):
        seed = seed.encode()
    if not ha:
        port = genHa()[1]
        ha = HA('0.0.0.0', port)
    wallet = Wallet(name)
    wallet.addIdentifier(signer=DidSigner(seed=seed))
    client = Client(name, ha=ha)
    return client, wallet
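Usage is a one-liner; seeds in these codebases are 32-character strings (compare randomString(32) in example #18). An illustrative call with a hypothetical seed:

client, wallet = createClientAndWalletWithSeed(
    'Steward1', '000000000000000000000000Steward1')  # 32-char seed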
Code example #20
def spawnClient(clientName, port, signerSeed, host='0.0.0.0'):
    clientAddress = HA(host, port)
    # from plenum.client.request_id_store import FileRequestIdStore
    # walletFilePath = os.path.join(config.baseDir, "wallet")
    # print("Storing request ids in {}".format(walletFilePath))
    # store = FileRequestIdStore(walletFilePath)
    # wallet = Wallet(clientName, store)
    wallet = Wallet(clientName)
    wallet.addIdentifier(signer=DidSigner(seed=signerSeed))
    client = Client(clientName, ha=clientAddress)
    return client, wallet
Code example #21
    def addNewNodeAndConnect(self, txn):
        """
        Add a new node to remote keep and connect to it.
        """
        verkey, pubkey = hexlify(base64_decode(txn[TARGET_NYM].encode())), \
                         txn[DATA][PUBKEY]
        nodeName = txn[DATA][ALIAS]
        nodeHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT])
        try:
            initRemoteKeep(self.name, nodeName, self.basedirpath, pubkey,
                           verkey)
        except Exception as ex:
            logger.debug("Exception while initializing keep for remote {}: {}"
                         .format(nodeName, ex))
        self.node.nodestack.nodeReg[nodeName] = HA(*nodeHa)
Code example #22
    def getStackParamsAndNodeReg(self, name, basedirpath, nodeRegistry=None,
                                 ha=None, cliname=None, cliha=None):
        nstack = None
        cstack = None
        nodeReg = OrderedDict()
        for _, txn in self.poolTxnStore.getAllTxn().items():
            if txn[TXN_TYPE] == NEW_NODE:
                verkey, pubkey = hexlify(
                    base64_decode(txn[TARGET_NYM].encode())), \
                    txn[DATA][PUBKEY]
                nodeName = txn[DATA][ALIAS]
                nodeHa = (txn[DATA][NODE_IP], txn[DATA][NODE_PORT])
                nodeReg[nodeName] = HA(*nodeHa)
                if nodeName == name:
                    nstack = dict(name=name,
                                  ha=HA('0.0.0.0', txn[DATA][NODE_PORT]),
                                  main=True,
                                  auto=AutoMode.never)
                    cstack = dict(name=nodeName + CLIENT_STACK_SUFFIX,
                                  ha=HA('0.0.0.0', txn[DATA][CLIENT_PORT]),
                                  main=True,
                                  auto=AutoMode.always)
                    if basedirpath:
                        nstack['basedirpath'] = basedirpath
                        cstack['basedirpath'] = basedirpath
                else:
                    try:
                        initRemoteKeep(name, nodeName, basedirpath, pubkey,
                                       verkey)
                    except Exception as ex:
                        print(ex)
            elif txn[TXN_TYPE] in (NEW_STEWARD, NEW_CLIENT) \
                    and self.config.clientBootStrategy == \
                        ClientBootStrategy.PoolTxn:
                self.addNewRole(txn)
        return nstack, cstack, nodeReg
Code example #23
File: helper.py Project: tomergi/plenum
    def checkIfConnectedToAll(self):
        connected = 0
        # TODO refactor to not use values
        for address in self.nodeReg.values():
            for remote in self.nodestack.remotes.values():
                if HA(*remote.ha) == address:
                    if Stack.isRemoteConnected(remote):
                        connected += 1
                        break
        totalNodes = len(self.nodeReg)
        if connected == 0:
            raise NotConnectedToAny()
        elif connected < totalNodes:
            raise NotFullyConnected()
        else:
            assert connected == totalNodes
Code example #24
File: endpoint.py Project: wzwerch/sovrin
    def __init__(self, port: int, msgHandler: Callable,
                 name: str=None, basedirpath: str=None):
        if name and basedirpath:
            ha = getHaFromLocalEstate(name, basedirpath)
            if ha and ha[1] != port:
                port = ha[1]

        stackParams = {
            "name": name or randomString(8),
            "ha": HA("0.0.0.0", port),
            "main": True,
            "auto": AutoMode.always,
            "mutable": "mutable"
        }
        if basedirpath:
            stackParams["basedirpath"] = basedirpath

        super().__init__(stackParams, self.baseMsgHandler)

        self.msgHandler = msgHandler
Code example #25
File: pool_manager.py Project: zmh0531/indy-plenum
    def processPoolTxn(self, txn):
        logger.debug("{} processing pool txn {} ".format(self, txn))
        typ = get_type(txn)
        txn_data = get_payload_data(txn)

        if typ == NODE:
            remoteName = txn_data[DATA][ALIAS] + CLIENT_STACK_SUFFIX
            nodeName = txn_data[DATA][ALIAS]
            nodeNym = txn_data[TARGET_NYM]

            def _update(txn_data):
                if {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT}.\
                        intersection(set(txn_data[DATA].keys())):
                    self.stackHaChanged(txn_data, remoteName, self)
                if VERKEY in txn_data:
                    self.stackKeysChanged(txn_data, remoteName, self)
                if SERVICES in txn_data[DATA]:
                    self.nodeServicesChanged(txn_data)
                    self.setPoolParams()

            if nodeName in self.nodeReg:
                # The node was already part of the pool so update
                _update(txn_data)
            else:
                seqNos, info = self.getNodeInfoFromLedger(nodeNym)
                if len(seqNos) == 1:
                    # Since only one transaction has been made, this is a new
                    # node transaction
                    self.connectNewRemote(txn_data, remoteName, self)
                    self.setPoolParams()
                else:
                    self.nodeReg[nodeName + CLIENT_STACK_SUFFIX] = HA(
                        info[DATA][CLIENT_IP], info[DATA][CLIENT_PORT])
                    _update(txn_data)
        else:
            logger.error("{} received unknown txn type {} in txn {}"
                         .format(self.name, typ, txn))
            return
Code example #26
File: client.py Project: tomergi/plenum
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 lastReqId: int = 0,
                 signer: Signer = None,
                 signers: Dict[str, Signer] = None,
                 basedirpath: str = None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        :param lastReqId: Request Id of the last request sent by client
        :param signer: Signer; mutually exclusive of signers
        :param signers: Dict of identifier -> Signer; useful for clients that
            need to support multiple signers
        """
        self.lastReqId = lastReqId
        self._clientStack = None
        self.minimumNodes = getMaxFailures(len(nodeReg)) + 1

        cliNodeReg = OrderedDict()
        for nm in nodeReg:
            val = nodeReg[nm]
            if len(val) == 3:
                ((ip, port), verkey, pubkey) = val
            else:
                ip, port = val
            cliNodeReg[nm] = HA(ip, port)

        nodeReg = cliNodeReg

        cha = ha if isinstance(ha, HA) else HA(*ha)
        stackargs = dict(
            name=name,
            ha=cha,
            main=False,  # stops incoming vacuous joins
            auto=AutoMode.always)
        if basedirpath:
            stackargs['basedirpath'] = basedirpath

        self.created = time.perf_counter()
        NodeStacked.__init__(self, stackParams=stackargs, nodeReg=nodeReg)
        logger.info("Client initialized with the following node registry:")
        lengths = [
            max(x) for x in zip(*[(len(name), len(host), len(str(port)))
                                  for name, (host, port) in nodeReg.items()])
        ]
        fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
            *lengths)
        for name, (host, port) in nodeReg.items():
            logger.info(fmt.format(name, host, port))

        Motor.__init__(self)

        self.inBox = deque()

        if signer and signers:
            raise ValueError("only one of 'signer' or 'signers' can be used")

        self.signers = None
        self.defaultIdentifier = None
        if signer:
            self.signers = {signer.identifier: signer}
            self.defaultIdentifier = signer.identifier
        elif signers:
            self.signers = signers
        else:
            self.setupDefaultSigner()

        self.connectNicelyUntil = 0  # don't need to connect nicely as a client
Code example #27
def test_node_catchup_after_restart_with_txns(
        sdk_new_node_caught_up, txnPoolNodeSet, tdir, tconf,
        sdk_node_set_with_node_added_after_some_txns, allPluginsPath):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    logger.debug("Stopping node {} with pool ledger size {}".format(
        new_node, new_node.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, new_node)
    looper.removeProdable(new_node)
    # for n in txnPoolNodeSet[:4]:
    #     for r in n.nodestack.remotes.values():
    #         if r.name == newNode.name:
    #             r.removeStaleCorrespondents()
    # looper.run(eventually(checkNodeDisconnectedFrom, newNode.name,
    #                       txnPoolNodeSet[:4], retryWait=1, timeout=5))
    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    more_requests = 5
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, more_requests)
    logger.debug("Starting the stopped node, {}".format(new_node))
    nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha)
    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    newNode = TestNode(new_node.name,
                       config_helper=config_helper,
                       config=tconf,
                       ha=nodeHa,
                       cliha=nodeCHa,
                       pluginPaths=allPluginsPath)
    looper.add(newNode)
    txnPoolNodeSet[-1] = newNode

    # Make sure ledger is not synced initially
    check_ledger_state(newNode, DOMAIN_LEDGER_ID, LedgerState.not_synced)

    # Delay catchup reply processing so LedgerState does not change
    # TODO fix delay, sometimes it's not enough and lower 'check_ledger_state'
    # fails because newNode's domain ledger state is 'synced'
    delay_catchup_reply = 10
    newNode.nodeIbStasher.delay(cr_delay(delay_catchup_reply))
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # Make sure ledger starts syncing (sufficient consistency proofs received)
    looper.run(
        eventually(check_ledger_state,
                   newNode,
                   DOMAIN_LEDGER_ID,
                   LedgerState.syncing,
                   retryWait=.5,
                   timeout=5))

    confused_node = txnPoolNodeSet[0]
    new_node_ledger = newNode.ledgerManager.ledgerRegistry[DOMAIN_LEDGER_ID]
    cp = new_node_ledger.catchUpTill
    start, end = cp.seqNoStart, cp.seqNoEnd
    cons_proof = confused_node.ledgerManager._buildConsistencyProof(
        DOMAIN_LEDGER_ID, start, end)

    bad_send_time = None

    def chk():
        nonlocal bad_send_time
        entries = newNode.ledgerManager.spylog.getAll(
            newNode.ledgerManager.canProcessConsistencyProof.__name__)
        for entry in entries:
            # `canProcessConsistencyProof` should return False after `syncing_time`
            if entry.result is False and entry.starttime > bad_send_time:
                return
        assert False

    def send_and_chk(ledger_state):
        nonlocal bad_send_time, cons_proof
        bad_send_time = perf_counter()
        confused_node.ledgerManager.sendTo(cons_proof, newNode.name)
        # Check that the ConsistencyProof messages rejected
        looper.run(eventually(chk, retryWait=.5, timeout=5))
        check_ledger_state(newNode, DOMAIN_LEDGER_ID, ledger_state)

    send_and_chk(LedgerState.syncing)

    # Not an accurate timeout, but a conservative one
    timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) + \
              2 * delay_catchup_reply
    waitNodeDataEquality(looper,
                         newNode,
                         *txnPoolNodeSet[:-1],
                         customTimeout=timeout)
    assert new_node_ledger.num_txns_caught_up == more_requests
    send_and_chk(LedgerState.synced)
Code example #28
def ensureNewNodeConnectedClient(looper, client: TestClient, node: TestNode):
    stackParams = node.clientStackParams
    client.nodeReg[stackParams['name']] = HA('127.0.0.1', stackParams['ha'][1])
    looper.run(client.ensureConnectedToNodes())
Code example #29
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        :param lastReqId: Request Id of the last request sent by client
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(name, basedirpath):
            logger.debug("Client {} ignoring given ha".format(ha))
            cha = getHaFromLocalEstate(name, basedirpath)
            if cha:
                cha = HA(*cha)
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.name = name
        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self,
                                self.name,
                                baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(
                0,
                self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        self.setF()

        stackargs = dict(
            name=name,
            ha=cha,
            main=False,  # stops incoming vacuous joins
            auto=AutoMode.always)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        self.nodestack = self.nodeStackClass(stackargs, self.handleOneNodeMsg,
                                             self.nodeReg)
        self.nodestack.onConnsChanged = self.onConnsChanged

        logger.info(
            "Client {} initialized with the following node registry:".format(
                name))
        lengths = [
            max(x)
            for x in zip(*[(len(name), len(host), len(str(port)))
                           for name, (host, port) in self.nodeReg.items()])
        ]
        fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
            *lengths)
        for name, (host, port) in self.nodeReg.items():
            logger.info(fmt.format(name, host, port))

        Motor.__init__(self)

        self.inBox = deque()

        # don't need to connect nicely as a client
        self.nodestack.connectNicelyUntil = 0

        # TODO: Need to have couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger
        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))
Code example #30
    def _spawnClient(self, name, seed, host='0.0.0.0'):
        self.__startPort += randint(100, 1000)
        address = HA(host, self.__startPort)
        logger.info("Seed for client {} is {}, "
                    "its len is {}".format(name, seed, len(seed)))
        return createClientAndWalletWithSeed(name, seed, address)