Пример #1
0
def checkIfSameReplicaIsPrimary(looper: Looper,
                                replicas: Sequence[TestReplica] = None,
                                retryWait: float = 1,
                                timeout: float = 20):
    """Verify exactly one primary is elected and all replicas agree on it."""

    def checkElectionDone():
        # Replicas that have not yet learned who the primary is.
        undecided = [rep for rep in replicas if rep.primaryName is None]
        assert len(undecided) == 0, "election should be complete, " \
                                    "but {} out of {} ({}) don't know who the primary " \
                                    "is for protocol instance {}". \
            format(len(undecided), len(replicas), undecided, replicas[0].instId)

    def checkPrisAreOne():  # number of expected primaries
        primary_count = sum(1 for rep in replicas if rep.isPrimary)
        assert primary_count == 1, "Primary count should be 1, but was {} for " \
                                   "protocol no {}".format(primary_count,
                                                           replicas[0].instId)

    def checkPrisAreSame():
        primary_names = {rep.primaryName for rep in replicas}
        assert len(primary_names) == 1, "Primary should be same for all, but were {} " \
                                        "for protocol no {}" \
            .format(primary_names, replicas[0].instId)

    looper.run(
        eventuallyAll(checkElectionDone, checkPrisAreOne, checkPrisAreSame,
                      retryWait=retryWait, totalTimeout=timeout))
Пример #2
0
def ensure_node_disconnected(looper: Looper,
                             disconnected: TestNode,
                             other_nodes: Iterable[TestNode],
                             timeout: float = None):
    """Wait until `disconnected` is seen as down by all `other_nodes`."""
    # Default timeout scales with the number of peers that must notice.
    wait = timeout if timeout else (len(other_nodes) - 1)
    looper.run(eventually(check_node_disconnected, disconnected,
                          other_nodes, retryWait=1, timeout=wait))
Пример #3
0
def prepareNodeSet(looper: Looper, txnPoolNodeSet):
    """Connect all pool nodes, then detach and drop every node.

    After this call the looper no longer services any of the nodes and
    `txnPoolNodeSet` is empty.
    """
    # TODO: Come up with a more specific name for this

    # Key sharing party
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # Remove all the nodes
    for n in list(txnPoolNodeSet):
        # Bug fix: remove the individual node from the looper; the original
        # passed the whole node set to removeProdable.
        looper.removeProdable(n)
        txnPoolNodeSet.remove(n)
Пример #4
0
def reconnect_node_and_ensure_connected(looper: Looper,
                                        poolNodes: Iterable[TestNode],
                                        connect: Union[str, TestNode],
                                        timeout=None):
    """Reconnect the given node (by name or instance) and wait for the pool."""
    # Normalize the argument to a node name.
    connect = connect.name if isinstance(connect, TestNode) else connect
    assert isinstance(connect, str)

    reconnectPoolNode(looper, poolNodes, connect)
    looper.run(checkNodesConnected(poolNodes, customTimeout=timeout))
Пример #5
0
def checkPoolReady(looper: Looper,
                   nodes: Sequence[TestNode],
                   customTimeout=None):
    """
    Check that pool is in Ready state
    """
    # Fall back to the pool's expected start-up timeout.
    wait = customTimeout if customTimeout \
        else waits.expectedPoolStartUpTimeout(len(nodes))
    looper.run(
        eventually(checkNodesAreReady, nodes,
                   retryWait=.25,
                   timeout=wait,
                   ratchetSteps=10))
Пример #6
0
def checkPoolReady(looper: Looper,
                   nodes: Sequence[TestNode],
                   customTimeout=None):
    """
    Check that pool is in Ready state
    """
    if not customTimeout:
        customTimeout = waits.expectedPoolStartUpTimeout(len(nodes))
    readiness_check = eventually(checkNodesAreReady,
                                 nodes,
                                 retryWait=.25,
                                 timeout=customTimeout,
                                 ratchetSteps=10)
    looper.run(readiness_check)
Пример #7
0
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      customTimeout: float = None):
    """Verify each node has no more than one primary among its replicas."""

    def _at_most_one_primary(n):
        primary_count = sum(1 for rep in n.replicas.values() if rep.isPrimary)
        assert primary_count <= 1

    wait = customTimeout if customTimeout \
        else waits.expectedPoolElectionTimeout(len(nodes))
    for n in nodes:
        looper.run(eventually(_at_most_one_primary, n,
                              retryWait=retryWait, timeout=wait))
Пример #8
0
def setupClient(looper: Looper,
                nodes: Sequence[TestNode] = None,
                nodeReg=None,
                tmpdir=None,
                identifier=None,
                verkey=None):
    """Create a test client and wallet, attach to the looper, wait for connect."""
    client, wallet = genTestClient(nodes=nodes,
                                   nodeReg=nodeReg,
                                   tmpdir=tmpdir,
                                   identifier=identifier,
                                   verkey=verkey)
    looper.add(client)
    # Block until the client is connected to the node set.
    looper.run(client.ensureConnectedToNodes())
    return client, wallet
Пример #9
0
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      timeout: float = None):
    """Assert every node sees at most a single primary replica."""

    def _check(n):
        # In this version `replicas` iterates replica objects directly.
        assert sum(1 for rep in n.replicas if rep.isPrimary) <= 1

    for n in nodes:
        looper.run(
            eventually(_check, n, retryWait=retryWait, timeout=timeout))
Пример #10
0
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      customTimeout: float = None):
    """Ensure no node ends up with more than one primary replica."""

    def one_primary_at_most(n):
        primaries = [replica for replica in n.replicas if replica.isPrimary]
        assert len(primaries) <= 1

    wait_limit = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))
    for n in nodes:
        looper.run(eventually(one_primary_at_most,
                              n,
                              retryWait=retryWait,
                              timeout=wait_limit))
Пример #11
0
def put_load():
    """Load-generation script: send `numReqs` random requests, wait for
    replies in `splits` batches, print timings and exit the process.

    Relies on module-level `numReqs` and `splits`.
    """
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    # Fixed seed — presumably matches a genesis steward identity; confirm.
    wallet.addIdentifier(signer=DidSigner(
        seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=True) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        # Wait for replies batch by batch.
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            # NOTE(review): the `+ 1` makes consecutive batches overlap by one
            # request — looks like an off-by-one; confirm intent.
            reqs = requests[i:i + numReqs // splits + 1]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                customTimeoutPerReq=100,
                                                override_timeout_limit=True)
            print('>>> Got replies for {} requests << in {}'.format(
                numReqs // splits,
                perf_counter() - s))
        end = perf_counter()
        print('>>>Total {} in {}<<<'.format(numReqs, end - start))
        exit(0)
Пример #12
0
def run_node():
    """Start one pool node (named by sys.argv[1]) on a hard-coded 4-node
    localhost registry, seed one genesis NYM txn, and run until stopped.
    """

    nodeReg = OrderedDict([
        ('Alpha', ('127.0.0.1', 8001)),
        ('Beta', ('127.0.0.1', 8003)),
        ('Gamma', ('127.0.0.1', 8005)),
        ('Delta', ('127.0.0.1', 8007))])

    genesisTxns = [{'txnId': '6b86b273ff34fce19d6b804eff5a3f57'
                             '47ada4eaa22f1d49c01e52ddb7875b4b',
                    'type': NYM,
                    'dest': 'o7z4QmFkNB+mVkFI2BwX0Hdm1BGhnz8psWnKYIXWTaQ=',
                    'role': TRUST_ANCHOR}]

    # the first argument should be the node name
    try:
        nodeName = sys.argv[1]
    except IndexError:
        names = list(nodeReg.keys())
        print("Please supply a node name (one of {}) as the first argument.".
              format(", ".join(names)))
        print("For example:")
        print("    {} {}".format(sys.argv[0], names[0]))
        return

    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and reconnecting
        # using an ephemeral temporary directory when proving a concept is a
        # nice way to keep things tidy.
        with TemporaryDirectory() as tmpdir:
            node = Node(nodeName, nodeReg, basedirpath=tmpdir)
            node.addGenesisTxns(genesisTxns)
            looper.add(node)
            node.startKeySharing()
            # Blocks forever (until the looper is stopped externally).
            looper.run()
Пример #13
0
def runAgent(agent, looper=None, bootstrap=None):
    """Run `agent` on the given looper, or create a dedicated one and block."""
    assert agent

    def _attach(lp):
        # Bind the agent to the looper's event loop before adding it.
        agent.loop = lp.loop
        lp.add(agent)
        logger.info("Running {} now (port: {})".format(agent.name, agent.port))
        if bootstrap:
            lp.run(runBootstrap(bootstrap))

    if looper:
        _attach(looper)
        return
    with Looper(debug=True, loop=agent.loop) as own_looper:
        _attach(own_looper)
        own_looper.run()


# Note: Commented it as didn't find any usage of this method
# def run_agent(looper, wallet, agent):
#
#     def run():
#         _agent = agent
#         wallet.pendSyncRequests()
#         prepared = wallet.preparePending()
#         _agent.client.submitReqs(*prepared)
#
#         runAgent(_agent, looper)
#
#         return _agent, wallet
#
#     return run
Пример #14
0
def run_node(config, name, node_port, client_port):
    """Start an Indy node bound to all interfaces on the given ports.

    Sets up file logging under config.baseDir and blocks in the looper
    until stopped.
    """
    node_ha = HA("0.0.0.0", node_port)
    client_ha = HA("0.0.0.0", client_port)

    logFileName = os.path.join(config.baseDir, name + ".log")

    Logger(config)
    Logger().enableFileLogging(logFileName)

    logger = getlogger()
    logger.setLevel(config.logLevel)
    logger.debug("You can find logs in {}".format(logFileName))

    # Renamed from `vars` to avoid shadowing the builtin vars().
    indy_env_vars = [var for var in os.environ.keys() if var.startswith("INDY")]
    logger.debug("Indy related env vars: {}".format(indy_env_vars))

    from stp_core.loop.looper import Looper
    from indy_node.server.node import Node
    with Looper(debug=config.LOOPER_DEBUG) as looper:
        node = Node(name,
                    nodeRegistry=None,
                    basedirpath=config.baseDir,
                    ha=node_ha,
                    cliha=client_ha)
        looper.add(node)
        looper.run()
Пример #15
0
def testReqExecWhenReturnedByMaster(tdir_for_func):
    """Only the master instance's ordering should produce an execution result.

    Sends one request, waits for sufficient replies, then inspects each
    node's processOrdered spy log: calls for the master instance must
    return a truthy result, calls for backup instances must return None.
    """
    with TestNodeSet(count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            client1, wallet1 = setupNodesAndClient(looper,
                                                   nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            looper.run(
                eventually(checkSufficientRepliesRecvd,
                           client1.inBox,
                           req.reqId,
                           1,
                           retryWait=1,
                           timeout=15))

            async def chk():
                # Check every recorded processOrdered call on every node.
                for node in nodeSet:
                    entries = node.spylog.getAll(node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            assert result is None

            looper.run(eventually(chk, timeout=3))
Пример #16
0
def runAgent(agent, looper=None, bootstrap=None):
    """Attach `agent` to a looper (creating one if necessary) and, if a
    bootstrap coroutine is given, run it once the agent's client is
    connected to the pool.
    """
    assert agent

    def is_connected(agent):
        # Raises until the client has caught up with the pool ledger and
        # has enough connections to send write requests.
        client = agent.client
        if (client.mode is None) or (not client.can_send_write_requests()):
            raise NotConnectedToNetwork(
                "Client hasn't finished catch-up with Pool Ledger yet or "
                "doesn't have sufficient number of connections")

    async def wait_until_connected(agent):
        from stp_core.loop.eventually import eventually
        await eventually(is_connected,
                         agent,
                         timeout=CONNECTION_TIMEOUT,
                         retryWait=2)

    def do_run(looper):
        agent.loop = looper.loop
        looper.add(agent)
        logger.info("Running {} now (port: {})".format(agent.name, agent.port))
        if bootstrap:
            # Bootstrap only after the agent's client is actually usable.
            looper.run(wait_until_connected(agent))
            looper.run(runBootstrap(bootstrap))

    if looper:
        do_run(looper)
    else:
        # No looper supplied: create our own and block in it.
        with Looper(debug=getConfig().LOOPER_DEBUG, loop=agent.loop) as looper:
            do_run(looper)
            looper.run()
Пример #17
0
def testTestNodeDelay(tdir_for_func):
    """Messages delayed via the inbound stasher must not arrive within the
    delay window, must arrive after it, and must flow normally once the
    delays are reset.
    """
    nodeNames = {"testA", "testB"}
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodes:
        nodeA = nodes.getNode("testA")
        nodeB = nodes.getNode("testB")

        with Looper(nodes) as looper:
            looper.run(checkNodesConnected(nodes))

            # send one message, without delay
            looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))

            # set delay, then send another message
            # and find that it doesn't arrive
            delay = 5 * waits.expectedNodeToNodeMessageDeliveryTime()
            nodeB.nodeIbStasher.delay(
                delayerMsgTuple(delay, TestMsg, nodeA.name)
            )
            with pytest.raises(AssertionError):
                looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))

            # but then find that it arrives after the delay
            # duration has passed
            timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delay
            looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB,
                                                   customTimeout=timeout))

            # reset the delay, and find another message comes quickly
            nodeB.nodeIbStasher.reset_delays_and_process_delayeds()
            looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))
Пример #18
0
 def _(space,
       looper=None,
       unique_name=None):
     """Generator fixture factory: yield a CLI bound to `space`'s home dir.

     Depends on enclosing-scope names (tdir, tconf, tdirWithPoolTxns,
     def_looper, etc.) — presumably a pytest fixture closure; confirm.
     """
     def new():
         client_tdir = os.path.join(tdir, 'home', space)
         c = newCLI(looper,
                    client_tdir,
                    conf=tconf,
                    poolDir=tdirWithPoolTxns,
                    domainDir=tdirWithDomainTxns,
                    multiPoolNodes=multiPoolNodes,
                    unique_name=unique_name or space,
                    logFileName=logFileName,
                    cliClass=cliClass,
                    name=name,
                    agent=agent,
                    nodes_chroot=tdir)
         return c
     # Fall back to the default looper when none was supplied.
     if not looper:
         looper = def_looper
     # NOTE(review): after the fallback above, the else branch is reachable
     # only if def_looper itself is falsy — confirm that is intended.
     if looper:
         yield new()
     else:
         with Looper(debug=False) as looper:
             yield new()
Пример #19
0
def testKeyShareParty(tdir_for_func):
    """
    connections to all nodes should be successfully established when key
    sharing is enabled.
    """
    nodeReg = genNodeReg(5)

    logger.debug("-----sharing keys-----")
    # First round: establish connections while keys are being shared.
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet, \
            Looper(nodeSet) as looper:
        looper.run(checkNodesConnected(nodeSet))

    logger.debug("-----key sharing done, connect after key sharing-----")
    # Second round: reconnect with persisted keys and exchange messages.
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet, \
            Looper(nodeSet) as loop:
        loop.run(checkNodesConnected(nodeSet), msgAll(nodeSet))
Пример #20
0
def run_node(config, name, node_ip, node_port, client_ip, client_port):
    """Start an Indy node with the given node/client endpoints.

    Configures file logging via NodeConfigHelper and blocks in the looper
    until stopped.
    """
    node_ha = HA(node_ip, node_port)
    client_ha = HA(client_ip, client_port)

    node_config_helper = NodeConfigHelper(name, config)

    logFileName = os.path.join(node_config_helper.log_dir, name + ".log")

    logger = getlogger()
    Logger().apply_config(config)
    Logger().enableFileLogging(logFileName)

    logger.setLevel(config.logLevel)
    logger.debug("You can find logs in {}".format(logFileName))

    # Renamed from `vars` to avoid shadowing the builtin vars().
    indy_env_vars = [var for var in os.environ.keys() if var.startswith("INDY")]
    logger.debug("Indy related env vars: {}".format(indy_env_vars))

    with Looper(debug=config.LOOPER_DEBUG) as looper:
        node = Node(name,
                    config_helper=node_config_helper,
                    ha=node_ha, cliha=client_ha)
        node = integrate(node_config_helper, node, logger)
        looper.add(node)
        looper.run()
Пример #21
0
def testActionQueue():
    """Scheduled actions fire only after their configured delay elapses."""

    class Q1(Motor, HasActionQueue):
        def __init__(self, name):
            self.name = name
            # Maps method name -> list of (argument, call time) tuples.
            self.results = {}
            Motor.__init__(self)
            HasActionQueue.__init__(self)

        def start(self, loop):
            pass

        async def prod(self, limit: int = None) -> int:
            # Drive the action queue each looper tick.
            return self._serviceActions()

        def meth1(self, x):
            # Record what was called and when.
            if 'meth1' not in self.results:
                self.results['meth1'] = []
            self.results['meth1'].append((x, time.perf_counter()))

    with Looper(debug=True) as looper:
        q1 = Q1('q1')
        looper.add(q1)
        # Schedule meth1(1) after 2s and meth1(2) after 4s.
        q1._schedule(partial(q1.meth1, 1), 2)
        q1._schedule(partial(q1.meth1, 2), 4)
        # Run past the first deadline but short of the second.
        looper.runFor(2.3)
        assert 1 in [t[0] for t in q1.results['meth1']]
        assert 2 not in [t[0] for t in q1.results['meth1']]
Пример #22
0
def testReqExecWhenReturnedByMaster(tdir_for_func, tconf_for_func):
    """Only the master instance's ordering should produce an execution result.

    In this variant, backup-instance processOrdered calls are expected to
    return False (not None as in the older variant of this test).
    """
    with TestNodeSet(tconf_for_func, count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            client1, wallet1 = setupNodesAndClient(looper,
                                                   nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            waitForSufficientRepliesForRequests(looper,
                                                client1,
                                                requests=[req])

            async def chk():
                # Check every recorded processOrdered call on every node.
                for node in nodeSet:
                    entries = node.spylog.getAll(node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            assert result is False

            # Timeout scales with the number of protocol instances.
            timeout = waits.expectedOrderingTime(
                nodeSet.nodes['Alpha'].instances.count)
            looper.run(eventually(chk, timeout=timeout))
Пример #23
0
    def __init__(self,
                 nodeCount=None,
                 nodeRegistry=None,
                 nodeSet=None,
                 looper=None,
                 tmpdir=None):
        """Set up the test environment: node set, looper and actor slots.

        A TestNodeSet and Looper are created (and owned via enter_context,
        so they are torn down with this object) unless existing instances
        are supplied via `nodeSet` / `looper`.
        """
        super().__init__()

        self.actor = None  # type: Organization

        # Use the provided node set, or create and manage one ourselves.
        if nodeSet is None:
            self.nodes = self.enter_context(
                TestNodeSet(count=nodeCount,
                            nodeReg=nodeRegistry,
                            tmpdir=tmpdir))
        else:
            self.nodes = nodeSet
        self.nodeReg = self.nodes.nodeReg
        # Same pattern for the looper: own it only if we created it.
        if looper is None:
            self.looper = self.enter_context(Looper(self.nodes))
        else:
            self.looper = looper
        self.tmpdir = tmpdir
        self.ran = []  # history of what has been run
        self.userId = None
        self.userNym = None
        self.trustAnchor = None
        self.trustAnchorNym = None
        self.agent = None
        self.agentNym = None
Пример #24
0
def run_node():
    """Start one pool node (named by sys.argv[1]) on a hard-coded 4-node
    localhost registry, pre-authorize the demo client "Joe", and run until
    stopped.
    """

    nodeReg = OrderedDict([('Alpha', ('127.0.0.1', 9701)),
                           ('Beta', ('127.0.0.1', 9703)),
                           ('Gamma', ('127.0.0.1', 9705)),
                           ('Delta', ('127.0.0.1', 9707))])

    # the first argument should be the node name
    try:
        nodeName = sys.argv[1]
    except IndexError:
        names = list(nodeReg.keys())
        print("Please supply a node name (one of {}) as the first argument.".
              format(", ".join(names)))
        print("For example:")
        print("    {} {}".format(sys.argv[0], names[0]))
        return

    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and reconnecting
        # using an ephemeral temporary directory when proving a concept is a
        # nice way to keep things tidy.
        with SafeTemporaryDirectory() as tmpdir:
            node = Node(nodeName, nodeReg, basedirpath=tmpdir)

            # see simple_client.py
            joe_verkey = b'cffbb88a142be2f62d1b408818e21a2f' \
                         b'887c4442ae035a260d4cc2ec28ae24d6'
            node.clientAuthNr.addIdr("Joe", joe_verkey)

            looper.add(node)
            node.startKeySharing()
            # Blocks forever (until the looper is stopped externally).
            looper.run()
Пример #25
0
def load():
    """Load-generation script: send `numReqs` random requests, wait for
    replies in `splits` batches, print timings and exit the process.

    Relies on module-level `numReqs` and `splits`.
    """
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    # Fixed seed — presumably matches a genesis steward identity; confirm.
    wallet.addIdentifier(signer=SimpleSigner(
        seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        # Wait for replies batch by batch.
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            # NOTE(review): the `+ 1` makes consecutive batches overlap by one
            # request — looks like an off-by-one; confirm intent.
            reqs = requests[i:i + numReqs // splits + 1]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                fVal=2,
                                                customTimeoutPerReq=3)
            print('>>> Got replies for {} requests << in {}'.format(
                numReqs // splits,
                perf_counter() - s))
        end = perf_counter()
        print('>>>{}<<<'.format(end - start))
        exit(0)
Пример #26
0
def addNodeBack(node_set,
                looper: Looper,
                node: Node,
                tconf,
                tdir) -> TestNode:
    """Recreate `node` with the same addresses, rejoin it to `node_set`,
    tell the other nodes to reconnect to it, and return the new instance.
    """
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             ha=node.nodestack.ha,
                             cliha=node.clientstack.ha)
    # Fix: use a distinct loop variable; the original shadowed the `node`
    # parameter, which is confusing and error-prone.
    for other in node_set:
        if other.name != restartedNode.name:
            other.nodestack.reconnectRemoteWithName(restartedNode.name)
    node_set.append(restartedNode)
    looper.add(restartedNode)
    return restartedNode
def testPostingThroughput(postingStatsEnabled,
                          decreasedMonitoringTimeouts,
                          looper: Looper,
                          nodeSet: TestNodeSet,
                          wallet1, client1):
    """
    The throughput after `DashboardUpdateFreq` seconds and before sending any
    requests should be zero.
    Send `n` requests in less than `ThroughputWindowSize` seconds and the
    throughput till `ThroughputWindowSize` should consider those `n` requests.
    After `ThroughputWindowSize` seconds the throughput should be zero
    Test `totalRequests` too.
    """

    config = decreasedMonitoringTimeouts

    # We are sleeping for this window size, because we need to clear previous
    # values that were being stored for this much time in tests
    looper.runFor(config.ThroughputWindowSize)

    reqCount = 10
    for node in nodeSet:
        assert node.monitor.highResThroughput == 0
        assert node.monitor.totalRequests == 0

    sendReqsToNodesAndVerifySuffReplies(looper,
                                        wallet1,
                                        client1,
                                        reqCount,
                                        nodeSet.f)

    for node in nodeSet:
        assert len(node.monitor.orderedRequestsInLast) == reqCount
        assert node.monitor.highResThroughput > 0
        assert node.monitor.totalRequests == reqCount
        # TODO: Add implementation to actually call firebase plugin
        # and test if firebase plugin is sending total request count
        # if node is primary

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        # Bug fix: the comparison result was previously discarded; it must be
        # asserted for the check to have any effect.
        assert node.monitor.spylog.count(Monitor.sendThroughput.__name__) > 0

    # Run for latency window duration so that `orderedRequestsInLast`
    # becomes empty
    looper.runFor(config.ThroughputWindowSize)

    def chk():
        for node in nodeSet:
            assert len(node.monitor.orderedRequestsInLast) == 0
            assert node.monitor.highResThroughput == 0
            assert node.monitor.totalRequests == reqCount

    timeout = config.ThroughputWindowSize
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
Пример #28
0
    def run(self):
        """Execute the user scenario: create client/wallet, start a looper,
        run do(), and always shut the looper down synchronously.

        Any exception is logged with its traceback and re-raised.
        """
        try:
            self._createClientAndWallet()

            self._looper = Looper(debug=getConfig().LOOPER_DEBUG)
            try:
                self._startClient()
                self.do()
            finally:
                # Release looper resources even if do() raised.
                self._looper.shutdownSync()
                self._looper = None

        except BaseException as ex:
            logger.exception(
                "User scenario throws out exception: {}".format(ex),
                exc_info=ex)
            raise ex
Пример #29
0
def addNodeBack(node_set,
                looper: Looper,
                node: Node,
                tconf,
                tdir) -> TestNode:
    """Recreate `node` with the same addresses, rejoin it to `node_set`,
    tell the other nodes to reconnect to it, and return the new instance.
    """
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             ha=node.nodestack.ha,
                             cliha=node.clientstack.ha)
    # Fix: use a distinct loop variable; the original shadowed the `node`
    # parameter, which is confusing and error-prone.
    for other in node_set:
        if other.name != restartedNode.name:
            other.nodestack.reconnectRemoteWithName(restartedNode.name)
    node_set.append(restartedNode)
    looper.add(restartedNode)
    return restartedNode
Пример #30
0
 def run(self, coro, nodecount=4):
     """Spin up `nodecount` connected nodes with completed elections in a
     fresh temp dir, then run `coro(ctx)` where ctx bundles looper,
     nodeset and tmpdir.
     """
     tmpdir = self.fresh_tdir()
     with self.testNodeSetClass(count=nodecount, tmpdir=tmpdir) as nodeset:
         with Looper(nodeset) as looper:
             # for n in nodeset:
             #     n.startKeySharing()
             ctx = adict(looper=looper, nodeset=nodeset, tmpdir=tmpdir)
             looper.run(checkNodesConnected(nodeset))
             ensureElectionsDone(looper=looper, nodes=nodeset)
             looper.run(coro(ctx))
Пример #31
0
def checkProtocolInstanceSetup(looper: Looper,
                               nodes: Sequence[TestNode],
                               retryWait: float = 1,
                               customTimeout: float = None,
                               instances: Sequence[int] = None,
                               check_primaries=True):
    """Verify all protocol instances are fully set up after elections.

    Checks that every instance has exactly one primary, every node has at
    most one primary replica, no node is still waiting for a view change
    and (if `check_primaries`) all nodes agree on the primaries.

    Returns the primary replicas sorted by instance id.
    """
    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))

    checkEveryProtocolInstanceHasOnlyOnePrimary(looper=looper,
                                                nodes=nodes,
                                                retryWait=retryWait,
                                                timeout=timeout,
                                                instances_list=instances)

    checkEveryNodeHasAtMostOnePrimary(looper=looper,
                                      nodes=nodes,
                                      retryWait=retryWait,
                                      customTimeout=timeout)

    def check_not_in_view_change():
        assert all([
            not n.master_replica._consensus_data.waiting_for_new_view
            for n in nodes
        ])

    looper.run(
        eventually(check_not_in_view_change,
                   retryWait=retryWait,
                   # Bug fix: use the computed `timeout` (which falls back to
                   # the pool election timeout) instead of the raw
                   # `customTimeout`, which may be None.
                   timeout=timeout))

    if check_primaries:
        for n in nodes[1:]:
            assert nodes[0].primaries == n.primaries

    primaryReplicas = {
        replica.instId: replica
        for node in nodes for replica in node.replicas.values()
        if replica.isPrimary
    }
    return [
        r[1]
        for r in sorted(primaryReplicas.items(), key=operator.itemgetter(0))
    ]
Пример #32
0
def testConnectWithoutKeySharingFails(tdir_for_func):
    """
    attempts at connecting to nodes when key sharing is disabled must fail
    """
    names = genNodeNames(5)

    # With keyshare disabled no public keys exist on disk, so the node set
    # must fail to come up.
    with pytest.raises(PublicKeyNotFoundOnDisk):
        with TestNodeSet(names=names, tmpdir=tdir_for_func,
                         keyshare=False) as nodes, Looper(nodes) as looper:
            looper.runFor(2)
Пример #33
0
def runAgentCli(name, agentCreator, looper=None, bootstrap=True):
    """Bootstrap an agent CLI and run its shell with the CLI arguments."""

    def _shell(lp):
        cli = bootstrapAgentCli(name, agentCreator, lp, bootstrap)
        lp.run(cli.shell(*sys.argv[1:]))

    if looper:
        _shell(looper)
    else:
        with Looper(debug=False) as lp:
            _shell(lp)
Пример #34
0
def runAgentCli(agent, config, looper=None, bootstrap=None):
    """Run the agent's interactive CLI, creating a looper when none is given."""

    def run(lp):
        logger.info("Running {} now (port: {})".format(agent.name, agent.port))
        cli = bootstrapAgentCli(agent.name, agent, lp, bootstrap, config)
        args = sys.argv[1:]
        lp.run(cli.shell(*args))

    if not looper:
        with Looper(debug=False) as looper:
            run(looper)
    else:
        run(looper)
Пример #35
0
def run_node():
    """Demo client script: connect to a remote 4-node pool, submit one NYM
    request signed with a fixed seed, and print the reply and status.

    (Despite the name, this runs a client, not a node.)
    """
    ip = '52.37.111.254'
    cliNodeReg = OrderedDict([
        # ('AlphaC', ('127.0.0.1', 8002)),
        ('AlphaC', (ip, 4002)),
        ('BetaC', (ip, 4004)),
        ('GammaC', (ip, 4006)),
        ('DeltaC', (ip, 4008))
    ])

    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and reconnecting
        # using an ephemeral temporary directory when proving a concept is a
        # nice way to keep things clean.
        clientName = 'Joem'

        # this seed is used by the signer to deterministically generate
        # a signature verification key that is shared out of band with the
        # consensus pool
        seed = b'g034OTmx7qBRtywvCbKhjfALHnsdcJpl'
        assert len(seed) == 32
        signer = SimpleSigner(seed=seed)
        assert signer.verstr == 'o7z4QmFkNB+mVkFI2BwX0H' \
                                'dm1BGhnz8psWnKYIXWTaQ='

        client_address = ('0.0.0.0', 8000)

        tmpdir = os.path.join(tempfile.gettempdir(), "sovrin_clients",
                              clientName)
        client = Client(clientName,
                        cliNodeReg,
                        ha=client_address,
                        signer=signer,
                        basedirpath=tmpdir)
        looper.add(client)

        # give the client time to connect
        looper.runFor(3)

        # a simple message
        msg = {TXN_TYPE: NYM}

        # submit the request to the pool
        request, = client.submit(msg)

        # allow time for the request to be executed
        looper.runFor(3)

        reply, status = client.getReply(request.reqId)
        print('')
        print('Reply: {}\n'.format(reply))
        print('Status: {}\n'.format(status))
Пример #36
0
def wait_for_elections_done_on_given_nodes(looper: Looper,
                                           nodes: Iterable[Node],
                                           num_of_instances: int,
                                           timeout: float,
                                           retry_wait: float=1.0):
    """
    Wait for primary elections to be completed on all the replicas
    of the given nodes.
    """
    def check_num_of_replicas():
        # Every node must have spun up the expected number of instances.
        assert all(len(node.replicas) == num_of_instances for node in nodes)

    def verify_each_replica_knows_its_primary():
        for node in nodes:
            assert all(replica.hasPrimary
                       for _, replica in node.replicas.items())

    looper.run(eventuallyAll(check_num_of_replicas,
                             verify_each_replica_knows_its_primary,
                             totalTimeout=timeout,
                             retryWait=retry_wait))
def testPostingLatency(postingStatsEnabled,
                       decreasedMonitoringTimeouts,
                       looper: Looper,
                       nodeSet: TestNodeSet,
                       wallet1, client1):
    """
    The latencies (master as well as average of backups) after
    `DashboardUpdateFreq` seconds and before sending any requests should be zero.
    Send `n` requests in less than `LatencyWindowSize` seconds and the
    latency till `LatencyWindowSize` should consider those `n` requests.
    After `LatencyWindowSize` seconds the latencies should be zero
    """

    config = decreasedMonitoringTimeouts

    # Run for latency window duration so that `latenciesByMasterInLast` and
    # `latenciesByBackupsInLast` become empty
    looper.runFor(config.LatencyWindowSize)
    reqCount = 10
    for node in nodeSet:
        assert node.monitor.masterLatency == 0
        assert node.monitor.avgBackupLatency == 0

    sendReqsToNodesAndVerifySuffReplies(looper,
                                        wallet1,
                                        client1,
                                        reqCount,
                                        nodeSet.f)

    for node in nodeSet:
        assert node.monitor.masterLatency > 0
        assert node.monitor.avgBackupLatency > 0

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        # Bug fix: the comparison result was previously discarded; it must be
        # asserted for the check to have any effect.
        assert node.monitor.spylog.count(Monitor.sendLatencies.__name__) > 0

    # Run for latency window duration so that `latenciesByMasterInLast` and
    # `latenciesByBackupsInLast` become empty
    looper.runFor(config.LatencyWindowSize)

    def chk():
        for node in nodeSet:
            assert node.monitor.masterLatency == 0
            assert node.monitor.avgBackupLatency == 0

    timeout = config.LatencyWindowSize
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
Пример #38
0
def wait_for_elections_done_on_given_nodes(looper: Looper,
                                           nodes: Iterable[Node],
                                           num_of_instances: int,
                                           timeout: float,
                                           retry_wait: float=1.0):
    """
    Wait for primary elections to be completed on all the replicas
    of the given nodes.
    """
    def check_num_of_replicas():
        for n in nodes:
            assert len(n.replicas) == num_of_instances

    def verify_each_replica_knows_its_primary():
        for n in nodes:
            for replica in n.replicas.values():
                assert replica.hasPrimary

    checks = (check_num_of_replicas, verify_each_replica_knows_its_primary)
    looper.run(eventuallyAll(*checks,
                             totalTimeout=timeout,
                             retryWait=retry_wait))
Пример #39
0
def test_hasProdable():
    """hasProdable must raise ValueError for the given arguments."""
    looper = Looper(autoStart=False)
    with pytest.raises(ValueError):
        # NOTE(review): this creates a second, throwaway Looper() (which is
        # never shut down) instead of using `looper` above — looks
        # unintended; confirm whether `looper.hasProdable(...)` was meant.
        Looper().hasProdable(Prodable(), 'prodable')
    looper.shutdownSync()