Example #1
def run_node():

    nodeReg = OrderedDict([
        ('Alpha', ('127.0.0.1', 9701)),
        ('Beta', ('127.0.0.1', 9703)),
        ('Gamma', ('127.0.0.1', 9705)),
        ('Delta', ('127.0.0.1', 9707))])

    # the first argument should be the node name
    try:
        nodeName = sys.argv[1]
    except IndexError:
        names = list(nodeReg.keys())
        print("Please supply a node name (one of {}) as the first argument.".
              format(", ".join(names)))
        print("For example:")
        print("    {} {}".format(sys.argv[0], names[0]))
        return

    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and reconnecting;
        # using an ephemeral temporary directory when proving a concept is a
        # nice way to keep things tidy.
        with TemporaryDirectory() as tmpdir:
            node = Node(nodeName, nodeReg, basedirpath=tmpdir)

            # see simple_client.py
            joe_verkey = b'cffbb88a142be2f62d1b408818e21a2f' \
                         b'887c4442ae035a260d4cc2ec28ae24d6'
            node.clientAuthNr.addClient("Joe", joe_verkey)

            looper.add(node)
            node.startKeySharing()
            looper.run()
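Example #1 above references several names it never imports (sys, OrderedDict, TemporaryDirectory, Looper, Node). A minimal sketch of how the snippet might be wired into a runnable script is shown below; the plenum module paths are assumptions and may differ between plenum releases.

import sys
from collections import OrderedDict
from tempfile import TemporaryDirectory

# Assumed import locations; Looper in particular has moved between
# plenum/stp_core versions, so adjust to the release you are using.
from plenum.common.looper import Looper
from plenum.server.node import Node


if __name__ == "__main__":
    run_node()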
Example #2
def test_drop_two_last_reqs(node):
    req_keys_to_drop = []
    req_keys_to_drop.append(__prepare_req_for_drop(node.requests, "2", "propagates"))
    req_keys_to_drop.append(__prepare_req_for_drop(node.requests, "3", "ordering"))
    Node.check_outdated_reqs(node)
    assert len(node.requests) == 1
    assert node.propagates_phase_req_timeouts == 1
    assert node.ordering_phase_req_timeouts == 1
Example #3
    def __init__(self, *args, **kwargs):
        Node.__init__(self, *args, **kwargs)
        TestNodeCore.__init__(self, *args, **kwargs)
        # Balances of all clients
        self.balances = {}  # type: Dict[str, int]

        # Txns of all clients, each txn is a tuple like (from, to, amount)
        self.txns = []  # type: List[Tuple]
Example #4
def testNodesConnectWhenTheyAllStartAtOnce():
    with TemporaryDirectory() as td:
        with Looper() as looper:
            nodes = []
            for name in nodeReg:
                node = Node(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
            looper.run(checkNodesConnected(nodes))
Example #5
def test_drop_all_reqs(node, phase):
    for req_identifier in req_identifiers:
        __prepare_req_for_drop(node.requests, req_identifier, phase)
    Node.check_outdated_reqs(node)
    assert len(node.requests) == 0
    if phase == "propagates":
        assert node.propagates_phase_req_timeouts == 3
        assert node.ordering_phase_req_timeouts == 0
    elif phase == "ordering":
        assert node.propagates_phase_req_timeouts == 0
        assert node.ordering_phase_req_timeouts == 3
Example #6
def test_drop_all_reqs(node, phase):
    for req_identifier in req_identifiers:
        __prepare_req_for_drop(node.requests, req_identifier, phase)
    Node.check_outdated_reqs(node)
    assert len(node.requests) == 0
    if phase == "propagates":
        assert node.propagates_phase_req_timeouts == 3
        assert node.ordering_phase_req_timeouts == 0
    elif phase == "ordering":
        assert node.propagates_phase_req_timeouts == 0
        assert node.ordering_phase_req_timeouts == 3
Example #7
    def __init__(self, *args, **kwargs):
        from plenum.common.stacks import nodeStackClass, clientStackClass
        self.NodeStackClass = nodeStackClass
        self.ClientStackClass = clientStackClass

        Node.__init__(self, *args, **kwargs)
        TestNodeCore.__init__(self, *args, **kwargs)
        # Balances of all clients
        self.balances = {}  # type: Dict[str, int]

        # Txns of all clients, each txn is a tuple like (from, to, amount)
        self.txns = []  # type: List[Tuple]
Example #8
    def __init__(self, *args, **kwargs):
        from plenum.common.stacks import nodeStackClass, clientStackClass
        self.NodeStackClass = nodeStackClass
        self.ClientStackClass = clientStackClass

        Node.__init__(self, *args, **kwargs)
        TestNodeCore.__init__(self, *args, **kwargs)
        # Balances of all clients
        self.balances = {}  # type: Dict[str, int]

        # Txns of all clients, each txn is a tuple like (from, to, amount)
        self.txns = []  # type: List[Tuple]
Example #9
def test_drop_last_req(node, phase):
    req_identifier = req_identifiers[2]
    req_key_to_drop = __prepare_req_for_drop(node.requests, req_identifier, phase)
    Node.check_outdated_reqs(node)
    assert len(node.requests) == 2
    assert req_key_to_drop not in node.requests
    if phase == "propagates":
        assert node.propagates_phase_req_timeouts == 1
        assert node.ordering_phase_req_timeouts == 0
    elif phase == "ordering":
        assert node.propagates_phase_req_timeouts == 0
        assert node.ordering_phase_req_timeouts == 1
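The request-drop tests above (and the identical ones later in the listing) rely on a req_identifiers list and node/phase fixtures that the snippets do not show. Below is a minimal sketch of what the missing pieces could look like; the identifier values and the parametrization are assumptions rather than the project's actual conftest.py, and the node fixture itself is omitted because it depends on plenum internals.

import pytest

# Assumed values: the tests only index into this list and pass its
# elements (e.g. "2", "3") to __prepare_req_for_drop.
req_identifiers = ["1", "2", "3"]


@pytest.fixture(params=["propagates", "ordering"])
def phase(request):
    # Run each drop test once per request-processing phase.
    return request.param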
Example #10
    def __init__(self, *args, **kwargs):
        from plenum.common.stacks import nodeStackClass, clientStackClass
        self.NodeStackClass = nodeStackClass
        self.ClientStackClass = clientStackClass

        Node.__init__(self, *args, **kwargs, bootstrap_cls=TestNodeBootstrap)
        self.view_changer = create_view_changer(self, TestViewChanger)
        TestNodeCore.__init__(self, *args, **kwargs)
        # Balances of all client
        self.balances = {}  # type: Dict[str, int]

        # Txns of all clients, each txn is a tuple like (from, to, amount)
        self.txns = []  # type: List[Tuple]
Example #11
def test_future_vcdone_vc(fake_node, view_change_in_progress):
    """
    If from_current_state is False, then the message should be put only into the msgsForFutureViews queue
    """
    frm = 'Node3'
    fake_node.view_changer.view_change_in_progress = view_change_in_progress
    current_view = fake_node.viewNo
    proposed_view_no = current_view + 1
    msg = ViewChangeDone(proposed_view_no, frm, fake_node.ledger_summary)
    res = Node.msgHasAcceptableViewNo(fake_node, msg, frm)
    assert proposed_view_no in fake_node.msgsForFutureViews
    assert proposed_view_no not in fake_node.msgsToViewChanger
    assert not res
Example #12
def test_future_vcdone_vc(fake_node, view_change_in_progress):
    """
    If from_current_state is False, then the message should be put only into the msgsForFutureViews queue
    """
    frm = 'Node3'
    fake_node.view_changer.view_change_in_progress = view_change_in_progress
    current_view = fake_node.viewNo
    proposed_view_no = current_view + 1
    msg = ViewChangeDone(proposed_view_no, frm, fake_node.ledger_summary)
    res = Node.msgHasAcceptableViewNo(fake_node, msg, frm)
    assert proposed_view_no in fake_node.msgsForFutureViews
    assert proposed_view_no not in fake_node.msgsToViewChanger
    assert not res
Example #13
def test_empty_line(tmpdir, looper):
    base_dir = str(tmpdir)
    name = "Node1"
    ledger_file = 'pool_transactions_sandbox'

    gen_txn = list(SAMPLE_GEN_4_POOL)
    gen_txn.insert(1, " ")

    _setup_genesis(base_dir, ledger_file, gen_txn)

    initialize_node_environment(name=name, base_dir=base_dir)

    n = Node(name=name, basedirpath=base_dir)
    looper.add(n)
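test_empty_line and several later genesis tests call a _setup_genesis helper that the listing never shows. Judging from Example #19 further down, which writes the same data by hand in order to control the encoding, the helper most likely just dumps the genesis transaction lines into the ledger file; here is a sketch under that assumption, not the project's actual code.

import os


def _setup_genesis(base_dir, ledger_file, gen_txn):
    # Write one genesis transaction per line into the pool ledger file
    # inside the node's base directory, using the default encoding.
    with open(os.path.join(base_dir, ledger_file), "w") as f:
        f.write("\n".join(gen_txn))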
Example #14
def test_duplicate_tnx(tmpdir, looper):
    base_dir = str(tmpdir)
    name = "Node1"
    ledger_file = 'pool_transactions_sandbox'

    gen_txn = list(SAMPLE_GEN_4_POOL)
    gen_txn[1] = SAMPLE_GEN_NODE_1

    _setup_genesis(base_dir, ledger_file, gen_txn)

    initialize_node_environment(name=name, base_dir=base_dir)

    n = Node(name=name, basedirpath=base_dir)
    looper.add(n)
Example #15
    def newNode(self, nodeName: str):
        opVerifiers = self.plugins['VERIFICATION'] if self.plugins else []
        if nodeName in self.nodes:
            self.print("Node {} already exists.".format(nodeName))
            return

        if nodeName == "all":
            names = set(self.nodeReg.keys()) - set(self.nodes.keys())
        elif nodeName not in self.nodeReg:
            tokens = [
                (Token.Error, "Invalid node name '{}'. ".format(nodeName))]
            self.printTokens(tokens)
            self.showValidNodes()
            return
        else:
            names = [nodeName]
        for name in names:
            node = Node(name, self.nodeReg, basedirpath=self.tmpdir,
                        opVerifiers=opVerifiers)
            self.nodes[name] = node
            self.looper.add(node)
            node.startKeySharing()
            for client in self.clients.values():
                self.bootstrapClientKey(client, node)
Example #16
    def getTxn(self, identifier, reqId, **kwargs):
        typ = kwargs[TXN_TYPE]
        edgeClass = getEdgeByTxnType(typ)
        edgeProps = ", ".join("@this.{} as __e_{}".format(name, name) for name in
                              txnEdgeProps)
        vertexProps = ", ".join("in.{} as __v_{}".format(name, name) for name in
                                chain.from_iterable(
                                    Vertices._Properties.values()))
        txnId = Node.genTxnId(identifier, reqId)
        cmd = "select {}, {} from {} where {} = '{}'". \
            format(edgeProps, vertexProps, edgeClass, f.TXN_ID.nm, txnId)

        result = self.client.command(cmd)
        return None if not result \
            else self.makeResult(typ, self.cleanKeyNames(result[0].oRecordData))
Example #17
def testNodesComingUpAtDifferentTimes():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            nodes = []

            names = list(nodeReg.keys())
            shuffle(names)
            waits = [randint(1, 10) for _ in names]
            rwaits = [randint(1, 10) for _ in names]

            for i, name in enumerate(names):
                node = Node(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
                looper.runFor(waits[i])
            looper.run(checkNodesConnected(nodes,
                                           overrideTimeout=10))
            print("connects")
            print("node order: {}".format(names))
            print("waits: {}".format(waits))

            for n in nodes:
                n.stop()
            for i, n in enumerate(nodes):
                n.start()
                looper.runFor(rwaits[i])
            looper.runFor(3)
            looper.run(checkNodesConnected(nodes,
                                           overrideTimeout=10))
            print("reconnects")
            print("node order: {}".format(names))
            print("rwaits: {}".format(rwaits))
Example #18
def test_from_current_state(fake_node):
    """
    If from_current_state is True and this is the initial propagate primary (current viewNo is 0),
    then the message should be put into the msgsToViewChanger queue with the from_current_state flag set to True
    """
    frm = 'Node3'
    fake_node.view_changer.view_change_in_progress = False
    current_view = fake_node.view_changer.last_completed_view_no
    proposed_view_no = current_view + 1
    msg = ViewChangeDone(proposed_view_no, frm, fake_node.ledger_summary)
    res = Node.msgHasAcceptableViewNo(fake_node, msg, frm, from_current_state=True)
    msg, frm = fake_node.msgsToViewChanger[0]
    assert len(fake_node.msgsToViewChanger) == 1
    assert msg.from_current_state
    assert res is False
Example #19
def test_utf_16(tmpdir, looper):
    base_dir = str(tmpdir)
    name = "Node1"
    ledger_file = 'pool_transactions_sandbox'

    gen_txn = list(SAMPLE_GEN_4_POOL)

    default_file = os.path.join(base_dir, ledger_file)
    genesis_data = "\n".join(gen_txn)
    with open(default_file, 'wb') as f:
        f.write(genesis_data.encode("UTF-16"))

    initialize_node_environment(name=name, base_dir=base_dir)

    n = Node(name=name, basedirpath=base_dir)
    looper.add(n)
Example #20
def test_complex_target(tmpdir, looper):
    """
        Test what happens if the target is a JSON object instead of a string
    """
    base_dir = str(tmpdir)
    name = "Node1"
    ledger_file = 'pool_transactions_sandbox'

    gen_txn = list(SAMPLE_GEN_4_POOL)
    gen_txn[0] = SAMPLE_GEN_NODE_1_COMPLEX_TARGET

    _setup_genesis(base_dir, ledger_file, gen_txn)

    initialize_node_environment(name=name, base_dir=base_dir)

    n = Node(name=name, basedirpath=base_dir)
    looper.add(n)
Example #21
def test_from_current_state(fake_node):
    """
    If from_current_state is True and this is the initial propagate primary (current viewNo is 0),
    then the message should be put into the msgsToViewChanger queue with the from_current_state flag set to True
    """
    frm = 'Node3'
    fake_node.view_changer.view_change_in_progress = False
    current_view = fake_node.view_changer.last_completed_view_no
    proposed_view_no = current_view + 1
    msg = ViewChangeDone(proposed_view_no, frm, fake_node.ledger_summary)
    res = Node.msgHasAcceptableViewNo(fake_node,
                                      msg,
                                      frm,
                                      from_current_state=True)
    msg, frm = fake_node.msgsToViewChanger[0]
    assert len(fake_node.msgsToViewChanger) == 1
    assert msg.from_current_state
    assert res is False
Example #22
def create(name):
    node = Node(name, nodeReg, basedirpath=td)
    looper.add(node)
    node.startKeySharing()
    nodes.append(node)
Example #23
def test_dump_additional_info(node):
    Node.dump_additional_info(node)
    file_name = node._info_tool.ADDITIONAL_FILE_NAME_TEMPLATE.format(node_name=node.name.lower())
    file_path = os.path.join(node.node_info_dir, file_name)
    assert os.path.exists(file_path)
Example #24
and port numbers.
"""
nodeReg = {
    'Alpha':
    NodeDetail(HA('127.0.0.1', 7560), "AlphaC", HA('127.0.0.1', 7561)),
    'Beta':
    NodeDetail(HA('127.0.0.1', 7562), "BetaC", HA('127.0.0.1', 7563)),
    'Gamma':
    NodeDetail(HA('127.0.0.1', 7564), "GammaC", HA('127.0.0.1', 7565)),
    'Delta':
    NodeDetail(HA('127.0.0.1', 7566), "DeltaC", HA('127.0.0.1', 7567))
}
"""
Create a node called Alpha
"""
alpha = Node('Alpha', nodeReg, basedirpath=tmpdir)
"""
Add the Alpha node to the looper, so it can be serviced.
"""
looper.add(alpha)
"""
Start key sharing among nodes. Key sharing is a way to bootstrap a
consensus pool when you don't want to manually construct keys
beforehand. See the github wiki for more details on key sharing.
"""
alpha.startKeySharing()
"""
Do the same process for the other nodes. Ordinarily, we would never have
more than one node on a machine, but this is for demonstration purposes.
"""
beta = Node('Beta', nodeReg, basedirpath=tmpdir)
Example #25
        default='/etc/indy/indy.env')
    args = parser.parse_args()
    path_to_txns = os.path.realpath(args.infpath)
    path_to_env = os.path.realpath(args.env_file)

    if not os.path.exists(path_to_txns):
        print("Path to txns file does not exist")
        sys.exit(1)

    if not os.path.exists(path_to_env):
        print("Path to env file does not exist")
        sys.exit(1)

    nname, nport, ncliport = get_ha_cliha_node_name(path_to_env)
    ha = HA("0.0.0.0", nport)
    cliha = HA("0.0.0.0", ncliport)
    config_helper = NodeConfigHelper(nname, config)

    node = Node(nname,
                ha=ha,
                cliha=cliha,
                config_helper=config_helper,
                config=config)
    i = 0
    with open(path_to_txns) as txns:
        for txn in txns:
            node.domainLedger.add(json.loads(txn))
            i += 1
            if not i % 1000:
                print("added {} txns".format(i))
Example #26
    def mark_request_as_executed(self, request):
        Node.mark_request_as_executed(self, request)
Example #27
    def num_txns_caught_up_in_last_catchup(self):
        return Node.num_txns_caught_up_in_last_catchup(self)
Example #28
    def allLedgersCaughtUp(self):
        Node.allLedgersCaughtUp(self)
Example #29
def test_dump_additional_info(node):
    Node.dump_additional_info(node)
    file_name = node._info_tool.ADDITIONAL_FILE_NAME_TEMPLATE.format(
        node_name=node.name.lower())
    file_path = os.path.join(node.node_info_dir, file_name)
    assert os.path.exists(file_path)
Example #30
        """
        nodeReg = {
            'Alpha': NodeDetail(HA('127.0.0.1', 7560), "AlphaC",
                                HA('127.0.0.1', 7561)),
            'Beta': NodeDetail(HA('127.0.0.1', 7562), "BetaC",
                               HA('127.0.0.1', 7563)),
            'Gamma': NodeDetail(HA('127.0.0.1', 7564), "GammaC",
                                HA('127.0.0.1', 7565)),
            'Delta': NodeDetail(HA('127.0.0.1', 7566), "DeltaC",
                                HA('127.0.0.1', 7567))
        }

        """
        Create a node called Alpha
        """
        alpha = Node('Alpha', nodeReg, basedirpath=tmpdir)

        """
        Add the Alpha node to the looper, so it can be serviced.
        """
        looper.add(alpha)

        """
        Start key sharing among nodes. Key sharing is a way to bootstrap a
        consensus pool when you don't want to manually construct keys
        beforehand. See the github wiki for more details on key sharing.
        """
        alpha.startKeySharing()

        """
        Do the same process for the other nodes. Ordinarily, we would never have
Example #31
    def __init__(self, *args, **kwargs):
        Node.__init__(self, *args, **kwargs)
        TestNodeCore.__init__(self)
Example #32
    args = parser.parse_args()
    path_to_txns = os.path.realpath(args.infpath)
    path_to_env = os.path.realpath(args.env_file)

    if not os.path.exists(path_to_txns):
        print("Path to txns file does not exist")
        sys.exit(1)

    if not os.path.exists(path_to_env):
        print("Path to env file does not exist")
        sys.exit(1)

    nname, nport, ncliport = get_ha_cliha_node_name(path_to_env)
    ha = HA("0.0.0.0", nport)
    cliha = HA("0.0.0.0", ncliport)
    config_helper = NodeConfigHelper(nname, config)

    node = Node(nname,
                nodeRegistry=None,
                ha=ha,
                cliha=cliha,
                config_helper=config_helper,
                config=config)
    i = 0
    with open(path_to_txns) as txns:
        for txn in txns:
            node.domainLedger.add(json.loads(txn))
            i += 1
            if not i % 1000:
                print("added {} txns".format(i))
Example #33
def test_no_drops(node):
    Node.check_outdated_reqs(node)
    assert len(node.requests) == 3
    assert node.propagates_phase_req_timeouts == 0
    assert node.ordering_phase_req_timeouts == 0
Example #34
    def __init__(self, *args, **kwargs):
        Node.__init__(self, *args, **kwargs)
        TestNodeCore.__init__(self)
Example #35
    def allLedgersCaughtUp(self):
        Node.allLedgersCaughtUp(self)
Example #36
    def _clean_non_forwarded_ordered(self):
        return Node._clean_non_forwarded_ordered(self)
Example #37
    def _clean_non_forwarded_ordered(self):
        return Node._clean_non_forwarded_ordered(self)
Example #38
    def set_view_change_status(self, value):
        return Node.set_view_change_status(self, value)
Example #39
    def num_txns_caught_up_in_last_catchup(self):
        return Node.num_txns_caught_up_in_last_catchup(self)
Example #40
    def mark_request_as_executed(self, request):
        Node.mark_request_as_executed(self, request)
Example #41
def test_no_drops(node):
    Node.check_outdated_reqs(node)
    assert len(node.requests) == 3
    assert node.propagates_phase_req_timeouts == 0
    assert node.ordering_phase_req_timeouts == 0