Example #1
def view_change_service(internal_bus, external_bus, timer, stasher,
                        validators):
    # TODO: Use validators fixture
    data = ConsensusSharedData("some_name", genNodeNames(4), 0)
    primaries_selector = RoundRobinConstantNodesPrimariesSelector(validators)
    return ViewChangeService(data, timer, internal_bus, external_bus, stasher,
                             primaries_selector)
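A minimal usage sketch, assuming pytest resolves the fixture chain above (the test name is hypothetical):

def test_view_change_service_fixture(view_change_service):
    # pytest injects the constructed ViewChangeService instance.
    assert view_change_service is not None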
Example #2
def test_nodes(mock_timer):
    random = DefaultSimRandom()
    net = SimNetwork(mock_timer, random)
    return [
        TestNode(name, net.create_peer(name))
        for name in genNodeNames(NODE_COUNT)
    ]
Example #3
    def __init__(self,
                 names: Iterable[str] = None,
                 count: int = None,
                 nodeReg=None,
                 tmpdir=None,
                 keyshare=True,
                 primaryDecider=None,
                 pluginPaths: Iterable[str] = None,
                 testNodeClass=TestNode):

        super().__init__()
        self.tmpdir = tmpdir
        self.keyshare = keyshare
        self.primaryDecider = primaryDecider
        self.pluginPaths = pluginPaths

        self.testNodeClass = testNodeClass
        self.nodes = OrderedDict()  # type: Dict[str, TestNode]
        # Could use just self.nodes rather than maintaining a separate
        # dictionary, but then we would have to pluck attributes out of
        # `self.nodes`, so keep it simple at the cost of extra memory; it's
        # test code, so not a big deal
        if nodeReg:
            self.nodeReg = nodeReg
        else:
            nodeNames = (names if names is not None and count is None else
                         genNodeNames(count) if count is not None else error(
                             "only one of either names or count is required"))
            self.nodeReg = genNodeReg(
                names=nodeNames)  # type: Dict[str, NodeDetail]
        for name in self.nodeReg.keys():
            self.addNode(name)
        # The following lets us access the nodes by name as attributes of the
        # NodeSet. It's not a problem unless a node name shadows a member.
        self.__dict__.update(self.nodes)
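A short illustration of the attribute access described in the comment above, assuming genNodeNames yields names like 'Alpha' and 'Beta' (as Example #18 suggests); some_tmpdir is a placeholder:

with TestNodeSet(count=4, tmpdir=some_tmpdir) as nodes:
    # self.__dict__.update(self.nodes) mirrors each node as an attribute.
    assert nodes.Alpha is nodes.nodes['Alpha']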
Example #4
    def __init__(self,
                 names: Iterable[str] = None,
                 count: int = None,
                 nodeReg=None,
                 tmpdir=None,
                 keyshare=True,
                 primaryDecider=None,
                 opVerificationPluginPath=None):
        super().__init__()

        self.tmpdir = tmpdir

        self.primaryDecider = primaryDecider
        self.opVerificationPluginPath = opVerificationPluginPath

        self.nodes = OrderedDict()  # type: Dict[str, TestNode]
        # Could use just self.nodes rather than maintaining a separate
        # dictionary, but then we would have to pluck attributes out of
        # `self.nodes`, so keep it simple at the cost of extra memory; it's
        # test code, so not a big deal

        if nodeReg:
            self.nodeReg = nodeReg
        else:
            nodeNames = (names if names is not None and count is None else
                         genNodeNames(count) if count is not None else
                         error("only one of either names or count is required"))
            self.nodeReg = genNodeReg(
                    names=nodeNames)  # type: Dict[str, NodeDetail]
        for name in self.nodeReg.keys():
            node = self.addNode(name)

        # The following lets us access the nodes by name as attributes of the
        # NodeSet. It's not a problem unless a node name shadows a member.
        self.__dict__.update(self.nodes)
Example #5
    def __init__(self,
                 node_count: int = 4,
                 random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random,
                                   self._serialize_deserialize)
        self._nodes = []
        self._genesis_txns = None
        self._genesis_validators = genNodeNames(node_count)
        self.validators = self._genesis_validators
        # TODO: maybe this should be chosen randomly too?
        self._primary_name = self._genesis_validators[0]
        self._internal_buses = {}
        self._node_votes = {}
        self._ports = self._random.sample(range(9000, 9999),
                                          2 * len(self._genesis_validators))
        # TODO: remove after catchup_service is implemented (INDY-2148)
        #  and we can change the pool after a NODE txn
        self._expected_node_reg = self.validators

        # Actions
        self._generate_genensis_txns()

        # Create nodes from genesis
        for name in self._genesis_validators:
            self.add_new_node(name)
Example #6
    def __init__(self,
                 node_count: int = 4,
                 random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random,
                                   self._serialize_deserialize)
        self._nodes = []
        validators = genNodeNames(node_count)
        # TODO: maybe this should be chosen randomly too?
        primary_name = validators[0]

        genesis_txns = create_pool_txn_data(
            node_names=validators,
            crypto_factory=create_default_bls_crypto_factory(),
            get_free_port=partial(self._random.integer, 9000, 9999))['txns']

        for name in validators:
            # TODO: emulate it the same way as in Replica, i.e. the sender must have the 'node_name:inst_id' form
            replica_name = generateName(name, 0)
            handler = partial(self.network._send_message, replica_name)
            write_manager = create_test_write_req_manager(name, genesis_txns)
            write_manager.node_reg_handler.node_reg_at_beginning_of_view[
                0] = validators
            replica = ReplicaService(replica_name,
                                     validators,
                                     primary_name,
                                     self._timer,
                                     InternalBus(),
                                     self.network.create_peer(name, handler),
                                     write_manager=write_manager,
                                     bls_bft_replica=MockBlsBftReplica())
            replica.config.NEW_VIEW_TIMEOUT = 30 * 1000
            self._nodes.append(replica)
Example #7
def test_nodes(sim_network, mock_timer, random):
    names = genNodeNames(NODE_COUNT)
    # The random fixture's shuffle is expected to return the shuffled list
    # (its result is assigned), unlike stdlib random.shuffle.
    names = random.shuffle(names)
    return [
        TestNode(name, mock_timer, sim_network.create_peer(name))
        for name in names
    ]
Example #8
def poolTxnData(request):
    nodeCount = getValueFromModule(request, "nodeCount", 4)
    data = {'txns': [], 'seeds': {}}
    for i, node_name in zip(range(1, nodeCount + 1), genNodeNames(nodeCount)):
        data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
        steward_name = 'Steward' + str(i)
        data['seeds'][steward_name] = steward_name + \
            '0' * (32 - len(steward_name))
        n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
        s_idr = SimpleSigner(
            seed=data['seeds'][steward_name].encode()).identifier
        data['txns'].append({
            TXN_TYPE: NYM,
            ROLE: STEWARD,
            ALIAS: steward_name,
            TARGET_NYM: s_idr
        })
        data['txns'].append({
            TXN_TYPE: NODE,
            f.IDENTIFIER.nm: s_idr,
            TARGET_NYM: n_idr,
            DATA: {
                ALIAS: node_name,
                SERVICES: [VALIDATOR],
                NODE_IP: '127.0.0.1',
                NODE_PORT: genHa()[1],
                CLIENT_IP: '127.0.0.1',
                CLIENT_PORT: genHa()[1]
            }
        })

    # Below is some static data that is needed for some CLI tests
    more_data = {'txns': [
        {"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
         "dest": "4AdS22kC7xzb4bcqg9JATuCfAMNcQYcZa1u5eWzs6cSJ",
         "type": "1",
         "alias": "Alice"},
        {"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
         "dest": "46Kq4hASUdvUbwR7s7Pie3x8f4HRB3NLay7Z9jh9eZsB",
         "type": "1",
         "alias": "Jason"},
        {"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
         "dest": "3wpYnGqceZ8DzN3guiTd9rrYkWTwTHCChBSuo6cvkXTG",
         "type": "1",
         "alias": "John"},
        {"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
         "dest": "4Yk9HoDSfJv9QcmJbLcXdWVgS7nfvdUqiVcvbSu8VBru",
         "type": "1",
         "alias": "Les"}
    ], 'seeds': {
        "Alice": "99999999999999999999999999999999",
        "Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
        "John": "dddddddddddddddddddddddddddddddd",
        "Les": "ffffffffffffffffffffffffffffffff"
    }}

    data['txns'].extend(more_data['txns'])
    data['seeds'].update(more_data['seeds'])
    return data
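A sketch of how a test might consume the returned structure (the test name is hypothetical; the length check follows the '0'-padding above):

def test_pool_txn_data_shape(poolTxnData):
    # Node and steward seeds are padded with '0' to exactly 32 characters.
    assert all(len(seed) == 32 for seed in poolTxnData['seeds'].values())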
Example #9
def poolTxnData(request):
    node_count = getValueFromModule(request, "nodeCount", 4)
    nodes_with_bls = getValueFromModule(request, "nodes_wth_bls", node_count)
    node_names = genNodeNames(node_count)
    return create_pool_txn_data(node_names=node_names,
                                crypto_factory=create_default_bls_crypto_factory(),
                                get_free_port=lambda: genHa()[1],
                                nodes_with_bls=nodes_with_bls)
Example #10
def poolTxnData(request):
    nodeCount = getValueFromModule(request, "nodeCount", 4)
    nodes_with_bls = getValueFromModule(request, "nodes_wth_bls", nodeCount)
    data = {'txns': [], 'seeds': {}, 'nodesWithBls': {}}
    for i, node_name in zip(range(1, nodeCount + 1), genNodeNames(nodeCount)):
        data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
        steward_name = 'Steward' + str(i)
        data['seeds'][steward_name] = steward_name + \
            '0' * (32 - len(steward_name))

        n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
        s_idr = DidSigner(seed=data['seeds'][steward_name].encode())

        data['txns'].append({
            TXN_TYPE: NYM,
            ROLE: STEWARD,
            ALIAS: steward_name,
            TARGET_NYM: s_idr.identifier,
            VERKEY: s_idr.verkey,
        })
        node_txn = {
            TXN_TYPE: NODE,
            f.IDENTIFIER.nm: s_idr.identifier,
            TARGET_NYM: n_idr,
            DATA: {
                ALIAS: node_name,
                SERVICES: [VALIDATOR],
                NODE_IP: '127.0.0.1',
                NODE_PORT: genHa()[1],
                CLIENT_IP: '127.0.0.1',
                CLIENT_PORT: genHa()[1],
            }
        }

        if i <= nodes_with_bls:
            _, bls_key = create_default_bls_crypto_factory().generate_bls_keys(
                seed=data['seeds'][node_name])
            node_txn[DATA][BLS_KEY] = bls_key
            data['nodesWithBls'][node_name] = True

        data['txns'].append(node_txn)

    more_data_seeds = \
        {
            "Alice": "99999999999999999999999999999999",
            "Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
            "John": "dddddddddddddddddddddddddddddddd",
            "Les": "ffffffffffffffffffffffffffffffff"
        }
    more_data_users = []
    for more_name, more_seed in more_data_seeds.items():
        signer = DidSigner(seed=more_seed.encode())
        more_data_users.append({TXN_TYPE: NYM, ALIAS: more_name, TARGET_NYM: signer.identifier, VERKEY: signer.verkey,
                                f.IDENTIFIER.nm: "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC"})

    data['txns'].extend(more_data_users)
    data['seeds'].update(more_data_seeds)
    return data
Example #11
def validator(view_no):
    validators = genNodeNames(4)
    inst_id = 0
    cd = ConsensusSharedData(generateName(validators[0], inst_id), validators,
                             inst_id, True)
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    return OrderingServiceMsgValidator(data=cd)
Example #12
    def __init__(self,
                 node_count: int = 4,
                 random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random)
        self._nodes = [
            ReplicaService(name, self.network.create_peer(name))
            for name in genNodeNames(node_count)
        ]
Example #13
def validator(view_no):
    validators = genNodeNames(4)
    inst_id = 0
    cd = ConsensusSharedData(generateName(validators[0], inst_id), validators, inst_id, True)
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    cd.node_status = Status.started
    cd.prev_view_prepare_cert = cd.last_ordered_3pc[1]
    return OrderingServiceMsgValidator(data=cd)
Example #14
def testConnectWithoutKeySharingFails(tdir_for_func):
    """
    Attempts to connect to nodes when key sharing is disabled must fail.
    """
    nodeNames = genNodeNames(5)

    with pytest.raises(PublicKeyNotFoundOnDisk):
        with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func,
                         keyshare=False) as nodes:
            with Looper(nodes) as looper:
                looper.runFor(2)
Example #15
def view_change_trigger_service(internal_bus, external_bus, timer, stasher, validators):
    # TODO: Use validators fixture
    data = ConsensusSharedData("some_name", genNodeNames(4), 0)
    data.node_mode = Mode.participating
    data.node_status = Status.started
    return ViewChangeTriggerService(data=data,
                                    timer=timer,
                                    bus=internal_bus,
                                    network=external_bus,
                                    db_manager=DatabaseManager(),
                                    stasher=stasher,
                                    is_master_degraded=lambda: False)
Example #16
    def __init__(self,
                 node_count: int = 4,
                 random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random)
        validators = genNodeNames(node_count)
        primary_name = validators[0]
        self._nodes = [
            ReplicaService(name, validators, primary_name, self._timer,
                           InternalBus(), self.network.create_peer(name))
            for name in validators
        ]
Example #17
def primary_connection_monitor_service(internal_bus, external_bus, timer):
    # TODO: Use validators fixture
    nodes = genNodeNames(4)
    data = ConsensusSharedData("some_name", nodes, 0)
    data.node_mode = Mode.participating
    data.node_status = Status.started
    data.primary_name = nodes[0]
    service = PrimaryConnectionMonitorService(data=data,
                                              timer=timer,
                                              bus=internal_bus,
                                              network=external_bus)
    internal_bus.send(PrimarySelected())
    return service
Example #18
def test_even_compare():
    vals = genNodeNames(24)
    for v1 in vals:
        beats = [v2 for v2 in vals if v1 != v2 and evenCompare(v1, v2)]
        print("{} beats {} others: {}".format(v1, len(beats), beats))
    evenCompare('Zeta', 'Alpha')

    def hashit(s):
        b = s.encode('utf-8')
        c = crypto_hash_sha256(b)
        return c.hex()

    for v in vals:
        print("{}: {}".format(v, hashit(v)))
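The pairwise property the prints above are eyeballing can be asserted directly; a sketch, assuming evenCompare orders distinct names by their sha256 digests:

def test_even_compare_is_antisymmetric():
    vals = genNodeNames(24)
    for v1 in vals:
        for v2 in vals:
            if v1 != v2:
                # Exactly one side of each distinct pair should win.
                assert evenCompare(v1, v2) != evenCompare(v2, v1)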
Example #20
def testConnectWithoutKeySharingFails(tdir_for_func):
    """
    Attempts to connect to nodes when key sharing is disabled must fail.
    """
    nodeNames = genNodeNames(5)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func, keyshare=False) as nodes:
        with Looper(nodes) as looper:
            try:
                looper.run(checkNodesConnected(nodes, RemoteState(None, None, None)))
            except RemoteNotFound:
                pass
            except KeyError as ex:
                assert [n for n in nodeNames if n == ex.args[0]]
            except Exception:
                raise
Example #21
def test_distributedConnectionMap():
    for nodeCount in range(2, 25):
        print("testing for node count: {}".format(nodeCount))
        names = genNodeNames(nodeCount)
        conmap = distributedConnectionMap(names)

        total_combinations = len(list(combinations(names, 2)))
        total_combinations_in_map = sum(len(x) for x in conmap.values())
        assert total_combinations_in_map == total_combinations

        maxPer = math.ceil(total_combinations / nodeCount)
        minPer = math.floor(total_combinations / nodeCount)
        for x in conmap.values():
            assert len(x) <= maxPer
            assert len(x) >= minPer
Example #23
def testRequestReturnToNodeWhenPrePrepareNotReceivedByOneNode(tdir_for_func):
    """Test no T-3"""
    nodeNames = genNodeNames(7)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            logger.debug("Add the seven nodes back in")
            # Every node except A delays self nomination so A can become primary
            nodeA = addNodeBack(nodeSet, looper, nodeNames[0])
            for i in range(1, 7):
                node = addNodeBack(nodeSet, looper, nodeNames[i])
                node.delaySelfNomination(15)

            nodeB = nodeSet.getNode(nodeNames[1])
            # Node B delays the PREPREPARE from node A (which would be the
            # primary) for a long time.
            nodeB.nodeIbStasher.delay(
                delayerMsgTuple(120, PrePrepare, nodeA.name))

            # Ensure elections are done
            ensureElectionsDone(looper=looper,
                                nodes=nodeSet,
                                retryWait=1,
                                timeout=30)
            assert nodeA.hasPrimary

            instNo = nodeA.primaryReplicaNo
            client1, wallet1 = setupClient(looper,
                                           nodeSet,
                                           tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)

            # All nodes including B should return their ordered requests
            for node in nodeSet:
                looper.run(
                    eventually(checkRequestReturnedToNode,
                               node,
                               wallet1.defaultId,
                               req.reqId,
                               req.digest,
                               instNo,
                               retryWait=1,
                               timeout=30))

            # Node B should not have received the PRE-PREPARE request yet
            replica = nodeB.replicas[instNo]  # type: Replica
            assert len(replica.prePrepares) == 0
Example #24
def testConnectWithoutKeySharingFails(tdir_for_func):
    """
    Attempts to connect to nodes when key sharing is disabled must fail.
    """
    nodeNames = genNodeNames(5)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func,
                     keyshare=False) as nodes:
        with Looper(nodes) as looper:
            try:
                looper.run(checkNodesConnected(nodes, NOT_CONNECTED))
            except RemoteNotFound:
                pass
            except KeyError as ex:
                assert [n for n in nodeNames if n == ex.args[0]]
            except Exception:
                raise
Example #25
    def __init__(self, node_count: int = 4, random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random)
        validators = genNodeNames(node_count)
        primary_name = validators[0]

        genesis_txns = create_pool_txn_data(
            node_names=validators,
            crypto_factory=create_default_bls_crypto_factory(),
            get_free_port=partial(self._random.integer, 9000, 9999))['txns']

        self._nodes = [ReplicaService(name, validators, primary_name,
                                      self._timer, InternalBus(), self.network.create_peer(name),
                                      write_manager=create_test_write_req_manager(name, genesis_txns))
                       for name in validators]
Example #26
def genNodeReg(count=None, names=None) -> Dict[str, NodeDetail]:
    """

    :param count: number of nodes, mutually exclusive with names
    :param names: iterable with names of nodes, mutually exclusive with count
    :return: dictionary of name: (node stack HA, client stack name, client stack HA)
    """
    if names is None:
        names = genNodeNames(count)
    nodeReg = OrderedDict(
            (n, NodeDetail(genHa(), n + CLIENT_STACK_SUFFIX, genHa())) for n in
            names)

    def extractCliNodeReg(self):
        return OrderedDict((n.cliname, n.cliha) for n in self.values())

    nodeReg.extractCliNodeReg = types.MethodType(extractCliNodeReg, nodeReg)
    return nodeReg
Example #27
def genNodeReg(count=None, names=None) -> Dict[str, NodeDetail]:
    """

    :param count: number of nodes, mutually exclusive with names
    :param names: iterable with names of nodes, mutually exclusive with count
    :return: dictionary of name: (node stack HA, client stack name, client stack HA)
    """
    if names is None:
        names = genNodeNames(count)
    nodeReg = OrderedDict(
        (n, NodeDetail(genHa(), n + CLIENT_STACK_SUFFIX, genHa()))
        for n in names)

    def extractCliNodeReg(self):
        return OrderedDict((n.cliname, n.cliha) for n in self.values())

    nodeReg.extractCliNodeReg = types.MethodType(extractCliNodeReg, nodeReg)
    return nodeReg
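A usage sketch for genNodeReg; per the code above, the client stack name is the node name plus CLIENT_STACK_SUFFIX:

reg = genNodeReg(count=4)          # keys like 'Alpha', 'Beta', ...
detail = reg['Alpha']              # NodeDetail(node HA, client stack name, client HA)
cli_reg = reg.extractCliNodeReg()  # OrderedDict: client stack name -> client HA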
Example #28
    def __init__(self,
                 node_count: int = 4,
                 random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random,
                                   self._serialize_deserialize)
        self._nodes = []
        validators = genNodeNames(node_count)
        # TODO: maybe this should be chosen randomly too?
        primary_name = validators[0]

        genesis_txns = create_pool_txn_data(
            node_names=validators,
            crypto_factory=create_default_bls_crypto_factory(),
            get_free_port=partial(self._random.integer, 9000, 9999))['txns']

        for name in validators:
            # TODO: emulate it the same way as in Replica, i.e. the sender must have the 'node_name:inst_id' form
            replica_name = generateName(name, 0)
            handler = partial(self.network._send_message, replica_name)
            write_manager = create_test_write_req_manager(name, genesis_txns)
            replica = ReplicaService(replica_name,
                                     validators,
                                     primary_name,
                                     self._timer,
                                     InternalBus(),
                                     self.network.create_peer(name, handler),
                                     write_manager=write_manager,
                                     bls_bft_replica=MockBlsBftReplica())
            # TODO: For now, future_primaries_handler depends on the node,
            # so for now we need to patch the set_node_state functionality
            future_primaries_handler = FuturePrimariesBatchHandler(
                write_manager.database_manager,
                FakeSomething(nodeReg={}, nodeIds=[]))
            future_primaries_handler._get_primaries = lambda *args, **kwargs: replica._data.primaries
            write_manager.register_batch_handler(future_primaries_handler)
            # TODO: also, this should be done at the zero-view stage.
            write_manager.future_primary_handler.set_node_state()
            replica.config.NEW_VIEW_TIMEOUT = 30 * 1000
            self._nodes.append(replica)
Example #29
def testRequestReturnToNodeWhenPrePrepareNotReceivedByOneNode(tdir_for_func):
    """Test no T-3"""
    nodeNames = genNodeNames(7)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            logging.debug("Add the seven nodes back in")
            # Every node except A delays self nomination so A can become primary
            nodeA = addNodeBack(nodeSet, looper, nodeNames[0])
            for i in range(1, 7):
                node = addNodeBack(nodeSet, looper, nodeNames[i])
                node.delaySelfNomination(15)

            nodeB = nodeSet.getNode(nodeNames[1])
            # Node B delays the PREPREPARE from node A (which would be the
            # primary) for a long time.
            nodeB.nodeIbStasher.delay(
                delayerMsgTuple(120, PrePrepare, nodeA.name))

            # Ensure elections are done
            ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1,
                                timeout=30)
            assert nodeA.hasPrimary

            instNo = nodeA.primaryReplicaNo
            client1 = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            req = sendRandomRequest(client1)

            # All nodes including B should return their ordered requests
            for node in nodeSet:
                looper.run(eventually(checkRequestReturnedToNode, node,
                                      client1.defaultIdentifier, req.reqId,
                                      req.digest,
                                      instNo, retryWait=1, timeout=30))

            # Node B should not have received the PRE-PREPARE request yet
            replica = nodeB.replicas[instNo]  # type: Replica
            assert len(replica.prePrepares) == 0
Example #30
def create_new_view(initial_view_no, stable_cp, validators=None):
    validators = validators or genNodeNames(4)
    batches = create_batches(initial_view_no)
    vc = create_view_change(initial_view_no, stable_cp, batches)
    return create_new_view_from_vc(vc, validators)
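A hedged usage sketch (values are illustrative; stable_cp is read here as the stable checkpoint's sequence number):

new_view = create_new_view(initial_view_no=0, stable_cp=200)
# With validators=None the helper falls back to genNodeNames(4), so the
# NEW_VIEW is built as if for a 4-node pool.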
Example #31
def testProtocolInstanceCannotBecomeActiveWithLessThanFourServers(
        tdir_for_func):
    """
    A protocol instance must have at least 4 nodes to come up.
    The status of the nodes will change from starting to started only after the
    addition of the fourth node to the system.
    """
    nodeCount = 13
    f = 4
    minimumNodesToBeUp = nodeCount - f

    nodeNames = genNodeNames(nodeCount)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:

            # helpers

            def genExpectedStates(connecteds: Iterable[str]):
                return {
                    nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED
                    for nn in nodeNames}

            def checkNodeStatusRemotesAndF(expectedStatus: Status,
                                           nodeIdx: int):
                for node in nodeSet.nodes.values():
                    checkNodeRemotes(node,
                                     genExpectedStates(nodeNames[:nodeIdx + 1]))
                    assert node.status == expectedStatus

            def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
                logger.info("Add back the {} node and see status of {}".
                            format(ordinal(nodeIdx + 1), expectedStatus))
                addNodeBack(nodeSet, looper, nodeNames[nodeIdx])

                timeout = waits.expectedNodeStartUpTimeout() + \
                    waits.expectedPoolInterconnectionTime(len(nodeSet))
                looper.run(eventually(checkNodeStatusRemotesAndF,
                                      expectedStatus,
                                      nodeIdx,
                                      retryWait=1, timeout=timeout))

            logger.debug("Sharing keys")
            looper.run(checkNodesConnected(nodeSet))

            logger.debug("Remove all the nodes")
            for n in nodeNames:
                looper.removeProdable(nodeSet.nodes[n])
                nodeSet.removeNode(n, shouldClean=False)

            looper.runFor(10)

            logger.debug("Add nodes back one at a time")
            for i in range(nodeCount):
                nodes = i + 1
                if nodes < minimumNodesToBeUp:
                    expectedStatus = Status.starting
                elif nodes < nodeCount:
                    expectedStatus = Status.started_hungry
                else:
                    expectedStatus = Status.started
                addNodeBackAndCheck(i, expectedStatus)
Example #32
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i])
                for i in range(0, 4))

            # Nodes C and D delay self nomination so A and B can become
            # primaries
            nodeC.delaySelfNomination(30)
            nodeD.delaySelfNomination(30)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not

            # nodeD.nodestack.delay(delayer(20, PRIMARY))

            nodeD.nodeIbStasher.delay(delayerMsgTuple(20, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode,
                               node,
                               nodeD,
                               request.identifier,
                               request.reqId,
                               retryWait=1,
                               timeout=10))

            # Node D should have 1 pending PRE-PREPARE request
            def assertOnePrePrepare():
                assert len(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 PrePrepare)) == 1

            looper.run(eventually(assertOnePrePrepare, retryWait=1,
                                  timeout=10))

            # Node D should have 2 pending PREPARE requests(from node B and C)

            def assertTwoPrepare():
                assert len(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 Prepare)) == 2

            looper.run(eventually(assertTwoPrepare, retryWait=1, timeout=10))

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(lambda: assertLength(
                        getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                     reqType), 0),
                               retryWait=1,
                               timeout=20))
Example #33
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(
                    nodeSet, looper, nodeNames[i]) for i in range(
                    0, 4))

            # Since primary selection is round robin, A and B will be primaries

            # Nodes C and D delay self nomination so A and B can become
            # primaries
            # nodeC.delaySelfNomination(10)
            # nodeD.delaySelfNomination(10)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not

            delayD = 5
            nodeD.nodeIbStasher.delay(delayerMsgTuple(delayD, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=timeout))

            def assert_msg_count(typ, count):
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        typ)) == count

            # Node D should have 1 pending PRE-PREPARE request
            timeout = waits.expectedPrePrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, PrePrepare, 1,
                                  retryWait=1, timeout=timeout))

            # Node D should have 2 pending PREPARE requests(from node B and C)
            timeout = waits.expectedPrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, Prepare, 2, retryWait=1,
                                  timeout=timeout))

            # It's been checked above that the replica stashes 3-phase
            # messages in the absence of a primary; now remove the delay
            # (fix the network)
            nodeD.nodeIbStasher.reset_delays_and_process_delayeds()

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(
                        lambda: assertLength(
                            getPendingRequestsForReplica(
                                nodeD.replicas[instNo],
                                reqType),
                            0),
                        retryWait=1,
                        timeout=delayD))  # wait a little longer than the delay
Example #34
def testProtocolInstanceCannotBecomeActiveWithLessThanFourServers(
        txnPoolNodeSet, looper, tconf, tdir):
    """
    A protocol instance must have at least 4 nodes to come up.
    The status of the nodes will change from starting to started only after the
    addition of the fourth node to the system.
    """

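    # Note: nodeCount and minimumNodesToBeUp are not defined in this snippet;
    # in the source test module they are module-level constants (cf. the
    # in-function variant above: nodeCount = 13, minimumNodesToBeUp = nodeCount - f).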
    nodeNames = genNodeNames(nodeCount)
    current_node_set = list(txnPoolNodeSet)

    def genExpectedStates(connecteds: Iterable[str]):
        return {
            nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED
            for nn in nodeNames}

    def checkNodeStatusRemotesAndF(expectedStatus: Status,
                                   nodeIdx: int):
        for node in current_node_set:
            checkNodeRemotes(node,
                             genExpectedStates(nodeNames[:nodeIdx + 1]))
            assert node.status == expectedStatus

    def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
        logger.info("Add back the {} node and see status of {}".
                    format(ordinal(nodeIdx + 1), expectedStatus))
        addNodeBack(
            current_node_set, looper,
            get_node_by_name(txnPoolNodeSet, nodeNames[nodeIdx]),
            tconf, tdir)
        looper.run(checkNodesConnected(current_node_set))
        timeout = waits.expectedNodeStartUpTimeout() + \
                  waits.expectedPoolInterconnectionTime(len(current_node_set))
        # TODO: Probably it's better to modify waits.* functions
        timeout *= 1.5
        looper.run(eventually(checkNodeStatusRemotesAndF,
                              expectedStatus,
                              nodeIdx,
                              retryWait=1, timeout=timeout))

    logger.debug("Sharing keys")
    looper.run(checkNodesConnected(current_node_set))

    logger.debug("Remove all the nodes")
    for n in nodeNames:
        node_n = get_node_by_name(current_node_set, n)
        disconnect_node_and_ensure_disconnected(looper,
                                                current_node_set,
                                                node_n,
                                                timeout=nodeCount,
                                                stopNode=True)
        looper.removeProdable(node_n)
        current_node_set.remove(node_n)

    # looper.runFor(10)

    logger.debug("Add nodes back one at a time")
    for i in range(nodeCount):
        nodes = i + 1
        if nodes < minimumNodesToBeUp:
            expectedStatus = Status.starting
        elif nodes < nodeCount:
            expectedStatus = Status.started_hungry
        else:
            expectedStatus = Status.started
        addNodeBackAndCheck(i, expectedStatus)
Example #35
def poolTxnNodeNames(request, index=""):
    nodeCount = getValueFromModule(request, "nodeCount", 4)
    return [n + index for n in genNodeNames(nodeCount)]
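Illustrative output, assuming the Greek-letter names used throughout these examples:

# poolTxnNodeNames(request)             -> ['Alpha', 'Beta', 'Gamma', 'Delta']
# poolTxnNodeNames(request, index="1")  -> ['Alpha1', 'Beta1', 'Gamma1', 'Delta1']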
Example #36
def poolTxnData(request):
    nodeCount = getValueFromModule(request, "nodeCount", 4)
    nodes_with_bls = getValueFromModule(request, "nodes_wth_bls", nodeCount)
    data = {'txns': [], 'seeds': {}, 'nodesWithBls': {}}
    for i, node_name in zip(range(1, nodeCount + 1), genNodeNames(nodeCount)):
        data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
        steward_name = 'Steward' + str(i)
        data['seeds'][steward_name] = steward_name + \
                                      '0' * (32 - len(steward_name))

        n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
        s_idr = DidSigner(seed=data['seeds'][steward_name].encode())

        data['txns'].append(
                Member.nym_txn(nym=s_idr.identifier,
                               verkey=s_idr.verkey,
                               role=STEWARD,
                               name=steward_name,
                               seq_no=i)
        )

        node_txn = Steward.node_txn(steward_nym=s_idr.identifier,
                                    node_name=node_name,
                                    nym=n_idr,
                                    ip='127.0.0.1',
                                    node_port=genHa()[1],
                                    client_port=genHa()[1],
                                    client_ip='127.0.0.1',
                                    services=[VALIDATOR],
                                    seq_no=i)

        if i <= nodes_with_bls:
            _, bls_key, bls_key_proof = create_default_bls_crypto_factory().generate_bls_keys(
                seed=data['seeds'][node_name])
            get_payload_data(node_txn)[DATA][BLS_KEY] = bls_key
            get_payload_data(node_txn)[DATA][BLS_KEY_PROOF] = bls_key_proof
            data['nodesWithBls'][node_name] = True

        data['txns'].append(node_txn)

    # Add 4 Trustees
    for i in range(4):
        trustee_name = 'Trs' + str(i)
        data['seeds'][trustee_name] = trustee_name + '0' * (
                32 - len(trustee_name))
        t_sgnr = DidSigner(seed=data['seeds'][trustee_name].encode())
        data['txns'].append(
            Member.nym_txn(nym=t_sgnr.identifier,
                           verkey=t_sgnr.verkey,
                           role=TRUSTEE,
                           name=trustee_name)
        )

    more_data_seeds = \
        {
            "Alice": "99999999999999999999999999999999",
            "Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
            "John": "dddddddddddddddddddddddddddddddd",
            "Les": "ffffffffffffffffffffffffffffffff"
        }
    more_data_users = []
    for more_name, more_seed in more_data_seeds.items():
        signer = DidSigner(seed=more_seed.encode())
        more_data_users.append(
            Member.nym_txn(nym=signer.identifier,
                           verkey=signer.verkey,
                           name=more_name,
                           creator="5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC")
        )

    data['txns'].extend(more_data_users)
    data['seeds'].update(more_data_seeds)
    return data
Example #37
def poolTxnData(request):
    nodeCount = getValueFromModule(request, "nodeCount", 4)
    nodes_with_bls = getValueFromModule(request, "nodes_wth_bls", nodeCount)
    data = {'txns': [], 'seeds': {}, 'nodesWithBls': {}}
    for i, node_name in zip(range(1, nodeCount + 1), genNodeNames(nodeCount)):
        data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
        steward_name = 'Steward' + str(i)
        data['seeds'][steward_name] = steward_name + \
                                      '0' * (32 - len(steward_name))

        n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
        s_idr = DidSigner(seed=data['seeds'][steward_name].encode())

        data['txns'].append(
            Member.nym_txn(nym=s_idr.identifier,
                           verkey=s_idr.verkey,
                           role=STEWARD,
                           name=steward_name,
                           seq_no=i))

        node_txn = Steward.node_txn(steward_nym=s_idr.identifier,
                                    node_name=node_name,
                                    nym=n_idr,
                                    ip='127.0.0.1',
                                    node_port=genHa()[1],
                                    client_port=genHa()[1],
                                    client_ip='127.0.0.1',
                                    services=[VALIDATOR],
                                    seq_no=i)

        if i <= nodes_with_bls:
            _, bls_key, bls_key_proof = create_default_bls_crypto_factory(
            ).generate_bls_keys(seed=data['seeds'][node_name])
            get_payload_data(node_txn)[DATA][BLS_KEY] = bls_key
            get_payload_data(node_txn)[DATA][BLS_KEY_PROOF] = bls_key_proof
            data['nodesWithBls'][node_name] = True

        data['txns'].append(node_txn)

    # Add 4 Trustees
    for i in range(4):
        trustee_name = 'Trs' + str(i)
        data['seeds'][trustee_name] = trustee_name + '0' * (32 -
                                                            len(trustee_name))
        t_sgnr = DidSigner(seed=data['seeds'][trustee_name].encode())
        data['txns'].append(
            Member.nym_txn(nym=t_sgnr.identifier,
                           verkey=t_sgnr.verkey,
                           role=TRUSTEE,
                           name=trustee_name))

    more_data_seeds = \
        {
            "Alice": "99999999999999999999999999999999",
            "Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
            "John": "dddddddddddddddddddddddddddddddd",
            "Les": "ffffffffffffffffffffffffffffffff"
        }
    more_data_users = []
    for more_name, more_seed in more_data_seeds.items():
        signer = DidSigner(seed=more_seed.encode())
        more_data_users.append(
            Member.nym_txn(
                nym=signer.identifier,
                verkey=signer.verkey,
                name=more_name,
                creator="5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC"))

    data['txns'].extend(more_data_users)
    data['seeds'].update(more_data_seeds)
    return data
Example #38
def testProtocolInstanceCannotBecomeActiveWithLessThanFourServers(
        tdir_for_func):
    """
    A protocol instance must have at least 4 nodes to come up.
    The status of the nodes will change from starting to started only after the
    addition of the fourth node to the system.
    """
    nodeCount = 16
    f = 5
    minimumNodesToBeUp = nodeCount - f

    nodeNames = genNodeNames(nodeCount)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:

            for n in nodeSet:
                n.startKeySharing()

            # helpers

            def genExpectedStates(connecteds: Iterable[str]):
                return {
                    nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED
                    for nn in nodeNames}

            def checkNodeStatusRemotesAndF(expectedStatus: Status,
                                           nodeIdx: int):
                for node in nodeSet.nodes.values():
                    checkNodeRemotes(node,
                                     genExpectedStates(nodeNames[:nodeIdx + 1]))
                    assert node.status == expectedStatus

            def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
                logging.info("Add back the {} node and see status of {}".
                             format(ordinal(nodeIdx + 1), expectedStatus))
                addNodeBack(nodeSet, looper, nodeNames[nodeIdx])
                looper.run(
                        eventually(checkNodeStatusRemotesAndF, expectedStatus,
                                   nodeIdx,
                                   retryWait=1, timeout=30))

            # tests

            logging.debug("Sharing keys")
            looper.run(checkNodesConnected(nodeSet))

            logging.debug("Remove all the nodes")
            for n in nodeNames:
                looper.removeProdable(nodeSet.nodes[n])
                nodeSet.removeNode(n, shouldClean=False)

            logging.debug("Add nodes back one at a time")
            for i in range(nodeCount):
                nodes = i + 1
                if nodes < minimumNodesToBeUp:
                    expectedStatus = Status.starting
                elif nodes < nodeCount:
                    expectedStatus = Status.started_hungry
                else:
                    expectedStatus = Status.started
                addNodeBackAndCheck(i, expectedStatus)
Example #39
def consensus_data_provider():
    validators = genNodeNames(N)
    return ConsensusSharedData("nodeA", validators, 0)
Example #40
def poolTxnNodeNames(request, index=""):
    nodeCount = getValueFromModule(request, "nodeCount", 4)
    return [n + index for n in genNodeNames(nodeCount)]
Example #41
def testProtocolInstanceCannotBecomeActiveWithLessThanFourServers(
        txnPoolNodeSet, looper, tconf, tdir):
    """
    A protocol instance must have at least 4 nodes to come up.
    The status of the nodes will change from starting to started only after the
    addition of the fourth node to the system.
    """

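    # Note: nodeCount and minimumNodesToBeUp are module-level constants in
    # the source test module; they are not defined in this snippet.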
    nodeNames = genNodeNames(nodeCount)
    current_node_set = list(txnPoolNodeSet)

    def genExpectedStates(connecteds: Iterable[str]):
        return {
            nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED
            for nn in nodeNames}

    def checkNodeStatusRemotesAndF(expectedStatus: Status,
                                   nodeIdx: int):
        for node in current_node_set:
            checkNodeRemotes(node,
                             genExpectedStates(nodeNames[:nodeIdx + 1]))
            assert node.status == expectedStatus

    def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
        logger.info("Add back the {} node and see status of {}".
                    format(ordinal(nodeIdx + 1), expectedStatus))
        addNodeBack(
            current_node_set, looper,
            get_node_by_name(txnPoolNodeSet, nodeNames[nodeIdx]),
            tconf, tdir)
        looper.run(checkNodesConnected(current_node_set))
        timeout = waits.expectedNodeStartUpTimeout() + \
                  waits.expectedPoolInterconnectionTime(len(current_node_set))
        # TODO: Probably it's better to modify waits.* functions
        timeout *= 1.5
        looper.run(eventually(checkNodeStatusRemotesAndF,
                              expectedStatus,
                              nodeIdx,
                              retryWait=1, timeout=timeout))

    logger.debug("Sharing keys")
    looper.run(checkNodesConnected(current_node_set))

    logger.debug("Remove all the nodes")
    for n in nodeNames:
        node_n = get_node_by_name(current_node_set, n)
        disconnect_node_and_ensure_disconnected(looper,
                                                current_node_set,
                                                node_n,
                                                timeout=nodeCount,
                                                stopNode=True)
        looper.removeProdable(node_n)
        current_node_set.remove(node_n)

    # looper.runFor(10)

    logger.debug("Add nodes back one at a time")
    for i in range(nodeCount):
        nodes = i + 1
        if nodes < minimumNodesToBeUp:
            expectedStatus = Status.starting
        elif nodes < nodeCount:
            expectedStatus = Status.started_hungry
        else:
            expectedStatus = Status.started
        addNodeBackAndCheck(i, expectedStatus)
Example #42
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i]) for i in range(0, 4))

            # Nodes C and D delay self nomination so A and B can become
            # primaries
            nodeC.delaySelfNomination(30)
            nodeD.delaySelfNomination(30)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not

            # nodeD.nodestack.delay(delayer(20, PRIMARY))

            nodeD.nodeIbStasher.delay(delayerMsgTuple(20, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1 = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(client1)

            # TODO Rethink this
            instNo = 0

            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=10))

            # Node D should have 1 pending PRE-PREPARE request
            def assertOnePrePrepare():
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        PrePrepare)) == 1

            looper.run(eventually(assertOnePrePrepare, retryWait=1, timeout=10))

            # Node D should have 2 pending PREPARE requests(from node B and C)

            def assertTwoPrepare():
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        Prepare)) == 2

            looper.run(eventually(assertTwoPrepare, retryWait=1, timeout=10))

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(eventually(lambda: assertLength(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 reqType),
                    0), retryWait=1, timeout=20))