def orderer(
    _orderer,
    is_primary,
):
    # ToDo: for now future_primaries_handler depends on the node,
    # so we need to patch the set_node_state functionality here.
    write_manager = _orderer._write_manager
    future_primaries_handler = FuturePrimariesBatchHandler(
        write_manager.database_manager,
        FakeSomething(nodeReg={},
                      nodeIds=[],
                      primaries=_orderer._data.primaries))
    write_manager.register_batch_handler(future_primaries_handler)

    _orderer._validator = OrderingServiceMsgValidator(_orderer._data)
    _orderer.name = 'Alpha:0'
    _orderer._data.primary_name = 'some_node:0' if not is_primary else _orderer.name

    def _apply_and_validate_applied_pre_prepare_fake(pp, sender):
        global applied_pre_prepares
        applied_pre_prepares += 1

    _orderer._can_process_pre_prepare = lambda pp, sender: None
    _orderer._apply_and_validate_applied_pre_prepare = _apply_and_validate_applied_pre_prepare_fake

    return _orderer
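
# Note: the fake handler above bumps a module-level counter via
# `global applied_pre_prepares`, so the test module is assumed to define it at
# import time. A minimal sketch of that assumed counter:
applied_pre_prepares = 0
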
def test_re_order_pre_prepares(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle):
    # 0. use new 3PC validator
    for n in txnPoolNodeSet:
        ordering_service = n.master_replica._ordering_service
        ordering_service._validator = OrderingServiceMsgValidator(
            ordering_service._data)

    # 1. Drop Prepares and Commits on the 4th node
    # and order a few requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(),
                                        pDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    # 2. simulate view change start so that
    # all PrePrepares/Prepares/Commits are cleared
    # and uncommitted txns are reverted
    for n in txnPoolNodeSet:
        n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
        master_ordering_service = n.master_replica._ordering_service
        assert not master_ordering_service.prePrepares
        assert not master_ordering_service.prepares
        assert not master_ordering_service.commits
        assert master_ordering_service.old_view_preprepares
        ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
        state = n.db_manager.states[DOMAIN_LEDGER_ID]
        assert len(ledger.uncommittedTxns) == 0
        assert ledger.uncommitted_root_hash == ledger.tree.root_hash
        assert state.committedHead == state.head

    # 3. Simulate View Change finish to re-order the same PrePrepare
    assert lagging_node.master_last_ordered_3PC == (0, 0)
    new_master = txnPoolNodeSet[1]
    batches = sorted([
        preprepare_to_batch_id(pp)
        for _, pp in new_master.master_replica._ordering_service.old_view_preprepares.items()
    ])
    new_view_msg = NewViewCheckpointsApplied(view_no=0,
                                             view_changes=[],
                                             checkpoint=None,
                                             batches=batches)
    for n in txnPoolNodeSet:
        n.master_replica._consensus_data.prev_view_prepare_cert = batches[-1].pp_seq_no
        n.master_replica._ordering_service._bus.send(new_view_msg)

    # 4. Make sure that Nodes 1-3 (which already ordered the requests) send Prepares and Commits
    # so that the requests are eventually ordered on Node4 as well
    waitNodeDataEquality(looper, lagging_node, *other_nodes)
    assert lagging_node.master_last_ordered_3PC == (0, 4)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
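
# Note: the 3PC keys asserted throughout this test are (view_no, pp_seq_no)
# pairs, e.g. (0, 3) means the third batch of view 0 was the last one ordered
# on the master replica. A tiny illustration of the comparisons being made
# (values taken from the asserts above):
last_ordered = (0, 3)                    # what Nodes 1-3 reach after step 1
view_no, pp_seq_no = last_ordered
assert (0, 0) < last_ordered < (0, 4)    # tuples compare by view first, then seq no
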
def validator(view_no):
    validators = genNodeNames(4)
    inst_id = 0
    cd = ConsensusSharedData(generateName(validators[0], inst_id), validators,
                             inst_id, True)
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    return OrderingServiceMsgValidator(data=cd)
def validator(view_no):
    validators = genNodeNames(4)
    inst_id = 0
    cd = ConsensusSharedData(generateName(validators[0], inst_id), validators, inst_id, True)
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    cd.node_status = Status.started
    cd.prev_view_prepare_cert = cd.last_ordered_3pc[1]
    return OrderingServiceMsgValidator(data=cd)
Example #5
def test_not_order_already_ordered(o, pre_prepare, prepare, commit):
    o._validator = OrderingServiceMsgValidator(o._data)
    o.process_preprepare(pre_prepare, PRIMARY_NAME)
    o.last_ordered_3pc = (commit.viewNo, commit.ppSeqNo + 1)
    for i in range(o._data.quorums.prepare.value):
        if o._data.validators[i] + ":0" != o.name:
            o.process_prepare(prepare, o._data.validators[i])

    assert not o.ordered
    for i in range(o._data.quorums.n):
        o.process_commit(commit, o._data.validators[i])
    assert not o.ordered
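
# The loop bounds above come from plenum's Quorums object. As a rough sketch
# (the formulas below are an assumption mirroring plenum.server.quorums, not an
# import of it): with N validators and f = (N - 1) // 3 tolerated faults, the
# prepare quorum is N - f - 1 and the commit quorum is N - f.
def sketch_quorums(n):
    f = (n - 1) // 3                      # tolerated faulty nodes
    return {"n": n, "prepare": n - f - 1, "commit": n - f}

assert sketch_quorums(4) == {"n": 4, "prepare": 2, "commit": 3}
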
def orderer(_orderer, is_primary):
    _orderer._validator = OrderingServiceMsgValidator(_orderer._data)
    _orderer.name = 'Alpha:0'
    _orderer._data.primary_name = 'some_node:0' if not is_primary else _orderer.name

    def _apply_and_validate_applied_pre_prepare_fake(pp, sender):
        global applied_pre_prepares
        applied_pre_prepares += 1

    _orderer._can_process_pre_prepare = lambda pp, sender: None
    _orderer._apply_and_validate_applied_pre_prepare = _apply_and_validate_applied_pre_prepare_fake

    return _orderer
Example #7
    def __init__(self,
                 name: str,
                 validators: List[str],
                 primary_name: str,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 write_manager: WriteRequestManager,
                 bls_bft_replica: BlsBftReplica = None):
        # ToDo: Maybe ConsensusSharedData should be created beforehand and passed in already prepared?
        self._data = ConsensusSharedData(name, validators, 0)
        self._data.primary_name = generateName(primary_name,
                                               self._data.inst_id)
        self.config = getConfig()
        self.stasher = StashingRouter(self.config.REPLICA_STASH_LIMIT,
                                      buses=[bus, network])
        self._write_manager = write_manager
        self._orderer = OrderingService(
            data=self._data,
            timer=timer,
            bus=bus,
            network=network,
            write_manager=self._write_manager,
            bls_bft_replica=bls_bft_replica,
            freshness_checker=FreshnessChecker(
                freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL),
            stasher=self.stasher)
        self._orderer._validator = OrderingServiceMsgValidator(
            self._orderer._data)
        self._checkpointer = CheckpointService(self._data, bus, network,
                                               self.stasher,
                                               write_manager.database_manager)
        self._view_changer = ViewChangeService(self._data, timer, bus, network,
                                               self.stasher)
        self._message_requestor = MessageReqService(self._data, bus, network)

        self._add_ledgers()

        # TODO: This is for testing purposes only
        self._data.checkpoints.append(
            Checkpoint(instId=0,
                       viewNo=0,
                       seqNoStart=0,
                       seqNoEnd=0,
                       digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))

        # ToDo: this should be done in the zero-view stage.
        self._data.primaries = self._view_changer._primaries_selector.select_primaries(
            self._data.view_no,
            getMaxFailures(len(validators)) + 1, validators)
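
# The primaries selection above requests getMaxFailures(len(validators)) + 1
# primaries, i.e. one per replica instance. A sketch of the arithmetic,
# assuming getMaxFailures follows the usual BFT bound (an assumption mirroring
# plenum.common.util.getMaxFailures, not taken from the original):
def sketch_max_failures(node_count):
    return (node_count - 1) // 3          # f: tolerated Byzantine nodes

assert sketch_max_failures(4) + 1 == 2    # 4 validators -> f = 1 -> 2 primaries
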
Example #8
def test_process_ordered_commit(o, pre_prepare, prepare, commit):
    o._validator = OrderingServiceMsgValidator(o._data)
    o.process_preprepare(pre_prepare, PRIMARY_NAME)
    o.last_ordered_3pc = (commit.viewNo, commit.ppSeqNo + 1)
    for i in range(o._data.quorums.prepare.value):
        if o._data.validators[i] + ":0" != o.name:
            o.process_prepare(prepare, o._data.validators[i])

    assert o.commits[(commit.viewNo,
                      commit.ppSeqNo)] == ThreePhaseVotes(voters={o.name},
                                                          msg=commit)
    sender = o._data.validators[-1]
    o.process_commit(commit, sender)
    assert o.commits[(commit.viewNo, commit.ppSeqNo)] == ThreePhaseVotes(
        voters={o.name, sender}, msg=commit)
Example #9
def txnPoolNodeSet(txnPoolNodeSet):
    for node in txnPoolNodeSet:
        for replica in node.replicas.values():
            replica._ordering_service._validator = OrderingServiceMsgValidator(replica._consensus_data)
        node._view_changer.start_view_change = partial(trigger_view_change_on_node, node)
    yield txnPoolNodeSet
Example #10
def validator(consensus_data, view_no):
    cd = consensus_data("For3PCValidator")
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    return OrderingServiceMsgValidator(data=cd)
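
# In the validator fixtures above, `view_no` (and `consensus_data`) are
# themselves pytest fixtures injected by name. A minimal hypothetical
# parametrization of `view_no` (the parameter values are an assumption, not
# taken from the original test module):
import pytest

@pytest.fixture(params=[0, 2])
def view_no(request):
    # Run the dependent validator tests both in the initial view and a later one.
    return request.param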