def test_new_view_message_is_not_sent_by_non_primary_when_view_change_certificate_is_reached(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, some_item, is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    next_view_no = initial_view_no + 1
    primary_name = primary(next_view_no)
    non_primary_name = some_item(validators, exclude=[primary_name])
    service = view_change_service_builder(non_primary_name)

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    vc = create_view_change(initial_view_no)
    for vc_frm in non_primaries:
        external_bus.process_incoming(
            vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(
                ack, generateName(ack_frm, service._data.inst_id))

    # check that NewView hasn't been sent
    assert all(not isinstance(msg, NewView)
               for msg in external_bus.sent_messages)
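The quorum these tests accumulate is plenum's view-change quorum. A minimal sketch of the sizes involved, assuming plenum's Quorums helper behaves the same way as service._data.quorums in the tests (for N = 4 nodes, f = 1, so N - f = 3 ViewChange messages are needed):

from plenum.server.quorums import Quorums

# For a 4-node pool: f = 1 and the view-change quorum is N - f = 3
quorums = Quorums(4)
assert quorums.f == 1
assert quorums.view_change.value == 3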
def test_new_view_message_is_sent_by_primary_when_view_change_certificate_is_reached(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, view_change_acks,
        is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    primary_name = primary(initial_view_no + 1)
    service = view_change_service_builder(primary_name)

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    vc = create_view_change(initial_view_no)

    for vc_frm in non_primaries:
        external_bus.process_incoming(
            vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in view_change_acks(vc, vc_frm, primary_name,
                                             len(validators) - 2):
            external_bus.process_incoming(
                ack, generateName(ack_frm, service._data.inst_id))

    # check that NewView has been sent
    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert dst is None  # message was broadcast
    assert isinstance(msg, NewView)
    assert msg.viewNo == initial_view_no + 1
def test_view_change_finished_is_sent_by_non_primary_once_view_change_certificate_is_reached_and_new_view_from_primary(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, some_item, is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    handler = Mock()
    internal_bus.subscribe(NewViewAccepted, handler)

    next_view_no = initial_view_no + 1
    primary_name = primary(next_view_no)
    non_primary_name = some_item(validators, exclude=[primary_name])
    service = view_change_service_builder(non_primary_name)
    vc = create_view_change(initial_view_no)
    service._data.preprepared = vc.preprepared
    service._data.prepared = vc.prepared
    service._data.stable_checkpoint = vc.stableCheckpoint
    service._data.checkpoints = vc.checkpoints
    old_data = copy_shared_data(service._data)

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    non_primaries = random.sample(non_primaries,
                                  service._data.quorums.view_change.value)
    new_view = create_new_view_from_vc(vc, non_primaries)
    for vc_frm in non_primaries:
        external_bus.process_incoming(
            vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(
                ack, generateName(ack_frm, service._data.inst_id))

        # check that NewViewAccepted hasn't been sent if NewView is from non-primary
        external_bus.process_incoming(
            new_view, generateName(non_primary_name, service._data.inst_id))
    handler.assert_not_called()
    assert service._data.view_no == initial_view_no + 1
    assert service._data.waiting_for_new_view

    # check that NewViewAccepted has been sent if NewView is from primary
    external_bus.process_incoming(
        new_view, generateName(primary_name, service._data.inst_id))
    expected_finish_vc = NewViewAccepted(view_no=initial_view_no + 1,
                                         view_changes=new_view.viewChanges,
                                         checkpoint=new_view.checkpoint,
                                         batches=new_view.batches)
    handler.assert_called_with(expected_finish_vc)

    # check that shared data is updated
    new_data = copy_shared_data(service._data)
    check_service_changed_only_owned_fields_in_shared_data(
        ViewChangeService, old_data, new_data)
    assert service._data.view_no == initial_view_no + 1
    assert not service._data.waiting_for_new_view
def test_send_instance_change_on_new_view_with_incorrect_checkpoint(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, some_item, is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    next_view_no = initial_view_no + 1
    primary_name = primary(next_view_no)
    non_primary_name = some_item(validators, exclude=[primary_name])
    service = view_change_service_builder(non_primary_name)

    vc = create_view_change(initial_view_no)
    service._data.preprepared = vc.preprepared
    service._data.prepared = vc.prepared
    service._data.stable_checkpoint = vc.stableCheckpoint
    service._data.checkpoints = vc.checkpoints

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    non_primaries = random.sample(non_primaries,
                                  service._data.quorums.view_change.value)
    for vc_frm in non_primaries:
        external_bus.process_incoming(
            vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(
                ack, generateName(ack_frm, service._data.inst_id))

    cp = Checkpoint(instId=0,
                    viewNo=initial_view_no,
                    seqNoStart=0,
                    seqNoEnd=1000,
                    digest=cp_digest(1000))
    new_view = create_new_view_from_vc(vc, non_primaries, checkpoint=cp)

    # receive NewView from the primary
    init_network_msg_count = len(external_bus.sent_messages)
    external_bus.process_incoming(
        new_view, generateName(primary_name, service._data.inst_id))

    # we don't complete the view change, just send an InstanceChange
    assert service._data.view_no == initial_view_no + 1
    assert init_network_msg_count + 1 == len(external_bus.sent_messages)
    msg, dst = external_bus.sent_messages[-1]
    assert dst is None  # broadcast
    assert isinstance(msg, InstanceChange)
    assert msg.viewNo == initial_view_no + 2
    assert msg.reason == Suspicions.NEW_VIEW_INVALID_CHECKPOINTS.code
Example 5
    def process_need_view_change(self, msg: NeedViewChange):
        logger.info("{} processing {}".format(self, msg))

        # 1. calculate new viewno
        view_no = msg.view_no
        if view_no is None:
            view_no = self._data.view_no + 1

        # 2. Do cleanup before new view change starts
        self._clean_on_view_change_start()

        # 3. Update shared data
        self._data.view_no = view_no
        self._data.waiting_for_new_view = True
        old_primary = self._data.primary_name
        self._data.primary_name = None
        if not self._data.is_master:
            self._data.master_reordered_after_vc = False
            return

        # Only the master primary is selected at the beginning of the view change,
        # since we need to get a NEW_VIEW and do re-ordering on the master.
        # Backup primaries will not be selected (and backups will not order) until
        # re-ordering of txns from the previous view has finished on the master;
        # more precisely, this happens after the first batch in the new view is committed.
        # This is done because N and F may change as a result of NODE txns ordered in the
        # last view, so we need a synchronous point for updating N, F, the number of
        # replicas and the backup primaries. The beginning of a view (when the first
        # batch in the view is ordered) is such a point.
        self._data.primary_name = generateName(
            self._primaries_selector.select_master_primary(self._data.view_no),
            self._data.inst_id)

        if old_primary and self._data.primary_name == old_primary:
            logger.info("Selected master primary is the same as the "
                        "current master primary (new_view {}). "
                        "Propose a new view {}".format(self._data.view_no,
                                                       self._data.view_no + 1))
            self._propose_view_change(Suspicions.INCORRECT_NEW_PRIMARY)

        logger.info(
            "{} started view change to view {}. Expected Master Primary: {}".
            format(self._data.name, self._data.view_no,
                   self._data.primary_name))

        # 4. Build ViewChange message
        vc = self._build_view_change_msg()

        # 5. Send ViewChangeStarted via internal bus to update other services
        logger.info("{} sending {}".format(self, vc))
        self._bus.send(ViewChangeStarted(view_no=self._data.view_no))

        # 6. Send ViewChange msg to other nodes (via external bus)
        self._network.send(vc)
        self.view_change_votes.add_view_change(vc, self._data.name)

        # 7. Unstash messages for view change
        self._router.process_all_stashed(STASH_WAITING_VIEW_CHANGE)
        self._stashed_vc_msgs.clear()

        # 8. Restart instance change timer
        self._resend_inst_change_timer.stop()
        self._resend_inst_change_timer.start()
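The snippets above consistently address replicas by a combined name. A minimal sketch of the naming helpers used throughout, assuming generateName and getNodeName live in plenum.common.util and follow the 'node_name:inst_id' convention the code relies on:

from plenum.common.util import generateName, getNodeName

replica_name = generateName("Alpha", 0)  # 'node_name:inst_id' form
assert replica_name == "Alpha:0"
assert getNodeName(replica_name) == "Alpha"  # strips the instance id again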
def test_send_reply_on_old_view_pre_prepares_request(
        external_bus, orderer, initial_view_no, stored_old_view_pre_prepares,
        requested_old_view_pre_prepares):
    # Setup
    orderer._data.view_no = initial_view_no + 2

    orderer._update_old_view_preprepares(stored_old_view_pre_prepares)

    # Receive OldViewPrePrepareRequest
    batches = [
        preprepare_to_batch_id(pp) for pp in requested_old_view_pre_prepares
    ]
    req = OldViewPrePrepareRequest(0, batches)
    frm = "node1"
    orderer._network.process_incoming(req,
                                      generateName(frm, orderer._data.inst_id))

    # Check that OldViewPrePrepareReply is sent for all requested PrePrepares
    if not orderer.is_master:
        assert len(external_bus.sent_messages) == 0
        return
    # order-preserving intersection of stored and requested PrePrepares
    expected_pps = [
        i for i in stored_old_view_pre_prepares
        if i in requested_old_view_pre_prepares
    ]
    expected_pps = sorted(expected_pps, key=lambda pp: pp.ppSeqNo)
    check_reply_old_view_preprepares_sent(external_bus, frm, expected_pps)
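The expected_pps comprehension above keeps the stored PrePrepares that were also requested, in storage order: an order-preserving intersection rather than a union. A tiny self-contained illustration:

stored = [1, 3, 5, 7]   # stands in for stored_old_view_pre_prepares
requested = [7, 3]      # stands in for requested_old_view_pre_prepares
assert [i for i in stored if i in requested] == [3, 7]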
Example 7
    def __init__(self,
                 node_count: int = 4,
                 random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random,
                                   self._serialize_deserialize)
        self._nodes = []
        validators = genNodeNames(node_count)
        # ToDo: maybe this should be chosen at random too?
        primary_name = validators[0]

        genesis_txns = create_pool_txn_data(
            node_names=validators,
            crypto_factory=create_default_bls_crypto_factory(),
            get_free_port=partial(self._random.integer, 9000, 9999))['txns']

        for name in validators:
            # TODO: emulate it the same way as in Replica, i.e. the sender must have the 'node_name:inst_id' form
            replica_name = generateName(name, 0)
            handler = partial(self.network._send_message, replica_name)
            write_manager = create_test_write_req_manager(name, genesis_txns)
            write_manager.node_reg_handler.node_reg_at_beginning_of_view[
                0] = validators
            replica = ReplicaService(replica_name,
                                     validators,
                                     primary_name,
                                     self._timer,
                                     InternalBus(),
                                     self.network.create_peer(name, handler),
                                     write_manager=write_manager,
                                     bls_bft_replica=MockBlsBftReplica())
            replica.config.NEW_VIEW_TIMEOUT = 30 * 1000
            self._nodes.append(replica)
def test_non_primary_responds_to_view_change_message_with_view_change_ack_to_new_primary(
        internal_bus, external_bus, some_item, other_item, validators, primary,
        view_change_service_builder, initial_view_no, is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    next_view_no = initial_view_no + 1
    non_primary_name = some_item(validators, exclude=[primary(next_view_no)])
    service = view_change_service_builder(non_primary_name)

    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    vc = create_view_change(initial_view_no)
    frm = other_item(validators, exclude=[non_primary_name])
    external_bus.process_incoming(vc, generateName(frm, service._data.inst_id))

    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert dst == [getNodeName(service._data.primary_name)]
    assert isinstance(msg, ViewChangeAck)
    assert msg.viewNo == vc.viewNo
    assert msg.name == frm
    assert msg.digest == view_change_digest(vc)
Example 9
    def add_new_node(self, name):
        if name not in self.validators:
            self.validators.append(name)

        # TODO: emulate it the same way as in Replica, i.e. the sender must have the 'node_name:inst_id' form
        replica_name = generateName(name, 0)
        handler = partial(self.network._send_message, replica_name)
        write_manager = create_test_write_req_manager(name, self._genesis_txns)
        write_manager.node_reg_handler.committed_node_reg_at_beginning_of_view[0] = self._genesis_validators
        write_manager.node_reg_handler.uncommitted_node_reg_at_beginning_of_view[0] = self._genesis_validators
        _internal_bus = InternalBus()
        self._internal_buses[name] = _internal_bus
        self._subscribe_to_internal_msgs(name)
        replica = ReplicaService(replica_name,
                                 self.validators,
                                 self._primary_name,
                                 self._timer,
                                 _internal_bus,
                                 self.network.create_peer(name, handler),
                                 write_manager=write_manager,
                                 bls_bft_replica=MockBlsBftReplica())
        replica._data.node_mode = Mode.participating
        self._nodes.append(replica)
        self._update_connecteds()
        logger.info("Node {} was added into pool".format(name))
def test_do_not_send_instance_change_on_timeout_when_multiple_view_change_finished_on_time(internal_bus, external_bus,
                                                                                           validators,
                                                                                           primary,
                                                                                           view_change_service_builder,
                                                                                           timer,
                                                                                           initial_view_no,
                                                                                           is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    primary_name = primary(initial_view_no + 2)
    service = view_change_service_builder(primary_name)

    # start first view change
    internal_bus.send(NeedViewChange())

    # start second view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    vc = create_view_change(initial_view_no + 1)
    service._data.checkpoints.append(Checkpoint(instId=0,
                                                viewNo=initial_view_no + 1,
                                                seqNoStart=0,
                                                seqNoEnd=DEFAULT_STABLE_CHKP,
                                                digest=cp_digest(DEFAULT_STABLE_CHKP)))
    for vc_frm in non_primaries:
        external_bus.process_incoming(vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(ack, generateName(ack_frm, service._data.inst_id))

    # check that view change is finished
    assert service._data.view_no == initial_view_no + 2
    assert not service._data.waiting_for_new_view
    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert isinstance(msg, NewView)

    # make sure view change hasn't been started again
    timer.sleep(service._config.NEW_VIEW_TIMEOUT + 1)
    assert service._data.view_no == initial_view_no + 2
    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert isinstance(msg, NewView)
Example 11
    def process_need_view_change(self, msg: NeedViewChange):
        self._logger.info("{} processing {}".format(self, msg))

        # 1. calculate new viewno
        view_no = msg.view_no
        if view_no is None:
            view_no = self._data.view_no + 1

        # 2. Do cleanup before new view change starts
        self._clean_on_view_change_start()

        # 3. Update shared data
        self._data.view_no = view_no
        self._data.waiting_for_new_view = True
        self._data.primaries = self._primaries_selector.select_primaries(
            view_no=self._data.view_no,
            instance_count=self._data.quorums.f + 1,
            validators=self._data.validators)
        for i, primary_name in enumerate(self._data.primaries):
            self._logger.display(
                "{} selected primary {} for instance {} (view {})".format(
                    PRIMARY_SELECTION_PREFIX, primary_name, i,
                    self._data.view_no),
                extra={
                    "cli": "ANNOUNCE",
                    "tags": ["node-election"]
                })

        old_primary = self._data.primary_name
        self._data.primary_name = generateName(
            self._data.primaries[self._data.inst_id], self._data.inst_id)

        if not self._data.is_master:
            return

        if old_primary and self._data.primary_name == old_primary:
            self._logger.info("Selected master primary is the same with the "
                              "current master primary (new_view {}). "
                              "Propose a new view {}".format(
                                  self._data.view_no, self._data.view_no + 1))
            self._propose_view_change(Suspicions.INCORRECT_NEW_PRIMARY.code)

        # 4. Build ViewChange message
        vc = self._build_view_change_msg()

        # 5. Send ViewChangeStarted via internal bus to update other services
        self._logger.info("{} sending {}".format(self, vc))
        self._bus.send(ViewChangeStarted(view_no=self._data.view_no))

        # 6. Send ViewChange msg to other nodes (via external bus)
        self._network.send(vc)
        self.view_change_votes.add_view_change(vc, self._data.name)

        # 7. Unstash messages for view change
        self._router.process_all_stashed(STASH_WAITING_VIEW_CHANGE)

        # 8. Restart instance change timer
        self._resend_inst_change_timer.stop()
        self._resend_inst_change_timer.start()
def check_backup_primaries():
    assert delayed_node.replicas[BACKUP_INST_ID]._consensus_data.primary_name is None
    assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before
    assert all(
        n.replicas[BACKUP_INST_ID]._consensus_data.primary_name == generateName(delayed_node.name,
                                                                                instId=BACKUP_INST_ID)
        for n in fast_nodes
    )
Example 13
    def __init__(self,
                 name: str,
                 validators: List[str],
                 primary_name: str,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 write_manager: WriteRequestManager,
                 bls_bft_replica: BlsBftReplica = None):
        # ToDo: Maybe ConsensusSharedData should be instantiated beforehand and passed in already prepared?
        self._internal_bus = bus
        self._data = ConsensusSharedData(name, validators, 0)
        self._data.primary_name = generateName(primary_name,
                                               self._data.inst_id)
        self.config = getConfig()
        self.stasher = StashingRouter(self.config.REPLICA_STASH_LIMIT,
                                      buses=[bus, network])
        self._write_manager = write_manager
        self._primaries_selector = RoundRobinNodeRegPrimariesSelector(
            self._write_manager.node_reg_handler)
        self._orderer = OrderingService(
            data=self._data,
            timer=timer,
            bus=bus,
            network=network,
            write_manager=self._write_manager,
            bls_bft_replica=bls_bft_replica,
            freshness_checker=FreshnessChecker(
                freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL),
            primaries_selector=self._primaries_selector,
            stasher=self.stasher)
        self._checkpointer = CheckpointService(self._data, bus, network,
                                               self.stasher,
                                               write_manager.database_manager)
        self._view_changer = ViewChangeService(self._data, timer, bus, network,
                                               self.stasher,
                                               self._primaries_selector)
        self._message_requestor = MessageReqService(self._data, bus, network)

        self._add_ledgers()

        # TODO: This is for testing purposes only
        self._data.checkpoints.append(
            Checkpoint(instId=0,
                       viewNo=0,
                       seqNoStart=0,
                       seqNoEnd=0,
                       digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))

        # ToDo: it should be done in the zero-view stage.
        write_manager.on_catchup_finished()
        self._data.primaries = self._view_changer._primaries_selector.select_primaries(
            self._data.view_no)

        # ToDo: ugly way to detect node_reg changes
        self._previous_node_reg = self._write_manager.node_reg_handler.committed_node_reg

        bus.subscribe(Ordered, self.emulate_ordered_processing)
def validator(view_no):
    validators = genNodeNames(4)
    inst_id = 0
    cd = ConsensusSharedData(generateName(validators[0], inst_id), validators,
                             inst_id, True)
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    return OrderingServiceMsgValidator(data=cd)
def validator(view_no):
    validators = genNodeNames(4)
    inst_id = 0
    cd = ConsensusSharedData(generateName(validators[0], inst_id), validators, inst_id, True)
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    cd.node_status = Status.started
    cd.prev_view_prepare_cert = cd.last_ordered_3pc[1]
    return OrderingServiceMsgValidator(data=cd)
def test_process_preprepare_on_old_view_pre_prepares_reply(
        external_bus, internal_bus, orderer, is_primary, initial_view_no,
        pre_prepares):
    # !!!SETUP!!!
    orderer._data.view_no = initial_view_no + 1
    new_view = create_new_view(
        initial_view_no=initial_view_no,
        stable_cp=200,
        batches=create_batches_from_preprepares(pre_prepares))
    orderer._data.new_view_votes.add_new_view(new_view,
                                              orderer._data.primary_name)
    orderer._data.prev_view_prepare_cert = new_view.batches[-1].pp_seq_no

    # !!!EXECUTE!!!
    rep = OldViewPrePrepareReply(0, [pp._asdict() for pp in pre_prepares])
    orderer._network.process_incoming(
        rep, generateName("node1", orderer._data.inst_id))

    # !!!CHECK!!!
    if not orderer.is_master:
        # no re-ordering is expected on non-master
        assert orderer._data.preprepared == []
        assert orderer._data.prepared == []
        return

    # check that PPs were added
    assert orderer._data.preprepared == [
        BatchID(view_no=initial_view_no + 1,
                pp_view_no=pp.viewNo,
                pp_seq_no=pp.ppSeqNo,
                pp_digest=pp.digest) for pp in pre_prepares
    ]

    # check that sent_preprepares is updated for a primary and prePrepares for a non-primary
    updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
    non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
    for pp in pre_prepares:
        new_pp = updateNamedTuple(pp,
                                  viewNo=initial_view_no + 1,
                                  originalViewNo=pp.viewNo)
        assert (initial_view_no + 1,
                new_pp.ppSeqNo) in updated_prepares_collection
        assert updated_prepares_collection[(initial_view_no + 1,
                                            new_pp.ppSeqNo)] == new_pp
    assert not non_updated_prepares_collection

    # check that Prepare is sent by a non-primary
    if not is_primary:
        check_prepares_sent(external_bus, pre_prepares, initial_view_no + 1)
    else:
        assert len(external_bus.sent_messages) == 0

    # we don't have a quorum of Prepares yet
    assert orderer._data.prepared == []
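updateNamedTuple above yields a copy of each PrePrepare with viewNo bumped to the new view and the original view recorded in originalViewNo. A minimal sketch of that semantics, assuming the plenum helper behaves like namedtuple's _replace for existing fields:

from collections import namedtuple

PP = namedtuple('PP', ['viewNo', 'ppSeqNo'])
pp = PP(viewNo=0, ppSeqNo=10)
new_pp = pp._replace(viewNo=1)  # copy with the view number updated
assert new_pp == PP(viewNo=1, ppSeqNo=10)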
def test_primary_doesnt_respond_to_view_change_message(
        some_item, validators, primary, external_bus, view_change_service_builder, initial_view_no,
        view_change_message, is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    name = primary(initial_view_no + 1)
    service = view_change_service_builder(name)

    vc = create_view_change(initial_view_no)
    frm = some_item(validators, exclude=[name])
    external_bus.process_incoming(vc, generateName(frm, service._data.inst_id))

    assert len(external_bus.sent_messages) == 0
    def _finish_view_change(self):
        # Update shared data
        self._data.waiting_for_new_view = False
        self._data.prev_view_prepare_cert = self._data.new_view.batches[-1].pp_seq_no \
            if self._data.new_view.batches else self._data.new_view.checkpoint.seqNoEnd
        if f.PRIMARY.nm in self._data.new_view:
            self._data.primary_name = generateName(self._data.new_view.primary,
                                                   self._data.inst_id)

        logger.info("{} finished view change to view {}. Master Primary: {}".format(self._data.name,
                                                                                    self._data.view_no,
                                                                                    self._data.primary_name))
        # Cancel View Change timeout task
        self._resend_inst_change_timer.stop()
        # send message to other services
        self._bus.send(NewViewAccepted(view_no=self._data.new_view.viewNo,
                                       view_changes=self._data.new_view.viewChanges,
                                       checkpoint=self._data.new_view.checkpoint,
                                       batches=self._data.new_view.batches))
        self.last_completed_view_no = self._data.view_no
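_finish_view_change derives prev_view_prepare_cert from the accepted NewView: the last batch's pp_seq_no when batches were carried over, otherwise the stable checkpoint's seqNoEnd. A worked illustration of that fallback rule (the names below are hypothetical stand-ins for the new_view fields):

batches = []                 # new_view.batches is empty
checkpoint_seq_no_end = 200  # new_view.checkpoint.seqNoEnd
cert = batches[-1].pp_seq_no if batches else checkpoint_seq_no_end
assert cert == 200           # falls back to the stable checkpoint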
Example 19
    def __init__(self,
                 node_count: int = 4,
                 random: Optional[SimRandom] = None):
        self._random = random if random else DefaultSimRandom()
        self._timer = MockTimer()
        self._network = SimNetwork(self._timer, self._random,
                                   self._serialize_deserialize)
        self._nodes = []
        validators = genNodeNames(node_count)
        # ToDo: maybe this should be chosen at random too?
        primary_name = validators[0]

        genesis_txns = create_pool_txn_data(
            node_names=validators,
            crypto_factory=create_default_bls_crypto_factory(),
            get_free_port=partial(self._random.integer, 9000, 9999))['txns']

        for name in validators:
            # TODO: emulate it the same way as in Replica, i.e. the sender must have the 'node_name:inst_id' form
            replica_name = generateName(name, 0)
            handler = partial(self.network._send_message, replica_name)
            write_manager = create_test_write_req_manager(name, genesis_txns)
            replica = ReplicaService(replica_name,
                                     validators,
                                     primary_name,
                                     self._timer,
                                     InternalBus(),
                                     self.network.create_peer(name, handler),
                                     write_manager=write_manager,
                                     bls_bft_replica=MockBlsBftReplica())
            # ToDo: for now, future_primaries_handler depends on the node,
            # and we need to patch the set_node_state functionality
            future_primaries_handler = FuturePrimariesBatchHandler(
                write_manager.database_manager,
                FakeSomething(nodeReg={}, nodeIds=[]))
            future_primaries_handler._get_primaries = lambda *args, **kwargs: replica._data.primaries
            write_manager.register_batch_handler(future_primaries_handler)
            # ToDo: also, it should be done at the zero-view stage.
            write_manager.future_primary_handler.set_node_state()
            replica.config.NEW_VIEW_TIMEOUT = 30 * 1000
            self._nodes.append(replica)
Example 20
    def process_need_view_change(self, msg: NeedViewChange):
        self._logger.info("{} processing {}".format(self, msg))

        # 1. calculate new viewno
        view_no = msg.view_no
        if view_no is None:
            view_no = self._data.view_no + 1

        # 2. Do cleanup before new view change starts
        self._clean_on_view_change_start()

        # 3. Update shared data
        self._data.view_no = view_no
        self._data.waiting_for_new_view = True
        self._data.primaries = self._primaries_selector.select_primaries(
            view_no=self._data.view_no,
            instance_count=self._data.quorums.f + 1,
            validators=self._data.validators)
        self._data.primary_name = generateName(
            self._data.primaries[self._data.inst_id], self._data.inst_id)

        if not self._data.is_master:
            return

        # 4. Build ViewChange message
        vc = self._build_view_change_msg()

        # 5. Send ViewChangeStarted via internal bus to update other services
        self._logger.info("{} sending {}".format(self, vc))
        self._bus.send(ViewChangeStarted(view_no=self._data.view_no))

        # 6. Send ViewChange msg to other nodes (via external bus)
        self._network.send(vc)
        self.view_change_votes.add_view_change(vc, self._data.name)

        # 7. Unstash messages for view change
        self._router.process_all_stashed(STASH_WAITING_VIEW_CHANGE)

        # 8. Restart instance change timer
        self._resend_inst_change_timer.stop()
        self._resend_inst_change_timer.start()
def sender_orderer(primary_orderer, sender, inst_id):
    return generateName(sender, inst_id)
Example 22
def _data(name):
    data = ConsensusSharedData(generateName(name, 0), validators, 0, is_master)
    data.view_no = initial_view_no
    return data
Example 23
    def __init__(self, name: str, validators: List[str], primary_name: str,
                 timer: TimerService, bus: InternalBus, network: ExternalBus,
                 write_manager: WriteRequestManager,
                 bls_bft_replica: BlsBftReplica = None):
        # ToDo: Maybe ConsensusSharedData should be instantiated beforehand and passed in already prepared?
        self._network = network
        self._data = ConsensusSharedData(name, validators, 0)
        self._data.primary_name = generateName(primary_name, self._data.inst_id)
        self._timer = timer
        self.config = getConfig()
        self.stasher = StashingRouter(self.config.REPLICA_STASH_LIMIT, buses=[bus, network])
        self._write_manager = write_manager
        self._primaries_selector = RoundRobinNodeRegPrimariesSelector(self._write_manager.node_reg_handler)

        self._freshness_checker = FreshnessChecker(freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL)
        for ledger_id in [POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID]:
            self._freshness_checker.register_ledger(ledger_id=ledger_id,
                                                    initial_time=self.get_time_for_3pc_batch())

        self._orderer = OrderingService(data=self._data,
                                        timer=self._timer,
                                        bus=bus,
                                        network=network,
                                        write_manager=self._write_manager,
                                        bls_bft_replica=bls_bft_replica,
                                        freshness_checker=self._freshness_checker,
                                        get_time_for_3pc_batch=self.get_time_for_3pc_batch,
                                        stasher=self.stasher)
        self._checkpointer = CheckpointService(self._data, bus, network, self.stasher,
                                               write_manager.database_manager)
        self._view_changer = ViewChangeService(self._data, self._timer, bus, network, self.stasher, self._primaries_selector)
        self._view_change_trigger = ViewChangeTriggerService(data=self._data,
                                                             timer=self._timer,
                                                             bus=bus,
                                                             network=network,
                                                             db_manager=write_manager.database_manager,
                                                             is_master_degraded=lambda: False,
                                                             stasher=self.stasher)
        self._primary_connection_monitor = PrimaryConnectionMonitorService(data=self._data,
                                                                           timer=self._timer,
                                                                           bus=bus,
                                                                           network=network)
        self._freshness_monitor = FreshnessMonitorService(data=self._data,
                                                          timer=self._timer,
                                                          bus=bus,
                                                          network=network,
                                                          freshness_checker=self._freshness_checker,
                                                          get_time_for_3pc_batch=self.get_time_for_3pc_batch)
        self._message_requestor = MessageReqService(self._data, bus, network)

        self._add_ledgers()

        # TODO: This is for testing purposes only
        self._data.checkpoints.append(
            Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0,
                       digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))

        # ToDo: it should be done in the zero-view stage.
        write_manager.on_catchup_finished()

        # Simulate node behavior
        self._internal_bus = bus
        self._internal_bus.subscribe(NodeNeedViewChange, self.process_node_need_view_change)
        self._internal_bus.subscribe(Ordered, self.emulate_ordered_processing)

        # ToDo: ugly way to detect node_reg changes
        self._previous_node_reg = self._write_manager.node_reg_handler.committed_node_reg
Example 24
def _data(name):
    data = ConsensusSharedData(generateName(name, 0), validators, 0,
                               is_master)
    data.view_no = initial_view_no
    data.checkpoints.update(initial_checkpoints)
    return data