Example #1
def orderer(consensus_data, internal_bus, external_bus, name, write_manager,
            txn_roots, state_roots, bls_bft_replica, tconf, stasher):
    orderer = OrderingService(
        data=consensus_data(name),
        timer=QueueTimer(),
        bus=internal_bus,
        network=external_bus,
        write_manager=write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=tconf.STATE_FRESHNESS_UPDATE_INTERVAL),
        stasher=stasher)
    orderer._data.node_mode = Mode.participating
    orderer._data.primary_name = "Alpha:0"
    orderer.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger]
    orderer.get_state_root_hash = lambda ledger, to_str=False: state_roots[
        ledger]
    orderer.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    orderer._revert = lambda *args, **kwargs: None
    orderer.db_manager.stores[LAST_SENT_PP_STORE_LABEL] = \
        FakeSomething(store_last_sent_pp_seq_no=lambda b, c: None)
    future_primaries_handler = FuturePrimariesBatchHandler(
        write_manager.database_manager, FakeSomething(nodeReg={}, nodeIds=[]))
    future_primaries_handler.get_primaries = lambda *args, **kwargs: orderer._data.primaries
    write_manager.register_batch_handler(future_primaries_handler)
    return orderer
Example #2
def fake_view_changer(request, tconf):
    node_count = 4
    node_stack = FakeSomething(
        name="fake stack",
        connecteds={"Alpha", "Beta", "Gamma", "Delta"},
        conns={"Alpha", "Beta", "Gamma", "Delta"}
    )
    monitor = FakeSomething(
        isMasterDegraded=lambda: False,
        areBackupsDegraded=lambda: [],
        prettymetrics=''
    )
    node = FakeSomething(
        name="SomeNode",
        timer=QueueTimer(),
        viewNo=request.param,
        quorums=Quorums(getValueFromModule(request, 'nodeCount', default=node_count)),
        nodestack=node_stack,
        utc_epoch=lambda *args: get_utc_epoch(),
        config=tconf,
        monitor=monitor,
        discard=lambda a, b, c, d: print(b),
        primaries_disconnection_times=[None] * getRequiredInstances(node_count),
        master_primary_name='Alpha',
        master_replica=FakeSomething(instId=0),
        nodeStatusDB=None
    )
    view_changer = create_view_changer(node)
    # TODO: This is a hack for test compatibility; do something better
    view_changer.node = node
    return view_changer
def fake_node(tconf):
    node = FakeSomething(
        config=tconf,
        timer=QueueTimer(),
        nodeStatusDB=None,
        master_replica=FakeSomething(
            inBox=deque(),
            inBoxRouter=Router(),
            logger=FakeSomething(info=lambda *args, **kwargs: True)),
        name="Alpha",
        master_primary_name="Alpha",
        on_view_change_start=lambda *args, **kwargs: True,
        start_catchup=lambda *args, **kwargs: True,
        nodeInBox=deque(),
        nodeMsgRouter=Router(),
        metrics=None,
        process_one_node_message=None,
        quota_control=FakeSomething(node_quota=Quota(count=100, size=100)),
        nodestack=FakeSomething(
            service=lambda *args, **kwargs: eventually(lambda: True)),
        set_view_for_replicas=lambda view_no: None,
        set_view_change_status=lambda view_no: None)
    node.metrics = functools.partial(Node._createMetricsCollector, node)()
    node.process_one_node_message = functools.partial(
        Node.process_one_node_message, node)
    return node
Example #4
 def __init__(self, viewNo, quorums, ledger_ids):
     node_names = ["Alpha", "Beta", "Gamma", "Delta"]
     node_stack = FakeSomething(
         name="fake stack",
         connecteds=set(node_names)
     )
     self.replicas = []
     self.viewNo = viewNo
     audit_ledger = FakeSomething(size=0)
     db_manager = DatabaseManager()
     db_manager.register_new_database(AUDIT_LEDGER_ID, audit_ledger)
     super().__init__(
         name="fake node",
         ledger_ids=ledger_ids,
         _viewNo=viewNo,
         quorums=quorums,
         nodestack=node_stack,
         utc_epoch=lambda *args: get_utc_epoch(),
         mode=Mode.participating,
         view_change_in_progress=False,
         requests=Requests(),
         onBatchCreated=lambda self, *args, **kwargs: True,
         applyReq=lambda self, *args, **kwargs: True,
         primaries=[],
         get_validators=lambda: [],
         db_manager=db_manager,
         write_manager=FakeSomething(database_manager=db_manager,
                                     apply_request=lambda req, cons_time: None),
         timer=QueueTimer(),
         poolManager=FakeSomething(node_names_ordered_by_rank=lambda: node_names)
     )
Example #5
 def __init__(self, viewNo, quorums, ledger_ids):
     node_stack = FakeSomething(
         name="fake stack", connecteds={"Alpha", "Beta", "Gamma", "Delta"})
     self.replicas = []
     self.viewNo = viewNo
     super().__init__(name="fake node",
                      ledger_ids=ledger_ids,
                      _viewNo=viewNo,
                      quorums=quorums,
                      nodestack=node_stack,
                      utc_epoch=lambda *args: get_utc_epoch(),
                      mode=Mode.participating,
                      view_change_in_progress=False,
                      pre_view_change_in_progress=False,
                      requests=Requests(),
                      onBatchCreated=lambda self, *args, **kwargs: True,
                      applyReq=lambda self, *args, **kwargs: True,
                      primaries=[],
                      get_validators=lambda: [],
                      db_manager=None,
                      internal_bus=InternalBus(),
                      write_manager=FakeSomething(
                          database_manager=DatabaseManager(),
                          apply_request=lambda req, cons_time: None),
                      timer=QueueTimer())
Example #6
def orderer(consensus_data, internal_bus, external_bus, name, write_manager,
            txn_roots, state_roots, bls_bft_replica, tconf, stasher,
            validators):
    orderer = OrderingService(
        data=consensus_data(name),
        timer=QueueTimer(),
        bus=internal_bus,
        network=external_bus,
        write_manager=write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=tconf.STATE_FRESHNESS_UPDATE_INTERVAL),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            validators),
        stasher=stasher)
    orderer._data.node_mode = Mode.participating
    orderer._data.primary_name = "Alpha:0"
    orderer.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger]
    orderer.get_state_root_hash = lambda ledger, to_str=False: state_roots[
        ledger]
    orderer.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    orderer._revert = lambda *args, **kwargs: None
    orderer.db_manager.stores[LAST_SENT_PP_STORE_LABEL] = \
        FakeSomething(store_last_sent_pp_seq_no=lambda b, c: None)
    return orderer
def test_timer_cancel_callback_doesnt_crash_for_nonexistant_callback():
    ts = MockTimestamp(0)
    timer = QueueTimer(ts)
    cb = Callback()

    # This shouldn't crash
    timer.cancel(cb)

    # Make sure that a callback scheduled after the cancel is still called
    timer.schedule(5, cb)
    ts.value += 6
    timer.service()
    assert cb.call_count == 1

    # And this still shouldn't crash
    timer.cancel(cb)
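The timer tests in this listing use two small helpers that none of the snippets define. A minimal sketch of what they could look like, purely as hypothetical stand-ins consistent with how the tests use them (a callable clock whose value is advanced by hand, and a callback that only counts its invocations):

class MockTimestamp:
    """Callable timestamp source; tests advance .value manually."""

    def __init__(self, value=0):
        self.value = value

    def __call__(self):
        return self.value


class Callback:
    """Callable that records how many times it has been invoked."""

    def __init__(self):
        self.call_count = 0

    def __call__(self):
        self.call_count += 1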
def test_timer_can_cancel_callback():
    ts = MockTimestamp(0)
    timer = QueueTimer(ts)
    cb = Callback()

    timer.schedule(5, cb)

    ts.value += 3
    timer.service()
    assert cb.call_count == 0

    timer.cancel(cb)

    ts.value += 3
    timer.service()
    assert cb.call_count == 0
def test_timer_can_cancel_callback_without_touching_other_callbacks():
    ts = MockTimestamp(0)
    timer = QueueTimer(ts)
    cb1 = Callback()
    cb2 = Callback()
    cb3 = Callback()

    timer.schedule(5, cb1)
    timer.schedule(3, cb2)
    timer.schedule(4, cb3)
    timer.cancel(cb2)

    ts.value += 6
    timer.service()
    assert cb1.call_count == 1
    assert cb2.call_count == 0
    assert cb3.call_count == 1
def test_timer_can_schedule_callback_twice():
    ts = MockTimestamp(0)
    timer = QueueTimer(ts)
    cb = Callback()

    timer.schedule(3, cb)
    timer.schedule(5, cb)

    ts.value += 4
    timer.service()
    assert cb.call_count == 1

    ts.value += 4
    timer.service()
    assert cb.call_count == 2
    def __init__(self, tmpdir, config=None):
        node_names = ['Node1', 'Node2', 'Node3', 'Node4']
        self.basedirpath = tmpdir
        self.name = node_names[0]
        self.viewNo = 0
        self.db_manager = DatabaseManager()
        self.timer = QueueTimer()
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = node_names
        self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
        self.nodeIds = []
        self.totalNodes = len(self.allNodeNames)
        self.poolManager = FakeSomething(
            node_names_ordered_by_rank=lambda: node_names)
        self.mode = Mode.starting
        self.monitor = FakeSomething(isMasterDegraded=lambda: False)
        self.config = config or getConfigOnce()
        self.nodeStatusDB = None
        self.quorums = Quorums(self.totalNodes)
        self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
        self.write_manager = FakeSomething(
            node_reg_handler=NodeRegHandler(self.db_manager))
        self.primaries_selector = RoundRobinConstantNodesPrimariesSelector(
            node_names)
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False,
                       config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config)
        }
        self.requiredNumberOfInstances = 2
        self._found = False
        self.ledgerManager = LedgerManager(self)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

        # callbacks
        self.onBatchCreated = lambda self, *args, **kwargs: True
Example #13
def orderer(consensus_data, internal_bus, external_bus, name, write_manager, txn_roots, state_roots, bls_bft_replica, is_master):
    orderer = OrderingService(data=consensus_data(name),
                              timer=QueueTimer(),
                              bus=internal_bus,
                              network=external_bus,
                              write_manager=write_manager,
                              bls_bft_replica=bls_bft_replica,
                              is_master=is_master)
    orderer._data.is_participating = True
    orderer.primary_name = "Alpha:0"
    orderer.l_txnRootHash = lambda ledger, to_str=False: txn_roots[ledger]
    orderer.l_stateRootHash = lambda ledger, to_str=False: state_roots[ledger]
    orderer.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    orderer.l_revert = lambda *args, **kwargs: None
    return orderer
def test_timer_can_schedule_different_callbacks():
    ts = MockTimestamp(0)
    timer = QueueTimer(ts)
    cb1 = Callback()
    cb2 = Callback()

    timer.schedule(5, cb1)
    timer.schedule(3, cb2)

    ts.value += 4
    timer.service()
    assert cb1.call_count == 0
    assert cb2.call_count == 1

    ts.value += 4
    timer.service()
    assert cb1.call_count == 1
    assert cb2.call_count == 1
def test_timer_triggers_callback_on_time():
    ts = MockTimestamp(0)
    timer = QueueTimer(ts)
    cb = Callback()

    timer.schedule(5, cb)
    assert cb.call_count == 0

    ts.value += 5
    timer.service()
    assert cb.call_count == 1
def test_primary_names_cleaning(tconf):
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=0,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            ["Alpha", "Beta", "Gamma", "Delta"]))
    bls_bft_replica = FakeSomething(gc=lambda *args: None, )

    replica = Replica(node,
                      instId=0,
                      config=tconf,
                      bls_bft_replica=bls_bft_replica)

    replica.primaryName = "Node1:0"
    assert list(replica.primaryNames.items()) == \
           [(0, "Node1:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node2:0"
    assert list(replica.primaryNames.items()) == \
           [(0, "Node1:0"), (1, "Node2:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node3:0"
    assert list(replica.primaryNames.items()) == \
           [(1, "Node2:0"), (2, "Node3:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node4:0"
    assert list(replica.primaryNames.items()) == \
           [(2, "Node3:0"), (3, "Node4:0")]
    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.internal_bus = InternalBus()
        self.db_manager = DatabaseManager()
        self.timer = QueueTimer()
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
        self.nodeIds = []
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.nodeStatusDB = None
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False,
                       config=self.config),
            2: Replica(node=self, instId=2, isMaster=False,
                       config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = create_view_changer(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.catchup_rounds_without_txns = 0
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

        # callbacks
        self.onBatchCreated = lambda self, *args, **kwargs: True
def test_timer_can_schedule_and_simultaneously_process_different_callbacks():
    ts = MockTimestamp(0)
    timer = QueueTimer(ts)
    cb1 = Callback()
    cb2 = Callback()

    timer.schedule(5, cb1)
    timer.schedule(3, cb2)

    ts.value += 6
    timer.service()
    assert cb1.call_count == 1
    assert cb2.call_count == 1
def test_ordered_cleaning(tconf):
    global_view_no = 2

    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=global_view_no,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            ["Alpha", "Beta", "Gamma", "Delta"]))
    bls_bft_replica = FakeSomething(gc=lambda *args: None, )

    replica = Replica(node,
                      instId=0,
                      config=tconf,
                      bls_bft_replica=bls_bft_replica)
    replica._consensus_data.view_no = global_view_no
    total = []

    num_requests_per_view = 3
    for viewNo in range(global_view_no + 1):
        for seqNo in range(num_requests_per_view):
            reqId = viewNo, seqNo
            replica._ordering_service._add_to_ordered(*reqId)
            total.append(reqId)

    # gc normally runs after a stable checkpoint; since no request is executed
    # in this test, trigger it manually
    replica._ordering_service.gc(100)
    # Requests with a view lower than the previous view
    # should not be in ordered
    assert len(replica._ordering_service.ordered) == len(
        total[num_requests_per_view:])
Example #20
def test_primary_names_cleaning(tconf):
    node = FakeSomething(name="fake node",
                         ledger_ids=[0],
                         viewNo=0,
                         utc_epoch=get_utc_epoch,
                         get_validators=lambda: [],
                         internal_bus=InternalBus(),
                         db_manager=DatabaseManager(),
                         requests=[],
                         mode=Mode.participating,
                         timer=QueueTimer(),
                         quorums=Quorums(4),
                         write_manager=None)
    bls_bft_replica = FakeSomething(gc=lambda *args: None, )

    replica = Replica(node,
                      instId=0,
                      config=tconf,
                      bls_bft_replica=bls_bft_replica)

    replica.primaryName = "Node1:0"
    assert list(replica.primaryNames.items()) == \
           [(0, "Node1:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node2:0"
    assert list(replica.primaryNames.items()) == \
           [(0, "Node1:0"), (1, "Node2:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node3:0"
    assert list(replica.primaryNames.items()) == \
           [(1, "Node2:0"), (2, "Node3:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node4:0"
    assert list(replica.primaryNames.items()) == \
           [(2, "Node3:0"), (3, "Node4:0")]
Example #21
 def __init__(self, get_current_time: Optional[MockTimestamp] = None):
     self._ts = get_current_time if get_current_time else MockTimestamp(0)
     QueueTimer.__init__(self, self._ts)
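The fragment above (Example #21) is only the constructor of what looks like a test timer that feeds a MockTimestamp into QueueTimer. A hedged sketch of how such a class could be completed and exercised, assuming QueueTimer is in scope as in the surrounding snippets; the class name and the advance helper are illustrative assumptions, not part of the original fragment:

from typing import Optional

class MockTimerSketch(QueueTimer):
    """Hypothetical completion: a QueueTimer whose clock is a MockTimestamp
    that tests can move forward explicitly."""

    def __init__(self, get_current_time: Optional[MockTimestamp] = None):
        self._ts = get_current_time if get_current_time else MockTimestamp(0)
        QueueTimer.__init__(self, self._ts)

    def advance(self, seconds):
        # Assumed convenience helper: move the clock and fire due callbacks.
        self._ts.value += seconds
        self.service()


timer = MockTimerSketch()
cb = Callback()
timer.schedule(5, cb)
timer.advance(5)
assert cb.call_count == 1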
class ClientMessageProvider:
    def __init__(self,
                 name,
                 config,
                 prepare_to_send,
                 metrics,
                 mt_outgoing_size,
                 timer=None,
                 listener=None):
        self._name = name
        self.metrics = metrics
        self.listener = listener
        self._prepare_to_send = prepare_to_send
        self._mt_outgoing_size = mt_outgoing_size
        self._config = config
        self._timer = QueueTimer() if timer is None else timer
        self._pending_client_messages = OrderedDict()
        RepeatingTimer(self._timer, self._config.RESEND_CLIENT_MSG_TIMEOUT,
                       self._send_pending_messages)
        RepeatingTimer(self._timer, self._config.REMOVE_CLIENT_MSG_TIMEOUT,
                       self._remove_old_messages)

    def transmit_through_listener(self, msg,
                                  ident) -> Tuple[bool, Optional[str]]:
        self._pending_client_messages.setdefault(ident, []).append(
            (self._timer.get_current_time(), msg))
        if len(self._pending_client_messages
               ) > self._config.PENDING_CLIENT_LIMIT:
            self._pending_client_messages.popitem(last=False)
        if len(self._pending_client_messages[ident]
               ) > self._config.PENDING_MESSAGES_FOR_ONE_CLIENT_LIMIT:
            self._pending_client_messages[ident].pop(0)
        result = True
        error_msg = None
        for timestamp, current_msg in list(
                self._pending_client_messages[ident]):
            result, error_msg, need_to_resend = self._transmit_one_msg_throughlistener(
                current_msg, ident)
            if not need_to_resend:
                self._remove_message(ident, (timestamp, current_msg))
        return result, error_msg

    def _send_pending_messages(self):
        result = True
        error_msg = None
        for ident in list(self._pending_client_messages.keys()):
            for timestamp, current_msg in list(
                    self._pending_client_messages[ident]):
                if self._timer.get_current_time(
                ) - timestamp >= self._config.RESEND_CLIENT_MSG_TIMEOUT:
                    result, error_msg, need_to_resend = self._transmit_one_msg_throughlistener(
                        current_msg, ident)
                    if not need_to_resend:
                        self._remove_message(ident, (timestamp, current_msg))
        return result, error_msg

    def _transmit_one_msg_throughlistener(
            self, msg, ident) -> Tuple[bool, Optional[str], bool]:
        def prepare_error_msg(ex):
            err_str = '{}{} got error {} while sending through listener to {}' \
                .format(CONNECTION_PREFIX, self, ex, ident)
            logger.warning(err_str)
            return err_str

        need_to_resend = False
        if isinstance(ident, str):
            ident = ident.encode()
        try:
            msg = self._prepare_to_send(msg)
            logger.trace(
                '{} transmitting {} to {} through listener socket'.format(
                    self, msg, ident))
            self.metrics.add_event(self._mt_outgoing_size, len(msg))
            self.listener.send_multipart([ident, msg], flags=zmq.NOBLOCK)
        except InvalidMessageExceedingSizeException as ex:
            err_str = '{}Cannot transmit message. Error {}'.format(
                CONNECTION_PREFIX, ex)
            logger.warning(err_str)
            return False, err_str, need_to_resend
        except zmq.Again as ex:
            need_to_resend = True
            return False, prepare_error_msg(ex), need_to_resend
        except zmq.ZMQError as ex:
            need_to_resend = (ex.errno == 113)
            return False, prepare_error_msg(ex), need_to_resend
        except Exception as ex:
            return False, prepare_error_msg(ex), need_to_resend
        return True, None, need_to_resend

    def _remove_old_messages(self):
        for ident in list(self._pending_client_messages.keys()):
            for timestamp, current_msg in list(
                    self._pending_client_messages[ident]):
                if self._timer.get_current_time(
                ) - timestamp >= self._config.REMOVE_CLIENT_MSG_TIMEOUT:
                    self._remove_message(ident, (timestamp, current_msg))

    def _remove_message(self, ident, msg):
        self._pending_client_messages[ident].remove(msg)
        if not self._pending_client_messages[ident]:
            self._pending_client_messages.pop(ident)

    def __repr__(self):
        return self._name
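ClientMessageProvider does not poll on its own: its resend and cleanup loops only run when whoever owns the timer services it. A self-contained sketch of that pattern, using a simplified stand-in for RepeatingTimer (the real one comes from the project; this version only illustrates re-scheduling the callback after each firing) together with QueueTimer and the MockTimestamp sketch above:

class RepeatingTimerSketch:
    """Illustrative stand-in: schedules its callback again after every firing."""

    def __init__(self, timer, interval, callback):
        self._timer = timer
        self._interval = interval
        self._callback = callback
        self._timer.schedule(interval, self._fire)

    def _fire(self):
        self._callback()
        self._timer.schedule(self._interval, self._fire)


ts = MockTimestamp(0)
timer = QueueTimer(ts)
fired_at = []
RepeatingTimerSketch(timer, 5, lambda: fired_at.append(ts.value))

ts.value += 6
timer.service()
assert fired_at == [6]       # first firing; the next one is due at 11

ts.value += 6
timer.service()
assert fired_at == [6, 12]   # second firing on the next service pass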