Example #1
    def select_primaries(self, view_no: int, instance_count: int,
                         validators: List[str]) -> List[str]:
        # Select primaries for current view_no
        if instance_count == 0:
            return []

        # Build a list of primary names; this is needed to avoid
        # duplicate primary nodes across different replicas.
        primaries = []
        master_primary = None

        for i in range(instance_count):
            if i == 0:
                primary_name = self._next_primary_node_name_for_master(
                    view_no, validators)
                master_primary = primary_name
            else:
                primary_name = self._next_primary_node_name_for_backup(
                    master_primary, validators, primaries)

            primaries.append(primary_name)
            logger.display(
                "{} selected primary {} for instance {} (view {})".format(
                    PRIMARY_SELECTION_PREFIX, primary_name, i, view_no),
                extra={
                    "cli": "ANNOUNCE",
                    "tags": ["node-election"]
                })
        if len(primaries) != instance_count:
            raise LogicError('instances inconsistency')

        if len(primaries) != len(set(primaries)):
            raise LogicError('repeating instances')

        return primaries
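The helpers above encapsulate the rotation logic. A minimal, self-contained sketch of the same round-robin idea, with the _next_primary_node_name_for_master/_next_primary_node_name_for_backup helpers replaced by simple modular arithmetic (an illustrative assumption, not the real implementation):

    from typing import List

    def select_primaries_sketch(view_no: int, instance_count: int,
                                validators: List[str]) -> List[str]:
        # Assumes instance_count <= len(validators); otherwise unique
        # primaries cannot be assigned.
        if instance_count == 0:
            return []
        if instance_count > len(validators):
            raise RuntimeError('not enough validators for unique primaries')
        primaries = []
        # Master (instance 0): rotate through validators by view number.
        idx = view_no % len(validators)
        primaries.append(validators[idx])
        # Backups: walk forward from the master, skipping picked nodes.
        while len(primaries) < instance_count:
            idx = (idx + 1) % len(validators)
            if validators[idx] not in primaries:
                primaries.append(validators[idx])
        return primaries

    # View 1 on a 4-node pool: master is 'B', first backup is 'C'.
    assert select_primaries_sketch(1, 2, ['A', 'B', 'C', 'D']) == ['B', 'C']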
Example #2
 def on_catchup_complete(self):
     if not self.provider.is_node_synced():
         raise LogicError('on_catchup_complete can be called only after catchup completed')
     if self.provider.is_primary() is not None:
         raise LogicError('Primary on master replica cannot be elected yet')
     self._send_view_change_done_message()
     self._start_selection()
Example #3
    def select_primaries(self, view_no: int, instance_count: int,
                         validators: List[str]) -> List[str]:
        # Select primaries for current view_no
        if instance_count == 0:
            return []

        # Build a list of primary names; this is needed to avoid
        # duplicate primary nodes across different replicas.
        primaries = []
        master_primary = None

        for i in range(instance_count):
            if i == 0:
                primary_name = self._next_primary_node_name_for_master(
                    view_no, validators)
                master_primary = primary_name
            else:
                primary_name = self._next_primary_node_name_for_backup(
                    master_primary, validators, primaries)

            primaries.append(primary_name)
        if len(primaries) != instance_count:
            raise LogicError('instances inconsistency')

        if len(primaries) != len(set(primaries)):
            raise LogicError('repeating instances')

        return primaries
Example #4
    def _validateAttrib(self, req: Request):
        origin = req.identifier
        op = req.operation

        if op.get(TARGET_NYM) and not self.hasNym(op[TARGET_NYM],
                                                  isCommitted=False):
            raise InvalidClientRequest(origin, req.reqId,
                                       '{} should be added before adding '
                                       'attribute for it'.
                                       format(TARGET_NYM))

        is_owner = self.idrCache.getOwnerFor(op[TARGET_NYM],
                                             isCommitted=False) == origin
        field = None
        value = None
        for key in (RAW, ENC, HASH):
            if key in op:
                field = key
                value = op[key]
                break
        if field is None or value is None:
            raise LogicError('Attribute data cannot be empty')

        get_key = None
        if field == RAW:
            try:
                get_key = attrib_raw_data_serializer.deserialize(value)
                if len(get_key) == 0:
                    raise InvalidClientRequest(origin, req.reqId,
                                               '"raw" attribute field must contain non-empty dict')
                get_key = next(iter(get_key.keys()))
            except JSONDecodeError:
                raise InvalidClientRequest(origin, req.reqId,
                                           'Attribute field must be dict while adding it as a raw field')
        else:
            get_key = value

        if get_key is None:
            raise LogicError('Attribute data must be parsed')

        old_value, seq_no, _, _ = self.getAttr(op[TARGET_NYM], get_key, field, isCommitted=False)

        if seq_no is not None:
            self.write_req_validator.validate(req,
                                              [AuthActionEdit(txn_type=ATTRIB,
                                                              field=field,
                                                              old_value=old_value,
                                                              new_value=value,
                                                              is_owner=is_owner)])
        else:
            self.write_req_validator.validate(req,
                                              [AuthActionAdd(txn_type=ATTRIB,
                                                             field=field,
                                                             value=value,
                                                             is_owner=is_owner)])
Example #5
 def preprepare_batch(self, pp: PrePrepare):
     """
     After pp had validated, it placed into _preprepared list
     """
     if preprepare_to_batch_id(pp) in self.consensus_data.preprepared:
         raise LogicError('New pp cannot be stored in preprepared')
     if self.consensus_data.checkpoints and pp.ppSeqNo < self.consensus_data.last_checkpoint.seqNoEnd:
         raise LogicError('ppSeqNo cannot be lower than last checkpoint')
     self.consensus_data.preprepared.append(preprepare_to_batch_id(pp))
Example #6
 def _subscribe_to_internal_msgs(self, name):
     if name not in self._internal_buses:
         raise LogicError(
             "Internal bus does not exist for node {}".format(name))
     _bus = self._internal_buses.get(name)
     _bus.subscribe(NeedAddNode, self._process_add_new_node)
     _bus.subscribe(NeedRemoveNode, self._process_remove_node)
Example #7
 def discardTxns(self, count: int):
     """
     The number of txns in `uncommittedTxns` which have to be
     discarded
     :param count:
     :return:
     """
     # TODO: This can be optimised if multiple discards are combined
     # together since merkle root computation will be done only once.
     if count == 0:
         return
     if count > len(self.uncommittedTxns):
         raise LogicError(
             "expected to revert {} txns while there are only {}".format(
                 count, len(self.uncommittedTxns)))
     old_hash = self.uncommittedRootHash
     self.uncommittedTxns = self.uncommittedTxns[:-count]
     if not self.uncommittedTxns:
         self.uncommittedTree = None
         self.uncommittedRootHash = None
     else:
         self.uncommittedTree = self.treeWithAppliedTxns(
             self.uncommittedTxns)
         self.uncommittedRootHash = self.uncommittedTree.root_hash
     logger.info(
         'Discarding {} txns and root hash {} and new root hash is {}. {} are still uncommitted'
         .format(count, Ledger.hashToStr(old_hash),
                 Ledger.hashToStr(self.uncommittedRootHash),
                 len(self.uncommittedTxns)))
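The revert itself is plain list slicing plus a merkle-tree rebuild. A tiny illustration of just the slicing semantics (plain list, no tree):

    uncommitted = ['txn1', 'txn2', 'txn3', 'txn4']
    count = 2
    assert count <= len(uncommitted)
    uncommitted = uncommitted[:-count]  # drop the last `count` txns
    assert uncommitted == ['txn1', 'txn2']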
Example #8
 def register_new_database(self,
                           lid,
                           ledger: Ledger,
                           state: Optional[State] = None):
     if lid in self.databases:
         raise LogicError('Trying to add already existing database')
     self.databases[lid] = Database(ledger, state)
Example #9
 def subscribe(self, message_type: Type, handler: Handler):
     if message_type in self._handlers:
         raise LogicError(
             "Trying to assign handler {} for message type {}, "
             "but another handler is already assigned {}".format(
                 handler, message_type, self._handlers[message_type]))
     self._handlers[message_type] = handler
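A minimal sketch of the bus around this method, assuming a plain dict of message type to handler (TinyBus is an illustrative name, not the real class):

    from typing import Callable, Dict, Type

    class TinyBus:
        def __init__(self):
            self._handlers: Dict[Type, Callable] = {}

        def subscribe(self, message_type: Type, handler: Callable):
            if message_type in self._handlers:
                raise RuntimeError('handler already assigned for {}'
                                   .format(message_type))
            self._handlers[message_type] = handler

        def send(self, message):
            # Dispatch on the concrete message type.
            self._handlers[type(message)](message)

    bus = TinyBus()
    bus.subscribe(str, print)
    bus.send('hello')  # prints 'hello'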
Example #10
    def preLedgerCatchUp(self, ledger_id):
        super().preLedgerCatchUp(ledger_id)

        if len(self.idrCache.un_committed) > 0:
            raise LogicError(
                '{} idr cache has uncommitted txns before catching up ledger {}'
                .format(self, ledger_id))
Example #11
    def postLedgerCatchUp(self, ledger_id):
        if len(self.idrCache.un_committed) > 0:
            raise LogicError(
                '{} idr cache has uncommitted txns after catching up ledger {}'
                .format(self, ledger_id))

        super().postLedgerCatchUp(ledger_id)
Example #12
    def post_batch_applied(self,
                           three_pc_batch: ThreePcBatch,
                           prev_handler_result=None):
        node_txn_count = 0
        last_state = None
        if len(self.node_states) == 0:
            last_state = self.create_node_state_from_current_node()
        else:
            last_state = copy.deepcopy(self.node_states[-1])

        for digest in three_pc_batch.valid_digests:
            if digest not in self.node.requests:
                raise LogicError('Request is absent when it is being applied')
            request = self.node.requests[digest].request
            if request.operation.get(TXN_TYPE) == NODE \
                    and request.operation.get(DATA).get(SERVICES) is not None:
                node_txn_count += 1
                node_nym = request.operation.get(TARGET_NYM)
                node_name = request.operation.get(DATA).get(ALIAS)
                curName = last_state.node_ids.get(node_nym)
                if curName is None:
                    last_state.node_ids[node_nym] = node_name
                elif curName != node_name:
                    raise LogicError("Alias inconsistency")

                serv = request.operation.get(DATA).get(SERVICES)
                if VALIDATOR in serv and node_name not in last_state.node_reg:
                    last_state.node_reg.append(node_name)
                elif serv == [] and node_name in last_state.node_reg:
                    last_state.node_reg.remove(node_name)

                count = self.get_required_number_of_instances(
                    len(last_state.node_reg))
                if last_state.number_of_inst != count:
                    last_state.number_of_inst = count
                    new_validators = TxnPoolManager.calc_node_names_ordered_by_rank(
                        last_state.node_reg, last_state.node_ids)
                    last_state.primaries = self.node.primaries_selector.select_primaries(
                        view_no=self.node.viewNo,
                        instance_count=last_state.number_of_inst,
                        validators=new_validators)

        # We will save node state at every pool batch, so we could revert it correctly
        self.node_states.append(last_state)
        three_pc_batch.primaries = last_state.primaries
        return last_state.primaries
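The node_reg bookkeeping above reduces to: add the alias when VALIDATOR appears in SERVICES, remove it when SERVICES is emptied. A stripped-down sketch of just that rule:

    VALIDATOR = 'VALIDATOR'

    def apply_services(node_reg, node_name, services):
        if VALIDATOR in services and node_name not in node_reg:
            node_reg.append(node_name)
        elif services == [] and node_name in node_reg:
            node_reg.remove(node_name)

    node_reg = ['Alpha', 'Beta']
    apply_services(node_reg, 'Gamma', [VALIDATOR])  # promotion
    apply_services(node_reg, 'Beta', [])            # demotion
    assert node_reg == ['Alpha', 'Gamma']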
Example #13
 def apply_bitmask_to_list(reqIdrs: List, mask: bitarray):
     if mask.length() == 0:
         return reqIdrs, []
     if len(reqIdrs) != mask.length():
         raise LogicError(
             "Length of reqIdr list and bitmask is not the same")
     return BitmaskHelper.get_valid_reqs(
         reqIdrs, mask), BitmaskHelper.get_invalid_reqs(reqIdrs, mask)
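A sketch of the same split using plain booleans instead of a bitarray; whether a set bit marks a valid or an invalid request is a convention, and here set means valid (an assumption, BitmaskHelper may use the opposite):

    def split_by_mask(items, mask):
        if len(items) != len(mask):
            raise ValueError('Length of items and mask is not the same')
        valid = [x for x, bit in zip(items, mask) if bit]
        invalid = [x for x, bit in zip(items, mask) if not bit]
        return valid, invalid

    assert split_by_mask(['a', 'b', 'c'], [True, False, True]) == \
        (['a', 'c'], ['b'])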
Example #14
    def _set_stable_checkpoint(self, end_seq_no):
        if not list(self._data.checkpoints.irange_key(end_seq_no, end_seq_no)):
            raise LogicError('Stable checkpoint must be in checkpoints')
        self._data.stable_checkpoint = end_seq_no

        self._data.checkpoints = \
            SortedListWithKey([c for c in self._data.checkpoints if c.seqNoEnd >= end_seq_no],
                              key=lambda checkpoint: checkpoint.seqNoEnd)
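The irange_key call just verifies that a checkpoint ending exactly at end_seq_no exists before older checkpoints are pruned. A plain-list sketch of the same check-and-prune:

    from collections import namedtuple

    Checkpoint = namedtuple('Checkpoint', ['seqNoEnd'])
    checkpoints = [Checkpoint(100), Checkpoint(200), Checkpoint(300)]

    end_seq_no = 200
    if not any(c.seqNoEnd == end_seq_no for c in checkpoints):
        raise RuntimeError('Stable checkpoint must be in checkpoints')
    checkpoints = [c for c in checkpoints if c.seqNoEnd >= end_seq_no]
    assert checkpoints == [Checkpoint(200), Checkpoint(300)]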
Example #15
 def process_ordered(self, ordered: Ordered):
     for batch_id in reversed(self._data.preprepared):
         if batch_id.pp_seq_no == ordered.ppSeqNo:
             self._add_to_checkpoint(batch_id.pp_seq_no, batch_id.view_no,
                                     ordered.auditTxnRootHash)
             return
     raise LogicError(
         "CheckpointService | Can't process Ordered msg because "
         "ppSeqNo {} not in preprepared".format(ordered.ppSeqNo))
Example #16
 def process_ordered(self, ordered: Ordered):
     for pp in reversed(self._data.preprepared):
         if pp.ppSeqNo == ordered.ppSeqNo:
             self._add_to_checkpoint(pp.ppSeqNo, pp.digest, pp.ledgerId,
                                     pp.viewNo)
             return
     raise LogicError(
         "CheckpointService | Can't process Ordered msg because "
         "ppSeqNo {} not in preprepared".format(ordered.ppSeqNo))
Example #17
    def _fill_primaries(self, txn, three_pc_batch, last_audit_txn):
        last_audit_txn_data = get_payload_data(
            last_audit_txn) if last_audit_txn is not None else None
        last_txn_value = last_audit_txn_data[
            AUDIT_TXN_PRIMARIES] if last_audit_txn_data else None
        current_primaries = three_pc_batch.primaries

        # 1. First audit txn
        if last_audit_txn_data is None:
            txn[AUDIT_TXN_PRIMARIES] = current_primaries

        # 2. Previous primaries field contains a primary list.
        # If primaries did not change, we store a seq_no delta
        # between the current txn and the last persisted primaries, i.e.
        # we can find the seq_no of the last actual primaries as:
        # last_audit_txn_seq_no - last_audit_txn[AUDIT_TXN_PRIMARIES]
        elif isinstance(last_txn_value, Iterable):
            if last_txn_value == current_primaries:
                txn[AUDIT_TXN_PRIMARIES] = 1
            else:
                txn[AUDIT_TXN_PRIMARIES] = current_primaries

        # 3. Previous primaries field is delta
        elif isinstance(last_txn_value,
                        int) and last_txn_value < self.ledger.uncommitted_size:
            last_primaries_seq_no = get_seq_no(last_audit_txn) - last_txn_value
            last_primaries = get_payload_data(
                self.ledger.get_by_seq_no_uncommitted(
                    last_primaries_seq_no))[AUDIT_TXN_PRIMARIES]
            if isinstance(last_primaries, Iterable):
                if last_primaries == current_primaries:
                    txn[AUDIT_TXN_PRIMARIES] = last_txn_value + 1
                else:
                    txn[AUDIT_TXN_PRIMARIES] = current_primaries
            else:
                raise LogicError(
                    'Value mentioned in primaries field must be a '
                    'seq_no of a txn with primaries')

        # 4. That cannot be
        else:
            raise LogicError(
                'Incorrect primaries field in audit ledger (seq_no: {}. value: {})'
                .format(get_seq_no(last_audit_txn), last_txn_value))
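The scheme stores the full primaries list only when it changes; otherwise it stores an integer delta pointing back to the txn that last stored the list. A small encode-only sketch under that assumption (audit_log stands in for the audit ledger):

    def encode_primaries(audit_log, current_primaries):
        # audit_log holds previous AUDIT_TXN_PRIMARIES values in order.
        if not audit_log:
            return current_primaries  # 1. first audit txn
        last = audit_log[-1]
        if isinstance(last, list):  # 2. previous value is a list
            return 1 if last == current_primaries else current_primaries
        # 3. previous value is a delta: follow it back to the stored list
        stored = audit_log[-1 - last]
        return last + 1 if stored == current_primaries else current_primaries

    log = [['A', 'B']]
    log.append(encode_primaries(log, ['A', 'B']))  # unchanged -> delta 1
    log.append(encode_primaries(log, ['A', 'B']))  # unchanged -> delta 2
    assert log == [['A', 'B'], 1, 2]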
Example #18
 def get_by_full_digest(self, full_digest):
     try:
         val = self._keyValueStorage.get(self._full_digest_key(full_digest))
         result = val.decode()
         if not isinstance(result, str):
             raise LogicError(
                 'SeqNoDB must store full_digest => payload_digest')
         return result
     except (KeyError, ValueError):
         return None
Example #19
 def stash(self, msg, reason):
     if reason == STASH_CATCH_UP:
         self._stashed_catch_up.append(msg)
     elif reason == STASH_VIEW:
         self._stashed_future_view.append(msg)
     elif reason == STASH_WATERMARKS:
         self._stashed_watermarks.append(msg)
     else:
         raise LogicError("Unknown Stash Type '{}' "
                          "for message {}".format(reason, msg))
Example #20
 def register_new_database(self,
                           lid,
                           ledger: Ledger,
                           state: Optional[State] = None,
                           taa_acceptance_required=True):
     if lid in self.databases:
         raise LogicError('Trying to add already existing database')
     self.databases[lid] = Database(
         ledger, state, taa_acceptance_required=taa_acceptance_required)
     self._init_db_list()
Example #21
    def remove_node(self, name):
        if name not in self.validators:
            raise LogicError("Node with name {} does not exist in pool".format(name))

        self.validators.remove(name)
        replicas = {n for n in self.nodes if n.name == name}
        assert len(replicas) == 1
        node_obj = replicas.pop()
        self._nodes.remove(node_obj)
        self._update_connecteds()
        logger.info("Node {} was removed from pool".format(name))
Example #22
 def play_curtain_next(self, curtain_type):
     if curtain_type == MovieManager.OPEN_CURTAIN:
         self._play_open_curtain_next = True
         self._play_close_curtain_next = False
     elif curtain_type == MovieManager.CLOSE_CURTAIN:
         self._play_open_curtain_next = False
         self._play_close_curtain_next = True
     else:
         if self._logger.isEnabledFor(LazyLogger.DEBUG):
             self._logger.debug('Must specify OPEN or CLOSE curtain')
         raise LogicError()
Example #23
 def _set_node_order(self, nodeNym, nodeName):
     curName = self._ordered_node_ids.get(nodeNym)
     if curName is None:
         self._ordered_node_ids[nodeNym] = nodeName
         logger.info("{} sets node {} ({}) order to {}".format(
             self.name, nodeName, nodeNym,
             len(self._ordered_node_ids[nodeNym])))
     elif curName != nodeName:
         msg = ("{} is trying to order already ordered node {} ({}) "
                "with other alias {}".format(self.name, curName, nodeNym, nodeName))
         logger.warning(msg)
         raise LogicError(msg)
Example #24
 def _process_window(self):
     if self.state == self.State.NORMAL:
         self._process_window_in_normal_mode()
     elif self.state == self.State.IDLE:
         self._process_window_in_idle_mode()
     elif self.state == self.State.FADED:
         self._process_window_in_faded_mode()
     elif self.state == self.State.REVIVAL:
         self._process_window_in_revival_mode()
     else:
         raise LogicError("Internal state of throughput measurement {} "
                          "is unsupported".format(self.state))
Example #25
 def get_by_payload_digest(self, payload_digest):
     try:
         val = self._keyValueStorage.get(payload_digest)
         val = val.decode()
         result = self._parse_value(val)
         if not isinstance(result, Iterable) or len(result) != 2:
             raise LogicError(
                 'SeqNoDB must store payload_digest => ledger_id and seq_no'
             )
         return result
     except (KeyError, ValueError):
         return None, None
Example #26
    def on_catchup_complete(self):
        # Select primaries after usual catchup (not view change)
        ledger = self.node.getLedger(AUDIT_LEDGER_ID)
        self.node.backup_instance_faulty_processor.restore_replicas()
        self.node.drop_primaries()
        if len(ledger) == 0:
            self.node.select_primaries()
        else:
            # Emulate view change start
            self.node.view_changer.previous_view_no = self.node.viewNo
            self.node.viewNo = get_payload_data(ledger.get_last_committed_txn())[AUDIT_TXN_VIEW_NO]
            self.node.view_changer.previous_master_primary = self.node.master_primary_name
            self.node.view_changer.set_defaults()

            self.node.primaries = self._get_last_audited_primaries()
            if len(self.replicas) != len(self.node.primaries):
                raise LogicError('Audit ledger has inconsistent number of nodes')
            if any(p not in self.node.nodeReg for p in self.node.primaries):
                raise LogicError('Audit ledger has inconsistent names of nodes')
            # Similar functionality to select_primaries
            for instance_id, replica in self.replicas.items():
                if instance_id == 0:
                    self.node.start_participating()
                replica.primaryChanged(
                    Replica.generateName(self.node.primaries[instance_id], instance_id))
                self.node.primary_selected(instance_id)

        # Primary propagation
        last_sent_pp_seq_no_restored = False
        for replica in self.replicas.values():
            replica.on_propagate_primary_done()
        if self.node.view_changer.previous_view_no == 0:
            last_sent_pp_seq_no_restored = \
                self.node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no()
        if not last_sent_pp_seq_no_restored:
            self.node.last_sent_pp_store_helper.erase_last_sent_pp_seq_no()

        # Emulate view_change ending
        self.node.on_view_propagated()
Example #27
    def _do_select_master_primary(self, view_no: int, node_reg) -> str:
        # Use the list of nodes as it was at the beginning of the last view
        # to guarantee that the same primaries are selected on all nodes once a view change is started.
        # Remark: It's possible that there is no nodeReg for some views if no txns have been ordered there
        view_no_for_selection = view_no - 1 if view_no > 1 else 0
        while view_no_for_selection > 0 and view_no_for_selection not in node_reg:
            view_no_for_selection -= 1
        if view_no_for_selection not in node_reg:
            raise LogicError(
                "Cannot find view_no {} in node_reg_at_beginning_of_view {}".
                format(view_no_for_selection, node_reg))
        node_reg_to_use = node_reg[view_no_for_selection]

        return node_reg_to_use[view_no % len(node_reg_to_use)]
Example #28
 def validate(self, msg) -> Tuple[int, Optional[str]]:
     if isinstance(msg, PrePrepare):
         return self.validate_pre_prepare(msg)
     if isinstance(msg, Prepare):
         return self.validate_prepare(msg)
     if isinstance(msg, Commit):
         return self.validate_commit(msg)
     if isinstance(msg, NewViewCheckpointsApplied):
         return self.validate_new_view(msg)
     if isinstance(msg, OldViewPrePrepareRequest):
         return self.validate_old_view_prep_prepare_req(msg)
     if isinstance(msg, OldViewPrePrepareReply):
         return self.validate_old_view_prep_prepare_rep(msg)
     raise LogicError("Unknown message type")
Example #29
    def process_selection(self, instance_count, node_reg, node_ids):
        # Select primaries for current view_no
        if instance_count == 0:
            return []

        '''
        Build a list of primary names; this is needed to avoid
        duplicate primary nodes across different replicas.
        '''

        primaries = []
        primary_rank = None

        for i in range(instance_count):
            if i == 0:
                primary_name = self._next_primary_node_name_for_master(node_reg, node_ids)
                primary_rank = self.node.get_rank_by_name(primary_name, node_reg, node_ids)
                if primary_rank is None:
                    raise LogicError('primary_rank must not be None')
            else:
                primary_name, _ = self.next_primary_replica_name_for_backup(
                    i, primary_rank, primaries, node_reg, node_ids)

            primaries.append(primary_name)
            logger.display("{} selected primary {} for instance {} (view {})"
                           .format(PRIMARY_SELECTION_PREFIX,
                                   primary_name, i, self.viewNo),
                           extra={"cli": "ANNOUNCE",
                                  "tags": ["node-election"]})
        if len(primaries) != instance_count:
            raise LogicError('instances inconsistency')

        if len(primaries) != len(set(primaries)):
            raise LogicError('repeating instances')

        return primaries
Example #30
 def reject_batch(self):
     """
     Return hash reverting for and calculate count of reverted txns
     :return: root_hash, for reverting to (needed in revertToHead method) and count of reverted txns
     """
     if len(self.un_committed) == 0:
         raise LogicError("No items to return")
     _, prev_size = self.un_committed.pop()
     if len(self.un_committed) == 0:
         committed_hash, committed_size = self.last_committed
         return committed_hash, prev_size - committed_size
     else:
         lhash, lsize = self.un_committed[-1]
         return lhash, prev_size - lsize
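The un_committed entries behave as (root_hash, cumulative_size) pairs, so the count of reverted txns is a size difference; a sketch under that assumption:

    last_committed = ('committed_root', 10)          # (hash, cumulative size)
    un_committed = [('root_a', 12), ('root_b', 15)]  # cumulative sizes

    _, prev_size = un_committed.pop()  # reject the newest batch
    if un_committed:
        lhash, lsize = un_committed[-1]
        result = (lhash, prev_size - lsize)
    else:
        result = (last_committed[0], prev_size - last_committed[1])
    assert result == ('root_a', 3)  # revert to root_a; 3 txns reverted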