def __init__(self, tmpdir, config=None):
     self.basedirpath = tmpdir
     self.name = 'Node1'
     self.f = 1
     self.replicas = dict()
     self.requests = []
     self.rank = None
     self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
     self.nodeReg = {
         name: HA("127.0.0.1", 0) for name in self.allNodeNames
     }
     self.totalNodes = len(self.allNodeNames)
     self.mode = Mode.starting
     self.config = config or getConfigOnce()
     self.replicas = {
         0: Replica(node=self, instId=0, isMaster=True, config=self.config),
         1: Replica(node=self, instId=1, isMaster=False, config=self.config),
         2: Replica(node=self, instId=2, isMaster=False, config=self.config),
     }
     self._found = False
     self.ledgerManager = LedgerManager(self, ownedByNode=True)
     ledger0 = FakeLedger(0, 10)
     ledger1 = FakeLedger(1, 5)
     self.ledgerManager.addLedger(0, ledger0)
     self.ledgerManager.addLedger(1, ledger1)
     self.quorums = Quorums(self.totalNodes)
     self.view_changer = ViewChanger(self)
     self.elector = PrimarySelector(self)
     self.metrics = NullMetricsCollector()
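
A hypothetical usage sketch for an __init__ like the one above, assuming it belongs to a FakeNode test double (as in the later examples), that FakeNode and Mode are importable from the test module, and that pytest's built-in tmpdir fixture supplies the base directory:

def test_fake_node_basic_setup(tmpdir):
    node = FakeNode(str(tmpdir))
    # four node names are registered, so quorums are built for n=4 with f=1
    assert node.totalNodes == 4
    assert node.f == 1
    # three replicas are created; instance 0 is constructed as the master
    assert len(node.replicas) == 3
    assert node.mode == Mode.starting
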
Example #2
def __init__(self, tmpdir):
     self.basedirpath = tmpdir
     self.name = 'Node1'
     self.f = 1
     self.replicas = []
     self.rank = None
     self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
     self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
     self.totalNodes = len(self.allNodeNames)
     self.mode = Mode.starting
     self.replicas = [
         Replica(node=self, instId=0, isMaster=True),
         Replica(node=self, instId=1, isMaster=False),
         Replica(node=self, instId=2, isMaster=False),
     ]
     self._found = False
     self.ledgerManager = LedgerManager(self, ownedByNode=True)
     ledger0 = FakeLedger(0, 10)
     ledger1 = FakeLedger(1, 5)
     self.ledgerManager.addLedger(0, ledger0)
     self.ledgerManager.addLedger(1, ledger1)
     self.quorums = Quorums(self.totalNodes)
     self.config = getConfig()  # TODO do we need fake object here?
     self.view_changer = ViewChanger(self)
     self.elector = PrimarySelector(self)
Example #3
def __init__(self, tmpdir):
     self.basedirpath = tmpdir
     self.name = 'Node1'
     self.f = 1
     self.replicas = []
     self.viewNo = 0
     self.rank = None
     self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
     self.nodeReg = {
         name: HA("127.0.0.1", 0) for name in self.allNodeNames
     }
     self.totalNodes = len(self.allNodeNames)
     self.mode = Mode.starting
     self.replicas = [
         Replica(node=self, instId=0, isMaster=True),
         Replica(node=self, instId=1, isMaster=False),
         Replica(node=self, instId=2, isMaster=False),
     ]
     self._found = False
     self.ledgerManager = LedgerManager(self, ownedByNode=True)
     ledger0 = FakeLedger(0, 10)
     ledger1 = FakeLedger(1, 5)
     self.ledgerManager.addLedger(0, ledger0)
     self.ledgerManager.addLedger(1, ledger1)
     self.quorums = Quorums(self.totalNodes)
     self.view_change_in_progress = True
     self.propagate_primary = False
Example #4
class FakeNode:
    ledger_ids = [0]

    def __init__(self, tmpdir):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.f = 1
        self.replicas = []
        self.viewNo = 0
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {
            name: HA("127.0.0.1", 0) for name in self.allNodeNames
        }
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.replicas = [
            Replica(node=self, instId=0, isMaster=True),
            Replica(node=self, instId=1, isMaster=False),
            Replica(node=self, instId=2, isMaster=False),
        ]
        self._found = False
        self.ledgerManager = LedgerManager(self, ownedByNode=True)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_change_in_progress = True
        self.propagate_primary = False

    def get_name_by_rank(self, name, nodeReg=None):
        # This is used only for getting name of next primary, so
        # it just returns a constant
        return 'Node2'

    def primary_selected(self, instance_id):
        self._found = True

    def is_primary_found(self):
        return self._found

    @property
    def master_primary_name(self) -> Optional[str]:
        nm = self.replicas[0].primaryName
        if nm:
            return Replica.getNodeName(nm)

    @property
    def master_replica(self):
        return self.replicas[0]

    @property
    def is_synced(self):
        return self.mode >= Mode.synced
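
A hypothetical usage sketch for the FakeNode class above, assuming it and its dependencies are importable from the test helpers: primary_selected() simply flips the internal _found flag that is_primary_found() reports.

def test_primary_selected_sets_flag(tmpdir):
    node = FakeNode(str(tmpdir))
    assert not node.is_primary_found()
    node.primary_selected(0)       # sets the internal _found flag
    assert node.is_primary_found()
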
Example #5
    def __init__(self, tmpdir, config=None):
        node_names = ['Node1', 'Node2', 'Node3', 'Node4']
        self.basedirpath = tmpdir
        self.name = node_names[0]
        self.viewNo = 0
        self.db_manager = DatabaseManager()
        self.timer = QueueTimer()
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = node_names
        self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
        self.nodeIds = []
        self.totalNodes = len(self.allNodeNames)
        self.poolManager = FakeSomething(
            node_names_ordered_by_rank=lambda: node_names)
        self.mode = Mode.starting
        self.monitor = FakeSomething(isMasterDegraded=lambda: False)
        self.config = config or getConfigOnce()
        self.nodeStatusDB = None
        self.quorums = Quorums(self.totalNodes)
        self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
        self.write_manager = FakeSomething(
            node_reg_handler=NodeRegHandler(self.db_manager))
        self.primaries_selector = RoundRobinConstantNodesPrimariesSelector(
            node_names)
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False,
                       config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config)
        }
        self.requiredNumberOfInstances = 2
        self._found = False
        self.ledgerManager = LedgerManager(self)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

        # callbacks
        self.onBatchCreated = lambda self, *args, **kwargs: True
Example #6
    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.internal_bus = InternalBus()
        self.db_manager = DatabaseManager()
        self.timer = QueueTimer()
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
        self.nodeIds = []
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.nodeStatusDB = None
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False,
                       config=self.config),
            2: Replica(node=self, instId=2, isMaster=False,
                       config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = create_view_changer(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.catchup_rounds_without_txns = 0
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

        # callbacks
        self.onBatchCreated = lambda self, *args, **kwargs: True
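
A small hedged check of the catchup-related defaults set up above, assuming this __init__ belongs to a FakeNode-style test double like the other examples:

def test_catchup_defaults(tmpdir):
    node = FakeNode(str(tmpdir))
    assert node.view_change_in_progress is False
    assert node.master_last_ordered_3PC == (0, 0)
    assert node.ledgerManager.last_caught_up_3PC == (0, 0)
    # onBatchCreated is a no-op stub that always reports success
    assert node.onBatchCreated(node) is True
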
Example #7
def __init__(self):
     self.name = 'Node1'
     self.f = 1
     self.replicas = []
     self.viewNo = 0
     self.rank = None
     self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
     self.totalNodes = len(self.allNodeNames)
     self.mode = Mode.starting
     self.replicas = [
         Replica(node=self, instId=0, isMaster=True),
         Replica(node=self, instId=1, isMaster=False),
         Replica(node=self, instId=2, isMaster=False),
     ]
     self._found = False
     self.ledgerManager = LedgerManager(self, ownedByNode=True)
     ledger0 = FakeLedger(0, 10)
     ledger1 = FakeLedger(1, 5)
     self.ledgerManager.addLedger(0, ledger0)
     self.ledgerManager.addLedger(1, ledger1)
     self.quorums = Quorums(self.totalNodes)
Example #8
    def _init_common_managers(self):
        # Pool manager init
        self.node.poolManager = TxnPoolManager(
            self.node, self.node.poolLedger, self.node.states[POOL_LEDGER_ID],
            self.node.write_manager, self.node.ha, self.node.cliname,
            self.node.cliha)

        # Ledger manager init
        ledger_sync_order = self.node.ledger_ids
        self.node.ledgerManager = LedgerManager(
            self.node,
            postAllLedgersCaughtUp=self.node.allLedgersCaughtUp,
            preCatchupClbk=self.node.preLedgerCatchUp,
            postCatchupClbk=self.node.postLedgerCatchUp,
            ledger_sync_order=ledger_sync_order,
            metrics=self.node.metrics)
Example #9
def test_missing_txn_request(ledger_no_genesis):
    """
    Testing LedgerManager's `_missing_txns`
    """
    ledger = ledger_no_genesis
    for i in range(20):
        txn = random_txn(i)
        ledger.add(txn)

    # Callbacks don't matter in this test
    ledger_info = LedgerInfo(0, ledger, *[None] * 6)
    assert ledger_info.catchupReplyTimer is None
    assert LedgerManager._missing_txns(ledger_info) == (False, 0)

    ledger_info.catchupReplyTimer = time.perf_counter()

    # Ledger is already ahead
    cp = ConsistencyProof(0, 1, 10, 1, 1,
                          'GJybBTHjzMzPWsE6n9qNQWAmhJP88dTcdbgkGLhYGFYn',
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(1, 15)]
    assert not LedgerManager._missing_txns(ledger_info)[0]

    # Ledger is behind but catchup replies present
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 31)]
    assert not LedgerManager._missing_txns(ledger_info)[0]
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 35)]
    assert not LedgerManager._missing_txns(ledger_info)[0]

    # Ledger is behind
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 26)]
    assert LedgerManager._missing_txns(ledger_info) == (True, 5)

    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(26, 31)]
    assert LedgerManager._missing_txns(ledger_info) == (True, 5)
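
The last two assertions come down to simple counting (an assumption about how _missing_txns tallies outstanding transactions): the ledger already holds 20 txns, the consistency proof targets seqNo 30, and only 5 catchup replies are on hand, leaving 5 txns missing. A quick standalone sanity check of that arithmetic:

ledger_size, target_seq_no, replies_on_hand = 20, 30, 5
assert target_seq_no - ledger_size - replies_on_hand == 5
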
Example #11
    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {
            name: HA("127.0.0.1", 0) for name in self.allNodeNames
        }
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False, config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self, ownedByNode=True)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = ViewChanger(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.catchup_rounds_without_txns = 0
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}
Example #12
class FakeNode:
    ledger_ids = [POOL_LEDGER_ID, CONFIG_LEDGER_ID, DOMAIN_LEDGER_ID]

    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.internal_bus = InternalBus()
        self.db_manager = DatabaseManager()
        self.timer = QueueTimer()
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {
            name: HA("127.0.0.1", 0) for name in self.allNodeNames
        }
        self.nodeIds = []
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.nodeStatusDB = None
        self.internal_bus = InternalBus()
        self.quorums = Quorums(self.totalNodes)
        self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
        self.write_manager = FakeSomething()
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False, config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config)
        }
        self.requiredNumberOfInstances = 2
        self._found = False
        self.ledgerManager = LedgerManager(self)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = create_view_changer(self)
        self.primaries_selector = RoundRobinPrimariesSelector()
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.catchup_rounds_without_txns = 0
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

        # callbacks
        self.onBatchCreated = lambda self, *args, **kwargs: True

    @property
    def viewNo(self):
        return None if self.view_changer is None else self.view_changer.view_no

    @property
    def ledger_summary(self):
        return [li.ledger_summary for li in
                self.ledgerManager.ledgerRegistry.values()]

    def get_name_by_rank(self, name, node_reg, node_ids):
        # This is used only for getting name of next primary, so
        # it just returns a constant
        return 'Node2'

    def primary_selected(self, instance_id):
        self._found = True

    def is_primary_found(self):
        return self._found

    @property
    def master_primary_name(self) -> Optional[str]:
        nm = self.replicas[0].primaryName
        if nm:
            return Replica.getNodeName(nm)

    @property
    def master_replica(self):
        return self.replicas[0]

    @property
    def is_synced(self):
        return self.mode >= Mode.synced

    def on_view_change_start(self):
        pass

    def start_catchup(self):
        pass

    def allLedgersCaughtUp(self):
        Node.allLedgersCaughtUp(self)

    def _clean_non_forwarded_ordered(self):
        return Node._clean_non_forwarded_ordered(self)

    def num_txns_caught_up_in_last_catchup(self):
        return Node.num_txns_caught_up_in_last_catchup(self)

    def set_view_change_status(self, value):
        return Node.set_view_change_status(self, value)

    def mark_request_as_executed(self, request):
        Node.mark_request_as_executed(self, request)

    def _clean_req_from_verified(self, request):
        pass

    def doneProcessingReq(self, key):
        pass

    def is_catchup_needed(self):
        return False

    def no_more_catchups_needed(self):
        pass

    def select_primaries(self):
        pass

    def utc_epoch(self):
        return get_utc_epoch()

    def get_validators(self):
        return []

    def set_view_for_replicas(self, a):
        pass

    def get_primaries_for_current_view(self):
        # This is used only for getting name of next primary, so
        # it just returns a constant
        return ['Node2', 'Node3']
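
A hypothetical check of the hard-wired stubs in the FakeNode above (assuming the class and its dependencies are importable): the rank and primary lookups always return the same constants, which is all the catchup and view-change tests need.

def test_fake_node_constant_stubs(tmpdir):
    node = FakeNode(str(tmpdir))
    assert node.get_name_by_rank(1, node.nodeReg, node.nodeIds) == 'Node2'
    assert node.get_primaries_for_current_view() == ['Node2', 'Node3']
    assert node.is_catchup_needed() is False
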
Example #13
class Client(Motor,
             MessageProcessor,
             HasFileStorage,
             HasPoolManager,
             HasActionQueue):
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA]=None,
                 ha: Union[HA, Tuple[str, int]]=None,
                 basedirpath: str=None,
                 config=None,
                 sighex: str=None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        signer = Signer(sighex)
        sighex = signer.keyhex
        verkey = rawToFriendly(signer.verraw)

        self.name = name
        self.stackName = verkey

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(self.stackName, basedirpath):
            cha = getHaFromLocalEstate(self.stackName, basedirpath)
            if cha:
                cha = HA(*cha)
                logger.debug("Client {} ignoring given ha {} and using {}".
                             format(self.name, ha, cha))
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self, self.name, baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(0, self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        HasActionQueue.__init__(self)

        self.setF()

        stackargs = dict(name=self.stackName,
                         ha=cha,
                         main=False,  # stops incoming vacuous joins
                         auto=AutoMode.always)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        self.nodestack = self.nodeStackClass(stackargs,
                                             self.handleOneNodeMsg,
                                             self.nodeReg,
                                             sighex)
        self.nodestack.onConnsChanged = self.onConnsChanged

        if self.nodeReg:
            logger.info("Client {} initialized with the following node registry:"
                        .format(self.name))
            lengths = [max(x) for x in zip(*[
                (len(name), len(host), len(str(port)))
                for name, (host, port) in self.nodeReg.items()])]
            fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
                *lengths)
            for name, (host, port) in self.nodeReg.items():
                logger.info(fmt.format(name, host, port))
        else:
            logger.info(
                "Client {} found an empty node registry:".format(self.name))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have a couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger

        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REQACK
        self.expectingAcksFor = {}

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REPLY
        self.expectingRepliesFor = {}

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))

    def getReqRepStore(self):
        return ClientReqRepStoreFile(self.name, self.basedirpath)

    def getTxnLogStore(self):
        return ClientTxnLog(self.name, self.basedirpath)

    def __repr__(self):
        return self.name

    def postPoolLedgerCaughtUp(self):
        self.mode = Mode.discovered
        # For the scenario where client has already connected to nodes reading
        #  the genesis pool transactions and that is enough
        if self.hasSufficientConnections:
            self.flushMsgsPendingConnection()

    def postTxnFromCatchupAddedToLedger(self, ledgerType: int, txn: Any):
        if ledgerType != 0:
            logger.error("{} got unknown ledger type {}".
                         format(self, ledgerType))
            return
        self.processPoolTxn(txn)

    # noinspection PyAttributeOutsideInit
    def setF(self):
        nodeCount = len(self.nodeReg)
        self.f = getMaxFailures(nodeCount)
        self.minNodesToConnect = self.f + 1
        self.totalNodes = nodeCount

    @staticmethod
    def exists(name, basedirpath):
        return os.path.exists(basedirpath) and \
               os.path.exists(os.path.join(basedirpath, name))

    @property
    def nodeStackClass(self) -> NodeStack:
        return NodeStack

    def start(self, loop):
        oldstatus = self.status
        if oldstatus in Status.going():
            logger.info("{} is already {}, so start has no effect".
                        format(self, self.status.name))
        else:
            super().start(loop)
            self.nodestack.start()
            self.nodestack.maintainConnections()
            if self._ledger:
                self.ledgerManager.setLedgerCanSync(0, True)
                self.mode = Mode.starting

    async def prod(self, limit) -> int:
        """
        async function that returns the number of events

        :param limit: The number of messages to be processed
        :return: The number of events up to a prescribed `limit`
        """
        s = 0
        if self.isGoing():
            s = await self.nodestack.service(limit)
            await self.nodestack.serviceLifecycle()
        self.nodestack.flushOutBoxes()
        s += self._serviceActions()
        # TODO: This if condition has to be removed. `_ledger`, once set, won't
        # ever be reset, so in `__init__` the `prod` method should be patched.
        if self._ledger:
            s += self.ledgerManager._serviceActions()
        return s

    def submitReqs(self, *reqs: Request) -> List[Request]:
        requests = []
        for request in reqs:
            if self.mode == Mode.discovered and self.hasSufficientConnections:
                self.nodestack.send(request)
                self.expectingFor(request)
            else:
                logger.debug("{} pending request since in mode {} and "
                             "connected to {} nodes".
                             format(self, self.mode, self.nodestack.connecteds))
                self.pendReqsTillConnection(request)
            requests.append(request)
        for r in requests:
            self.reqRepStore.addRequest(r)
        return requests

    def handleOneNodeMsg(self, wrappedMsg, excludeFromCli=None) -> None:
        """
        Handles single message from a node, and appends it to a queue
        :param wrappedMsg: Reply received by the client from the node
        """
        self.inBox.append(wrappedMsg)
        msg, frm = wrappedMsg
        # Do not print result of transaction type `POOL_LEDGER_TXNS` on the CLI
        ledgerTxnTypes = (POOL_LEDGER_TXNS, LEDGER_STATUS, CONSISTENCY_PROOF,
                          CATCHUP_REP)
        printOnCli = not excludeFromCli and msg.get(OP_FIELD_NAME) not \
                                            in ledgerTxnTypes
        logger.debug("Client {} got msg from node {}: {}".
                     format(self.name, frm, msg),
                     extra={"cli": printOnCli})
        if OP_FIELD_NAME in msg:
            if msg[OP_FIELD_NAME] in ledgerTxnTypes and self._ledger:
                op = msg.get(OP_FIELD_NAME, None)
                if not op:
                    raise MissingNodeOp
                # TODO: Refactor this copying
                cls = TaggedTuples.get(op, None)
                t = copy.deepcopy(msg)
                t.pop(OP_FIELD_NAME, None)
                cMsg = cls(**t)
                if msg[OP_FIELD_NAME] == POOL_LEDGER_TXNS:
                    self.poolTxnReceived(cMsg, frm)
                if msg[OP_FIELD_NAME] == LEDGER_STATUS:
                    self.ledgerManager.processLedgerStatus(cMsg, frm)
                if msg[OP_FIELD_NAME] == CONSISTENCY_PROOF:
                    self.ledgerManager.processConsistencyProof(cMsg, frm)
                if msg[OP_FIELD_NAME] == CATCHUP_REP:
                    self.ledgerManager.processCatchupRep(cMsg, frm)
            elif msg[OP_FIELD_NAME] == REQACK:
                self.reqRepStore.addAck(msg, frm)
                self.gotExpected(msg, frm)
            elif msg[OP_FIELD_NAME] == REQNACK:
                self.reqRepStore.addNack(msg, frm)
                self.gotExpected(msg, frm)
            elif msg[OP_FIELD_NAME] == REPLY:
                result = msg[f.RESULT.nm]
                identifier = msg[f.RESULT.nm][f.IDENTIFIER.nm]
                reqId = msg[f.RESULT.nm][f.REQ_ID.nm]
                numReplies = self.reqRepStore.addReply(identifier, reqId, frm,
                                                       result)
                self.gotExpected(msg, frm)
                self.postReplyRecvd(identifier, reqId, frm, result, numReplies)

    def postReplyRecvd(self, identifier, reqId, frm, result, numReplies):
        if not self.txnLog.hasTxn(identifier, reqId) and numReplies > self.f:
            replies = self.reqRepStore.getReplies(identifier, reqId).values()
            reply = checkIfMoreThanFSameItems(replies, self.f)
            if reply:
                self.txnLog.append(identifier, reqId, reply)
                return reply

    def _statusChanged(self, old, new):
        # do nothing for now
        pass

    def onStopping(self, *args, **kwargs):
        self.nodestack.nextCheck = 0
        self.nodestack.stop()
        if self._ledger:
            self.ledgerManager.setLedgerState(0, LedgerState.not_synced)
            self.mode = None

    def getReply(self, identifier: str, reqId: int) -> Optional[Reply]:
        """
        Accepts reply message from node if the reply is matching

        :param identifier: identifier of the entity making the request
        :param reqId: Request Id
        :return: Reply message only when valid and matching
        (None, NOT_FOUND)
        (None, UNCONFIRMED) f+1 not reached
        (reply, CONFIRMED) f+1 reached
        """
        try:
            cons = self.hasConsensus(identifier, reqId)
        except KeyError:
            return None, "NOT_FOUND"
        if cons:
            return cons, "CONFIRMED"
        return None, "UNCONFIRMED"

    def getRepliesFromAllNodes(self, identifier: str, reqId: int):
        """
        Accepts a request ID and return a list of results from all the nodes
        for that request

        :param identifier: identifier of the entity making the request
        :param reqId: Request ID
        :return: list of request results from all nodes
        """
        return {frm: msg for msg, frm in self.inBox
                if msg[OP_FIELD_NAME] == REPLY and
                msg[f.RESULT.nm][f.REQ_ID.nm] == reqId and
                msg[f.RESULT.nm][f.IDENTIFIER.nm] == identifier}

    def hasConsensus(self, identifier: str, reqId: int) -> Optional[str]:
        """
        Accepts a request ID and returns True if consensus was reached
        for the request or else False

        :param identifier: identifier of the entity making the request
        :param reqId: Request ID
        """
        replies = self.getRepliesFromAllNodes(identifier, reqId)
        if not replies:
            raise KeyError('{}{}'.format(identifier, reqId))  # NOT_FOUND
        # Check if at least f+1 replies are received or not.
        if self.f + 1 > len(replies):
            return False  # UNCONFIRMED
        else:
            onlyResults = {frm: reply["result"] for frm, reply in
                           replies.items()}
            resultsList = list(onlyResults.values())
            # if all the elements in the resultList are equal - consensus
            # is reached.
            if all(result == resultsList[0] for result in resultsList):
                return resultsList[0]  # CONFIRMED
            else:
                logger.error(
                    "Received a different result from at least one of the nodes..")
                return checkIfMoreThanFSameItems(resultsList, self.f)

    def showReplyDetails(self, identifier: str, reqId: int):
        """
        Accepts a request ID and prints the reply details

        :param identifier: Client's identifier
        :param reqId: Request ID
        """
        replies = self.getRepliesFromAllNodes(identifier, reqId)
        replyInfo = "Node {} replied with result {}"
        if replies:
            for frm, reply in replies.items():
                print(replyInfo.format(frm, reply['result']))
        else:
            print("No replies received from Nodes!")

    def onConnsChanged(self, joined: Set[str], left: Set[str]):
        """
        Modify the current status of the client based on how the node
        connections have changed.
        """
        if self.isGoing():
            if len(self.nodestack.conns) == len(self.nodeReg):
                self.status = Status.started
            elif len(self.nodestack.conns) >= self.minNodesToConnect:
                self.status = Status.started_hungry
            if self.hasSufficientConnections and self.mode == Mode.discovered:
                self.flushMsgsPendingConnection()
        if self._ledger:
            for n in joined:
                self.sendLedgerStatus(n)

    def replyIfConsensus(self, identifier, reqId: int):
        replies, errors = self.reqRepStore.getAllReplies(identifier, reqId)
        r = list(replies.values())[0] if len(replies) > self.f else None
        e = list(errors.values())[0] if len(errors) > self.f else None
        return r, e

    @property
    def hasSufficientConnections(self):
        return len(self.nodestack.conns) >= self.minNodesToConnect

    def hasMadeRequest(self, identifier, reqId: int):
        return self.reqRepStore.hasRequest(identifier, reqId)

    def isRequestSuccessful(self, identifier, reqId):
        acks = self.reqRepStore.getAcks(identifier, reqId)
        nacks = self.reqRepStore.getNacks(identifier, reqId)
        f = getMaxFailures(len(self.nodeReg))
        if len(acks) > f:
            return True, "Done"
        elif len(nacks) > f:
            # TODO: What if the nacks were different from each node?
            return False, list(nacks.values())[0]
        else:
            return None

    def pendReqsTillConnection(self, request, signer=None):
        """
        Enqueue requests that need to be submitted until the client has
        sufficient connections to nodes
        :return:
        """
        self.reqsPendingConnection.append((request, signer))
        logger.debug("{} enqueuing request since not enough connections "
                     "with nodes: {}".format(self, request))

    def flushMsgsPendingConnection(self):
        queueSize = len(self.reqsPendingConnection)
        if queueSize > 0:
            logger.debug("Flushing pending message queue of size {}"
                         .format(queueSize))
            while self.reqsPendingConnection:
                req, signer = self.reqsPendingConnection.popleft()
                self.nodestack.send(req, signer=signer)

    def expectingFor(self, request: Request, nodes: Optional[Set[str]]=None):
        nodes = nodes or {r.name for r in self.nodestack.remotes.values()
                          if self.nodestack.isRemoteConnected(r)}
        now = time.perf_counter()
        self.expectingAcksFor[request.key] = (nodes, now, 0)
        self.expectingRepliesFor[request.key] = (copy.copy(nodes), now, 0)
        self.startRepeating(self.retryForExpected,
                            self.config.CLIENT_REQACK_TIMEOUT)

    def gotExpected(self, msg, frm):
        if msg[OP_FIELD_NAME] == REQACK:
            container = msg
            colls = (self.expectingAcksFor, )
        elif msg[OP_FIELD_NAME] == REPLY:
            container = msg[f.RESULT.nm]
            # If a REQACK sent by a node was lost, resending the request would
            # fetch the reply; the client might also have lost only the REQACK
            # and not the REPLY, so once a REPLY is received the request does
            # not need to be resent
            colls = (self.expectingAcksFor, self.expectingRepliesFor)
        elif msg[OP_FIELD_NAME] == REQNACK:
            container = msg
            colls = (self.expectingAcksFor, self.expectingRepliesFor)
        else:
            raise RuntimeError("{} cannot retry {}".format(self, msg))

        idr = container.get(f.IDENTIFIER.nm)
        reqId = container.get(f.REQ_ID.nm)
        key = (idr, reqId)
        for coll in colls:
            if key in coll:
                if frm in coll[key][0]:
                    coll[key][0].remove(frm)
                if not coll[key][0]:
                    coll.pop(key)

        if not (self.expectingAcksFor or self.expectingRepliesFor):
            self.stopRepeating(self.retryForExpected, strict=False)

    def retryForExpected(self):
        now = time.perf_counter()
        keys = {}
        nodesNotSendingAck = set()

        # Collect nodes which did not send REQACK
        clearKeys = []
        for reqKey, (expectedFrom, lastTried, retries) in \
                self.expectingAcksFor.items():
            if now > (lastTried + self.config.CLIENT_REQACK_TIMEOUT):
                if retries < self.config.CLIENT_MAX_RETRY_ACK:
                    if reqKey not in keys:
                        keys[reqKey] = set()
                    keys[reqKey].update(expectedFrom)
                    nodesNotSendingAck.update(expectedFrom)
                else:
                    clearKeys.append(reqKey)
        for k in clearKeys:
            self.expectingAcksFor.pop(k)

        # Collect nodes which did not send REPLY
        clearKeys = []
        for reqKey, (expectedFrom, lastTried, retries) in \
                self.expectingRepliesFor.items():
            if now > (lastTried + self.config.CLIENT_REPLY_TIMEOUT):
                if retries < self.config.CLIENT_MAX_RETRY_REPLY:
                    if reqKey not in keys:
                        keys[reqKey] = set()
                    keys[reqKey].update(expectedFrom)
                else:
                    clearKeys.append(reqKey)
        for k in clearKeys:
            self.expectingRepliesFor.pop(k)

        for nm in nodesNotSendingAck:
            try:
                remote = self.nodestack.getRemote(nm)
            except RemoteNotFound:
                logger.warn('{} could not find remote {}'.format(self, nm))
                continue
            logger.debug('Remote {} of {} being joined since REQACK not '
                         'received for request'.format(remote, self))
            self.nodestack.join(remote.uid, cascade=True)

        if keys:
            # Need a delay in case connection has to be established with some
            # nodes, a better way is not to assume the delay value but only
            # send requests once the connection is established. Also it is
            # assumed that a connection is not established if a node is not sending
            # REQACK/REQNACK/REPLY, but a little better way is to compare the
            # value in stats of the stack and look for changes in count of
            # `message_reject_rx` but that is not very helpful either since
            # it does not record which node rejected
            delay = 3 if nodesNotSendingAck else 0
            self._schedule(partial(self.resendRequests, keys), delay)

    def resendRequests(self, keys):
        for key, nodes in keys.items():
            if nodes:
                request = self.reqRepStore.getRequest(*key)
                logger.debug('{} resending request {} to {}'.
                             format(self, request, nodes))
                self.sendToNodes(request, nodes)
                now = time.perf_counter()
                if key in self.expectingAcksFor:
                    _, _, c = self.expectingAcksFor[key]
                    self.expectingAcksFor[key] = (nodes, now, c + 1)
                if key in self.expectingRepliesFor:
                    _, _, c = self.expectingRepliesFor[key]
                    self.expectingRepliesFor[key] = (nodes, now, c + 1)

    def sendLedgerStatus(self, nodeName: str):
        ledgerStatus = LedgerStatus(0, self.ledger.size, self.ledger.root_hash)
        rid = self.nodestack.getRemote(nodeName).uid
        self.nodestack.send(ledgerStatus, rid)

    def send(self, msg: Any, *rids: Iterable[int], signer: Signer = None):
        self.nodestack.send(msg, *rids, signer=signer)

    def sendToNodes(self, msg: Any, names: Iterable[str]):
        rids = [rid for rid, r in self.nodestack.remotes.items() if r.name in names]
        self.nodestack.send(msg, *rids)

    @staticmethod
    def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
        """
        Verifies the correctness of the merkle proof provided in the reply from
        the node. Returns True if verified to be correct, throws an exception
        otherwise.

        :param replies: One or more replies for which Merkle Proofs have to be
        verified
        :raises ProofError: The proof is invalid
        :return: True
        """
        verifier = MerkleVerifier()
        fields = getTxnOrderedFields()
        serializer = CompactSerializer(fields=fields)
        for r in replies:
            seqNo = r[f.RESULT.nm][F.seqNo.name]
            rootHash = base64.b64decode(
                r[f.RESULT.nm][F.rootHash.name].encode())
            auditPath = [base64.b64decode(
                a.encode()) for a in r[f.RESULT.nm][F.auditPath.name]]
            filtered = ((k, v) for (k, v) in r[f.RESULT.nm].items()
                        if k not in
                        [F.auditPath.name, F.seqNo.name, F.rootHash.name])
            result = serializer.serialize(dict(filtered))
            verifier.verify_leaf_inclusion(result, seqNo - 1,
                                           auditPath,
                                           STH(tree_size=seqNo,
                                               sha256_root_hash=rootHash))
        return True
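
getReply above reports one of three outcomes: (None, "NOT_FOUND") before any reply, (None, "UNCONFIRMED") until at least f + 1 replies arrive and agree, and (reply, "CONFIRMED") once they do. A standalone sketch of the f + 1 threshold used by hasConsensus, assuming the usual 4-node pool where getMaxFailures(4) == 1:

f = 1                               # max faulty nodes tolerated in a 4-node pool
replies_received = 1
assert replies_received < f + 1     # still UNCONFIRMED
replies_received = 2
assert replies_received >= f + 1    # enough for CONFIRMED, if the results match
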
Example #14
class FakeNode:
    ledger_ids = [0]

    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.f = 1
        self.replicas = dict()
        self.requests = []
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {
            name: HA("127.0.0.1", 0) for name in self.allNodeNames
        }
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False, config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self, ownedByNode=True)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = ViewChanger(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()

    @property
    def viewNo(self):
        return None if self.view_changer is None else self.view_changer.view_no

    @property
    def ledger_summary(self):
        return [li.ledger_summary for li in
                self.ledgerManager.ledgerRegistry.values()]

    def get_name_by_rank(self, name, nodeReg=None):
        # This is used only for getting name of next primary, so
        # it just returns a constant
        return 'Node2'

    def primary_selected(self, instance_id):
        self._found = True

    def is_primary_found(self):
        return self._found

    @property
    def master_primary_name(self) -> Optional[str]:
        nm = self.replicas[0].primaryName
        if nm:
            return Replica.getNodeName(nm)

    @property
    def master_replica(self):
        return self.replicas[0]

    @property
    def is_synced(self):
        return self.mode >= Mode.synced

    def on_view_change_start(self):
        pass

    def start_catchup(self):
        pass
Example #15
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA]=None,
                 ha: Union[HA, Tuple[str, int]]=None,
                 basedirpath: str=None,
                 config=None,
                 sighex: str=None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        signer = Signer(sighex)
        sighex = signer.keyhex
        verkey = rawToFriendly(signer.verraw)

        self.name = name
        self.stackName = verkey

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(self.stackName, basedirpath):
            cha = getHaFromLocalEstate(self.stackName, basedirpath)
            if cha:
                cha = HA(*cha)
                logger.debug("Client {} ignoring given ha {} and using {}".
                             format(self.name, ha, cha))
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self, self.name, baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(0, self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        HasActionQueue.__init__(self)

        self.setF()

        stackargs = dict(name=self.stackName,
                         ha=cha,
                         main=False,  # stops incoming vacuous joins
                         auto=AutoMode.always)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        self.nodestack = self.nodeStackClass(stackargs,
                                             self.handleOneNodeMsg,
                                             self.nodeReg,
                                             sighex)
        self.nodestack.onConnsChanged = self.onConnsChanged

        if self.nodeReg:
            logger.info("Client {} initialized with the following node registry:"
                        .format(self.name))
            lengths = [max(x) for x in zip(*[
                (len(name), len(host), len(str(port)))
                for name, (host, port) in self.nodeReg.items()])]
            fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
                *lengths)
            for name, (host, port) in self.nodeReg.items():
                logger.info(fmt.format(name, host, port))
        else:
            logger.info(
                "Client {} found an empty node registry:".format(self.name))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have a couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger

        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REQACK
        self.expectingAcksFor = {}

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REPLY
        self.expectingRepliesFor = {}

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))
Example #16
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(name, basedirpath):
            logger.debug("Client {} ignoring given ha".format(ha))
            cha = getHaFromLocalEstate(name, basedirpath)
            if cha:
                cha = HA(*cha)
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.name = name
        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self,
                                self.name,
                                baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(
                0,
                self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        self.setF()

        stackargs = dict(
            name=name,
            ha=cha,
            main=False,  # stops incoming vacuous joins
            auto=AutoMode.always)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        self.nodestack = self.nodeStackClass(stackargs, self.handleOneNodeMsg,
                                             self.nodeReg)
        self.nodestack.onConnsChanged = self.onConnsChanged

        logger.info(
            "Client {} initialized with the following node registry:".format(
                name))
        lengths = [
            max(x)
            for x in zip(*[(len(name), len(host), len(str(port)))
                           for name, (host, port) in self.nodeReg.items()])
        ]
        fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
            *lengths)
        for name, (host, port) in self.nodeReg.items():
            logger.info(fmt.format(name, host, port))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have a couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger
        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))
Example #17
class Client(Motor, MessageProcessor, HasFileStorage, HasPoolManager):
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(name, basedirpath):
            logger.debug("Client {} ignoring given ha".format(ha))
            cha = getHaFromLocalEstate(name, basedirpath)
            if cha:
                cha = HA(*cha)
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.name = name
        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self,
                                self.name,
                                baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(
                0,
                self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        self.setF()

        stackargs = dict(
            name=name,
            ha=cha,
            main=False,  # stops incoming vacuous joins
            auto=AutoMode.always)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        self.nodestack = self.nodeStackClass(stackargs, self.handleOneNodeMsg,
                                             self.nodeReg)
        self.nodestack.onConnsChanged = self.onConnsChanged

        logger.info(
            "Client {} initialized with the following node registry:".format(
                name))
        lengths = [
            max(x)
            for x in zip(*[(len(name), len(host), len(str(port)))
                           for name, (host, port) in self.nodeReg.items()])
        ]
        fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
            *lengths)
        for name, (host, port) in self.nodeReg.items():
            logger.info(fmt.format(name, host, port))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger
        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))

    def getReqRepStore(self):
        return ClientReqRepStoreFile(self.name, self.basedirpath)

    def getTxnLogStore(self):
        return ClientTxnLog(self.name, self.basedirpath)

    def __repr__(self):
        return self.name

    def postPoolLedgerCaughtUp(self):
        self.mode = Mode.discovered
        # For the scenario where client has already connected to nodes reading
        #  the genesis pool transactions and that is enough
        if self.hasSufficientConnections:
            self.flushMsgsPendingConnection()

    def postTxnFromCatchupAddedToLedger(self, ledgerType: int, txn: Any):
        if ledgerType != 0:
            logger.error("{} got unknown ledger type {}".format(
                self, ledgerType))
            return
        self.processPoolTxn(txn)

    # noinspection PyAttributeOutsideInit
    def setF(self):
        nodeCount = len(self.nodeReg)
        self.f = getMaxFailures(nodeCount)
        self.minNodesToConnect = self.f + 1
        self.totalNodes = nodeCount

    @staticmethod
    def exists(name, basedirpath):
        return os.path.exists(basedirpath) and \
               os.path.exists(os.path.join(basedirpath, name))

    @property
    def nodeStackClass(self) -> NodeStack:
        return NodeStack

    def start(self, loop):
        oldstatus = self.status
        if oldstatus in Status.going():
            logger.info("{} is already {}, so start has no effect".format(
                self, self.status.name))
        else:
            super().start(loop)
            self.nodestack.start()
            self.nodestack.maintainConnections()
            if self._ledger:
                self.ledgerManager.setLedgerCanSync(0, True)
                self.mode = Mode.starting

    async def prod(self, limit) -> int:
        """
        async function that returns the number of events

        :param limit: The number of messages to be processed
        :return: The number of events up to a prescribed `limit`
        """
        s = 0
        if self.isGoing():
            s = await self.nodestack.service(limit)
            await self.nodestack.serviceLifecycle()
        self.nodestack.flushOutBoxes()
        # TODO: This if condition has to be removed. `_ledger`, once set, won't
        # ever be reset, so the `prod` method should be patched in `__init__`.
        if self._ledger:
            s += self.ledgerManager._serviceActions()
        return s

    def createRequest(self,
                      operation: Mapping,
                      identifier: str = None) -> Request:
        """
        Client creates a request which includes the requested operation and request Id

        :param operation: requested operation
        :return: New client request
        """

        request = Request(identifier=identifier or self.defaultIdentifier,
                          operation=operation)
        # DEPR
        # self.setReqId(request)
        return request

    def submitReqs(self, *reqs: Request) -> List[Request]:
        requests = []
        for request in reqs:
            # DEPR
            # self.setReqId(request)
            if self.mode == Mode.discovered and self.hasSufficientConnections:
                self.nodestack.send(request)
            else:
                self.pendReqsTillConnection(request)
            requests.append(request)
        for r in requests:
            self.reqRepStore.addRequest(r)
        return requests

    def handleOneNodeMsg(self, wrappedMsg, excludeFromCli=None) -> None:
        """
        Handles single message from a node, and appends it to a queue
        :param wrappedMsg: Reply received by the client from the node
        """
        self.inBox.append(wrappedMsg)
        msg, frm = wrappedMsg
        # Do not print result of transaction type `POOL_LEDGER_TXNS` on the CLI
        txnTypes = (POOL_LEDGER_TXNS, LEDGER_STATUS, CONSISTENCY_PROOF,
                    CATCHUP_REP)
        printOnCli = not excludeFromCli and msg.get(OP_FIELD_NAME) not \
                                            in txnTypes
        logger.debug("Client {} got msg from node {}: {}".format(
            self.name, frm, msg),
                     extra={"cli": printOnCli})
        if OP_FIELD_NAME in msg:
            if msg[OP_FIELD_NAME] in txnTypes and self._ledger:
                op = msg.get(OP_FIELD_NAME, None)
                if not op:
                    raise MissingNodeOp
                # TODO: Refactor this copying
                cls = TaggedTuples.get(op, None)
                t = copy.deepcopy(msg)
                t.pop(OP_FIELD_NAME, None)
                cMsg = cls(**t)
                if msg[OP_FIELD_NAME] == POOL_LEDGER_TXNS:
                    self.poolTxnReceived(cMsg, frm)
                if msg[OP_FIELD_NAME] == LEDGER_STATUS:
                    self.ledgerManager.processLedgerStatus(cMsg, frm)
                if msg[OP_FIELD_NAME] == CONSISTENCY_PROOF:
                    self.ledgerManager.processConsistencyProof(cMsg, frm)
                if msg[OP_FIELD_NAME] == CATCHUP_REP:
                    self.ledgerManager.processCatchupRep(cMsg, frm)
            elif msg[OP_FIELD_NAME] == REQACK:
                self.reqRepStore.addAck(msg, frm)
            elif msg[OP_FIELD_NAME] == REQNACK:
                self.reqRepStore.addNack(msg, frm)
            elif msg[OP_FIELD_NAME] == REPLY:
                result = msg[f.RESULT.nm]
                reqId = msg[f.RESULT.nm][f.REQ_ID.nm]
                numReplies = self.reqRepStore.addReply(reqId, frm, result)
                self.postReplyRecvd(reqId, frm, result, numReplies)

    def postReplyRecvd(self, reqId, frm, result, numReplies):
        if not self.txnLog.hasTxnWithReqId(reqId) and numReplies > self.f:
            replies = self.reqRepStore.getReplies(reqId).values()
            reply = checkIfMoreThanFSameItems(replies, self.f)
            if reply:
                self.txnLog.append(reqId, reply)
                return reply

    def _statusChanged(self, old, new):
        # do nothing for now
        pass

    def onStopping(self, *args, **kwargs):
        self.nodestack.nextCheck = 0
        self.nodestack.stop()
        if self._ledger:
            self.ledgerManager.setLedgerState(0, LedgerState.not_synced)
            self.mode = None

    def getReply(self, reqId: int) -> Optional[Reply]:
        """
        Returns the reply for a request together with its confirmation status

        :param reqId: Request Id
        :return: one of
        (None, NOT_FOUND) no replies received for this request
        (None, UNCONFIRMED) fewer than f+1 matching replies
        (reply, CONFIRMED) at least f+1 matching replies
        """
        try:
            cons = self.hasConsensus(reqId)
        except KeyError:
            return None, "NOT_FOUND"
        if cons:
            return cons, "CONFIRMED"
        return None, "UNCONFIRMED"

    def getRepliesFromAllNodes(self, reqId: int):
        """
        Accepts a request ID and returns the replies received from all the
        nodes for that request

        :param reqId: Request ID
        :return: dictionary of replies keyed by the name of the sending node
        """
        return {
            frm: msg
            for msg, frm in self.inBox if msg[OP_FIELD_NAME] == REPLY
            and msg[f.RESULT.nm][f.REQ_ID.nm] == reqId
        }

    def hasConsensus(self, reqId: int) -> Optional[str]:
        """
        Accepts a request ID and returns the agreed result if consensus
        (at least f+1 matching replies) was reached, else False

        :param reqId: Request ID
        """
        replies = self.getRepliesFromAllNodes(reqId)
        if not replies:
            raise KeyError(reqId)  # NOT_FOUND
        # Check if at least f+1 replies are received or not.
        if self.f + 1 > len(replies):
            return False  # UNCONFIRMED
        else:
            onlyResults = {
                frm: reply["result"]
                for frm, reply in replies.items()
            }
            resultsList = list(onlyResults.values())
            # if all the elements in the resultList are equal - consensus
            # is reached.
            if all(result == resultsList[0] for result in resultsList):
                return resultsList[0]  # CONFIRMED
            else:
                logger.error(
                    "Received a different result from at least one of the nodes.."
                )
                return checkIfMoreThanFSameItems(resultsList, self.f)

    def showReplyDetails(self, reqId: int):
        """
        Accepts a request ID and prints the reply details

        :param reqId: Request ID
        """
        replies = self.getRepliesFromAllNodes(reqId)
        replyInfo = "Node {} replied with result {}"
        if replies:
            for frm, reply in replies.items():
                print(replyInfo.format(frm, reply['result']))
        else:
            print("No replies received from Nodes!")

    def onConnsChanged(self, joined: Set[str], left: Set[str]):
        """
        Modify the current status of the client based on the status of the
        connections changed.
        """
        if self.isGoing():
            if len(self.nodestack.conns) == len(self.nodeReg):
                self.status = Status.started
            elif len(self.nodestack.conns) >= self.minNodesToConnect:
                self.status = Status.started_hungry
            if self.hasSufficientConnections and self.mode == Mode.discovered:
                self.flushMsgsPendingConnection()
        if self._ledger:
            for n in joined:
                self.sendLedgerStatus(n)

    def replyIfConsensus(self, reqId: int):
        replies, errors = self.reqRepStore.getAllReplies(reqId)
        r = list(replies.values())[0] if len(replies) > self.f else None
        e = list(errors.values())[0] if len(errors) > self.f else None
        return r, e

    @property
    def hasSufficientConnections(self):
        return len(self.nodestack.conns) >= self.minNodesToConnect

    def hasMadeRequest(self, reqId: int):
        return self.reqRepStore.hasRequest(reqId)

    def isRequestSuccessful(self, reqId):
        acks = self.reqRepStore.getAcks(reqId)
        nacks = self.reqRepStore.getNacks(reqId)
        f = getMaxFailures(len(self.nodeReg))
        if len(acks) > f:
            return True, "Done"
        elif len(nacks) > f:
            # TODO: What if the nacks were different from each node?
            return False, list(nacks.values())[0]
        else:
            return None

    def pendReqsTillConnection(self, request, signer=None):
        """
        Enqueue requests that need to be submitted until the client has
        sufficient connections to nodes
        :return:
        """
        self.reqsPendingConnection.append((request, signer))
        logger.debug("Enqueuing request since not enough connections "
                     "with nodes: {}".format(request))

    def flushMsgsPendingConnection(self):
        queueSize = len(self.reqsPendingConnection)
        if queueSize > 0:
            logger.debug(
                "Flushing pending message queue of size {}".format(queueSize))
            while self.reqsPendingConnection:
                req, signer = self.reqsPendingConnection.popleft()
                self.nodestack.send(req, signer=signer)

    def sendLedgerStatus(self, nodeName: str):
        ledgerStatus = LedgerStatus(0, self.ledger.size, self.ledger.root_hash)
        rid = self.nodestack.getRemote(nodeName).uid
        self.nodestack.send(ledgerStatus, rid)

    def send(self, msg: Any, *rids: Iterable[int], signer: Signer = None):
        self.nodestack.send(msg, *rids, signer=signer)

    @staticmethod
    def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
        """
        Verifies the correctness of the merkle proof provided in the reply from
        the node. Returns True if verified to be correct, throws an exception
        otherwise.

        :param replies: One or more replies for which Merkle Proofs have to be
        verified
        :raises ProofError: The proof is invalid
        :return: True
        """
        verifier = MerkleVerifier()
        fields = getTxnOrderedFields()
        serializer = CompactSerializer(fields=fields)
        for r in replies:
            seqNo = r[f.RESULT.nm][F.seqNo.name]
            rootHash = base64.b64decode(
                r[f.RESULT.nm][F.rootHash.name].encode())
            auditPath = [
                base64.b64decode(a.encode())
                for a in r[f.RESULT.nm][F.auditPath.name]
            ]
            filtered = (
                (k, v) for (k, v) in r[f.RESULT.nm].items()
                if k not in [F.auditPath.name, F.seqNo.name, F.rootHash.name])
            result = serializer.serialize(dict(filtered))
            verifier.verify_leaf_inclusion(
                result, seqNo - 1, auditPath,
                STH(tree_size=seqNo, sha256_root_hash=rootHash))
        return True
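
`verifyMerkleProof` above delegates the actual check to `MerkleVerifier.verify_leaf_inclusion`. The self-contained sketch below shows the RFC 6962-style audit-path check that such a verifier performs, for the simplest case of a two-leaf tree; it is illustrative only and not the plenum or ledger code:

import hashlib

def leaf_hash(data: bytes) -> bytes:
    # RFC 6962 leaf hash: SHA-256 over a 0x00 prefix plus the leaf data
    return hashlib.sha256(b"\x00" + data).digest()

def node_hash(left: bytes, right: bytes) -> bytes:
    # RFC 6962 interior node hash: SHA-256 over a 0x01 prefix plus both children
    return hashlib.sha256(b"\x01" + left + right).digest()

# Two-leaf tree: root = node_hash(leaf_hash(leaf0), leaf_hash(leaf1))
leaf0, leaf1 = b"txn-1", b"txn-2"
root = node_hash(leaf_hash(leaf0), leaf_hash(leaf1))

# The audit path for leaf0 (index 0) is just the sibling leaf hash;
# recomputing the root from the leaf and its path proves inclusion.
audit_path = [leaf_hash(leaf1)]
recomputed = node_hash(leaf_hash(leaf0), audit_path[0])
assert recomputed == root
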
Beispiel #18
0
class FakeNode:
    ledger_ids = [POOL_LEDGER_ID, CONFIG_LEDGER_ID, DOMAIN_LEDGER_ID]

    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {
            name: HA("127.0.0.1", 0) for name in self.allNodeNames
        }
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False, config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self, ownedByNode=True)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = ViewChanger(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.catchup_rounds_without_txns = 0
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

    @property
    def viewNo(self):
        return None if self.view_changer is None else self.view_changer.view_no

    @property
    def ledger_summary(self):
        return [li.ledger_summary for li in
                self.ledgerManager.ledgerRegistry.values()]

    def get_name_by_rank(self, name, nodeReg=None):
        # This is used only for getting name of next primary, so
        # it just returns a constant
        return 'Node2'

    def primary_selected(self, instance_id):
        self._found = True

    def is_primary_found(self):
        return self._found

    @property
    def master_primary_name(self) -> Optional[str]:
        nm = self.replicas[0].primaryName
        if nm:
            return Replica.getNodeName(nm)

    @property
    def master_replica(self):
        return self.replicas[0]

    @property
    def is_synced(self):
        return self.mode >= Mode.synced

    def on_view_change_start(self):
        pass

    def start_catchup(self):
        pass

    def allLedgersCaughtUp(self):
        Node.allLedgersCaughtUp(self)

    def _clean_non_forwarded_ordered(self):
        return Node._clean_non_forwarded_ordered(self)

    def num_txns_caught_up_in_last_catchup(self):
        return Node.num_txns_caught_up_in_last_catchup(self)

    def mark_request_as_executed(self, request):
        Node.mark_request_as_executed(self, request)

    def _clean_req_from_verified(self, request):
        pass

    def doneProcessingReq(self, key):
        pass

    def processStashedOrderedReqs(self):
        pass

    def is_catchup_needed(self):
        return False

    def no_more_catchups_needed(self):
        pass

    def select_primaries(self):
        pass
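
`FakeNode` above (like the `FakeSomething` helper used in the next example) follows the usual test-double idea: expose only the attributes that the collaborator under test actually reads. A minimal sketch of that attribute-bag pattern (the class name here is made up; the real `FakeSomething` lives in the plenum test utilities):

class FakeSomethingSketch:
    """Attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **attributes):
        for name, value in attributes.items():
            setattr(self, name, value)

# A collaborator that only ever reads `node.name` and `node.f`
def primary_name_for(node):
    return "{}:{}".format(node.name, node.f)

fake_node = FakeSomethingSketch(name="Node1", f=1, timer=None)
assert primary_name_for(fake_node) == "Node1:1"
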
Beispiel #19
0
def ledger_manager():
    fakeNode = FakeSomething(timer=None, allNodeNames={'Node1'})
    lm = LedgerManager(fakeNode)
    setattr(fakeNode, 'ledgerManager', lm)
    return lm
Beispiel #20
0
def test_ledger_status_quorum():
    N = 10
    f = getMaxFailures(N)
    assert not LedgerManager.has_ledger_status_quorum(f + 1, N)
    assert LedgerManager.has_ledger_status_quorum(N - f - 1, N)
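
Assuming `getMaxFailures(N)` is the usual BFT bound `(N - 1) // 3` (an assumption inferred from the asserts above, not taken from the plenum source), the arithmetic behind this test for N = 10 is: f = 3, so the f + 1 = 4 ledger statuses asserted insufficient fall below the quorum, while the N - f - 1 = 6 asserted sufficient meet it:

def max_failures(n: int) -> int:
    # Standard BFT bound: tolerate at most f faulty nodes when n >= 3f + 1
    return (n - 1) // 3

n = 10
f = max_failures(n)    # 3
assert f + 1 == 4      # the count the test expects to be below quorum
assert n - f - 1 == 6  # the count the test expects to satisfy quorum
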
Beispiel #21
0
class FakeNode:
    ledger_ids = [POOL_LEDGER_ID, CONFIG_LEDGER_ID, DOMAIN_LEDGER_ID]

    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False,
                       config=self.config),
            2: Replica(node=self, instId=2, isMaster=False,
                       config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self, ownedByNode=True)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = ViewChanger(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.catchup_rounds_without_txns = 0
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

    @property
    def viewNo(self):
        return None if self.view_changer is None else self.view_changer.view_no

    @property
    def ledger_summary(self):
        return [
            li.ledger_summary
            for li in self.ledgerManager.ledgerRegistry.values()
        ]

    def get_name_by_rank(self, name, nodeReg=None):
        # This is used only for getting name of next primary, so
        # it just returns a constant
        return 'Node2'

    def primary_selected(self, instance_id):
        self._found = True

    def is_primary_found(self):
        return self._found

    @property
    def master_primary_name(self) -> Optional[str]:
        nm = self.replicas[0].primaryName
        if nm:
            return Replica.getNodeName(nm)

    @property
    def master_replica(self):
        return self.replicas[0]

    @property
    def is_synced(self):
        return self.mode >= Mode.synced

    def on_view_change_start(self):
        pass

    def start_catchup(self):
        pass

    def allLedgersCaughtUp(self):
        Node.allLedgersCaughtUp(self)

    def _clean_non_forwarded_ordered(self):
        return Node._clean_non_forwarded_ordered(self)

    def num_txns_caught_up_in_last_catchup(self):
        return Node.num_txns_caught_up_in_last_catchup(self)

    def mark_request_as_executed(self, request):
        Node.mark_request_as_executed(self, request)

    def _clean_req_from_verified(self, request):
        pass

    def doneProcessingReq(self, key):
        pass

    def processStashedOrderedReqs(self):
        pass

    def is_catchup_needed(self):
        return False

    def no_more_catchups_needed(self):
        pass

    def select_primaries(self):
        pass

    def utc_epoch(self):
        return get_utc_epoch()
Beispiel #22
0
class Client(Motor,
             MessageProcessor,
             HasFileStorage,
             HasPoolManager,
             HasActionQueue):
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 genesis_dir: str = None,
                 ledger_dir: str = None,
                 keys_dir: str = None,
                 plugins_dir: str = None,
                 config=None,
                 sighex: str = None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()

        dataDir = self.config.clientDataDir or "data/clients"
        self.basedirpath = basedirpath or self.config.CLI_BASE_DIR
        self.basedirpath = os.path.expanduser(self.basedirpath)

        signer = Signer(sighex)
        sighex = signer.keyraw
        verkey = rawToFriendly(signer.verraw)

        self.stackName = verkey
        # TODO: Have a way for a client to have a user friendly name. Does it
        # matter now? It used to matter in some CLI examples in the past.
        # self.name = name
        self.name = self.stackName or 'Client~' + str(id(self))

        self.genesis_dir = genesis_dir or self.basedirpath
        self.ledger_dir = ledger_dir or os.path.join(self.basedirpath, dataDir, self.name)
        self.plugins_dir = plugins_dir or self.basedirpath
        _keys_dir = keys_dir or self.basedirpath
        self.keys_dir = os.path.join(_keys_dir, "keys")

        cha = None
        if self.exists(self.stackName, self.keys_dir):
            cha = self.nodeStackClass.getHaFromLocal(
                self.stackName, self.keys_dir)
            if cha:
                cha = HA(*cha)
                logger.debug("Client {} ignoring given ha {} and using {}".
                             format(self.name, ha, cha))
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        HasFileStorage.__init__(self, self.ledger_dir)

        # TODO: Find a proper name
        self.alias = name

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(
                POOL_LEDGER_ID,
                self.ledger,
                preCatchupStartClbk=self.prePoolLedgerCatchup,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        HasActionQueue.__init__(self)

        self.setPoolParams()

        stackargs = dict(name=self.stackName,
                         ha=cha,
                         main=False,  # stops incoming vacuous joins
                         auth_mode=AuthMode.ALLOW_ANY.value)
        stackargs['basedirpath'] = self.keys_dir
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        # TODO I think this is a bug here, sighex is getting passed in the seed
        # parameter
        self.nodestack = self.nodeStackClass(stackargs,
                                             self.handleOneNodeMsg,
                                             self.nodeReg,
                                             sighex)
        self.nodestack.onConnsChanged = self.onConnsChanged

        if self.nodeReg:
            logger.info(
                "Client {} initialized with the following node registry:".format(
                    self.alias))
            lengths = [max(x) for x in zip(*[
                (len(name), len(host), len(str(port)))
                for name, (host, port) in self.nodeReg.items()])]
            fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
                *lengths)
            for name, (host, port) in self.nodeReg.items():
                logger.info(fmt.format(name, host, port))
        else:
            logger.info(
                "Client {} found an empty node registry:".format(self.alias))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger

        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REQACK
        self.expectingAcksFor = {}

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REPLY
        self.expectingRepliesFor = {}

        self._observers = {}  # type: Dict[str, Callable]
        self._observerSet = set()  # makes it easier to guard against duplicates

        plugins_to_load = self.config.PluginsToLoad if hasattr(self.config, "PluginsToLoad") else None
        tp = loadPlugins(self.plugins_dir, plugins_to_load)

        logger.debug("total plugins loaded in client: {}".format(tp))

        self._multi_sig_verifier = self._create_multi_sig_verifier()
        self._read_only_requests = set()

    @lazy_field
    def _bls_register(self):
        return BlsKeyRegisterPoolLedger(self.ledger)

    def _create_multi_sig_verifier(self) -> BlsCryptoVerifier:
        verifier = create_default_bls_crypto_factory() \
            .create_bls_crypto_verifier()
        return verifier

    def getReqRepStore(self):
        return ClientReqRepStoreFile(self.ledger_dir)

    def getTxnLogStore(self):
        return ClientTxnLog(self.ledger_dir)

    def __repr__(self):
        return self.name

    def prePoolLedgerCatchup(self, **kwargs):
        self.mode = Mode.discovering

    def postPoolLedgerCaughtUp(self):
        self.mode = Mode.discovered
        # For the scenario where client has already connected to nodes reading
        #  the genesis pool transactions and that is enough
        self.flushMsgsPendingConnection()

    def postTxnFromCatchupAddedToLedger(self, ledgerType: int, txn: Any):
        if ledgerType != 0:
            logger.error("{} got unknown ledger type {}".
                         format(self, ledgerType))
            return
        self.processPoolTxn(txn)

    # noinspection PyAttributeOutsideInit
    def setPoolParams(self):
        nodeCount = len(self.nodeReg)
        self.f = getMaxFailures(nodeCount)
        self.minNodesToConnect = self.f + 1
        self.totalNodes = nodeCount
        self.quorums = Quorums(nodeCount)
        logger.info(
            "{} updated its pool parameters: f {}, totalNodes {},"
            "minNodesToConnect {}, quorums {}".format(
                self.alias,
                self.f, self.totalNodes,
                self.minNodesToConnect, self.quorums))

    @staticmethod
    def exists(name, base_dir):
        return os.path.exists(base_dir) and \
            os.path.exists(os.path.join(base_dir, name))

    @property
    def nodeStackClass(self) -> NetworkInterface:
        return nodeStackClass

    def start(self, loop):
        oldstatus = self.status
        if oldstatus in Status.going():
            logger.info("{} is already {}, so start has no effect".
                        format(self.alias, self.status.name))
        else:
            super().start(loop)
            self.nodestack.start()
            self.nodestack.maintainConnections(force=True)
            if self.ledger:
                self.ledgerManager.setLedgerCanSync(POOL_LEDGER_ID, True)
                self.mode = Mode.starting

    async def prod(self, limit) -> int:
        """
        async function that returns the number of events

        :param limit: The number of messages to be processed
        :return: The number of events up to a prescribed `limit`
        """
        s = 0
        if self.isGoing():
            s = await self.nodestack.service(limit)
            self.nodestack.serviceLifecycle()
        self.nodestack.flushOutBoxes()
        s += self._serviceActions()
        # TODO: This if condition has to be removed. `_ledger`, once set, won't
        # ever be reset, so the `prod` method should be patched in `__init__`.
        if self.ledger:
            s += self.ledgerManager._serviceActions()
        return s

    def submitReqs(self, *reqs: Request) -> Tuple[List[Request], List[str]]:
        requests = []
        errs = []

        for request in reqs:
            is_read_only = request.txn_type in self._read_only_requests
            if self.can_send_request(request):
                recipients = self._connected_node_names
                if is_read_only and len(recipients) > 1:
                    recipients = random.sample(list(recipients), 1)

                logger.debug('Client {} sending request {} to recipients {}'
                             .format(self, request, recipients))

                stat, err_msg = self.sendToNodes(request, names=recipients)

                if stat:
                    self._expect_replies(request, recipients)
                else:
                    errs.append(err_msg)
                    logger.debug(
                        'Client {} request failed {}'.format(self, err_msg))
                    continue
            else:
                logger.debug(
                    "{} pending request since in mode {} and "
                    "connected to {} nodes".format(
                        self, self.mode, self.nodestack.connecteds))
                self.pendReqsTillConnection(request)
            requests.append(request)
        for r in requests:
            self.reqRepStore.addRequest(r)
        return requests, errs

    def handleOneNodeMsg(self, wrappedMsg, excludeFromCli=None) -> None:
        """
        Handles single message from a node, and appends it to a queue
        :param wrappedMsg: Reply received by the client from the node
        """
        self.inBox.append(wrappedMsg)
        msg, frm = wrappedMsg
        # Do not print result of transaction type `POOL_LEDGER_TXNS` on the CLI
        ledgerTxnTypes = (POOL_LEDGER_TXNS, LEDGER_STATUS, CONSISTENCY_PROOF,
                          CATCHUP_REP)
        printOnCli = not excludeFromCli and msg.get(OP_FIELD_NAME) not \
            in ledgerTxnTypes
        logger.info("Client {} got msg from node {}: {}".
                    format(self.name, frm, msg),
                    extra={"cli": printOnCli})
        if OP_FIELD_NAME in msg:
            if msg[OP_FIELD_NAME] in ledgerTxnTypes and self.ledger:
                cMsg = node_message_factory.get_instance(**msg)
                if msg[OP_FIELD_NAME] == POOL_LEDGER_TXNS:
                    self.poolTxnReceived(cMsg, frm)
                if msg[OP_FIELD_NAME] == LEDGER_STATUS:
                    self.ledgerManager.processLedgerStatus(cMsg, frm)
                if msg[OP_FIELD_NAME] == CONSISTENCY_PROOF:
                    self.ledgerManager.processConsistencyProof(cMsg, frm)
                if msg[OP_FIELD_NAME] == CATCHUP_REP:
                    self.ledgerManager.processCatchupRep(cMsg, frm)
            elif msg[OP_FIELD_NAME] == REQACK:
                self.reqRepStore.addAck(msg, frm)
                self._got_expected(msg, frm)
            elif msg[OP_FIELD_NAME] == REQNACK:
                self.reqRepStore.addNack(msg, frm)
                self._got_expected(msg, frm)
            elif msg[OP_FIELD_NAME] == REJECT:
                self.reqRepStore.addReject(msg, frm)
                self._got_expected(msg, frm)
            elif msg[OP_FIELD_NAME] == REPLY:
                result = msg[f.RESULT.nm]
                identifier = get_reply_itentifier(result)
                reqId = get_reply_reqId(result)
                numReplies = self.reqRepStore.addReply(identifier,
                                                       reqId,
                                                       frm,
                                                       result)

                self._got_expected(msg, frm)
                self.postReplyRecvd(identifier, reqId, frm, result, numReplies)

    def postReplyRecvd(self, identifier, reqId, frm, result, numReplies):
        if not self.txnLog.hasTxn(identifier, reqId):
            reply, _ = self.getReply(identifier, reqId)
            if reply:
                self.txnLog.append(identifier, reqId, reply)
                for name in self._observers:
                    try:
                        self._observers[name](name, reqId, frm, result,
                                              numReplies)
                    except Exception as ex:
                        # TODO: All errors should not be shown on CLI, or maybe we
                        # show errors with different color according to the
                        # severity. Like an error occurring due to node sending
                        # a malformed message should not result in an error message
                        # being shown on the cli since the clients would anyway
                        # collect enough replies from other nodes.
                        logger.debug("Observer threw an exception", exc_info=ex)
                return reply
            # Reply is not verified
            key = (identifier, reqId)
            if key not in self.expectingRepliesFor and numReplies == 1:
                # only one node was asked, but its reply cannot be confirmed,
                # so ask other nodes
                recipients = self._connected_node_names.difference({frm})
                self.resendRequests({
                    (identifier, reqId): recipients
                }, force_expect=True)

    def _statusChanged(self, old, new):
        # do nothing for now
        pass

    def onStopping(self, *args, **kwargs):
        logger.debug('Stopping client {}'.format(self))
        self.nodestack.nextCheck = 0
        self.nodestack.stop()
        if self.ledger:
            self.ledgerManager.setLedgerState(
                POOL_LEDGER_ID, LedgerState.not_synced)
            self.mode = None
            self.ledger.stop()
            if self.hashStore and not self.hashStore.closed:
                self.hashStore.close()
        self.txnLog.close()

    def getReply(self, identifier: str, reqId: int) -> Optional:
        """
        Returns the reply for a request together with its confirmation status

        :param identifier: identifier of the entity making the request
        :param reqId: Request Id
        :return: one of
        (None, NOT_FOUND) no replies received for this request
        (None, UNCONFIRMED) no reply quorum and no valid state proof yet
        (reply, CONFIRMED) reply quorum reached or a valid state proof found
        """
        try:
            cons = self.hasConsensus(identifier, reqId)
        except KeyError:
            return None, "NOT_FOUND"
        if cons:
            return cons, "CONFIRMED"
        return None, "UNCONFIRMED"

    def getRepliesFromAllNodes(self, identifier: str, reqId: int):
        """
        Accepts a request ID and returns the replies received from all the
        nodes for that request

        :param identifier: identifier of the entity making the request
        :param reqId: Request ID
        :return: dictionary of replies keyed by the name of the sending node
        """
        return {frm: msg for msg, frm in self.inBox
                if msg[OP_FIELD_NAME] == REPLY and get_reply_reqId(msg[f.RESULT.nm]) == reqId and
                get_reply_itentifier(msg[f.RESULT.nm]) == identifier}

    def hasConsensus(self, identifier: str, reqId: int) -> Optional[Reply]:
        """
        Accepts a request ID and returns the reply for it if a quorum is
        achieved or there is a valid state proof for it.

        :param identifier: identifier of the entity making the request
        :param reqId: Request ID
        """
        full_req_id = '({}:{})'.format(identifier, reqId)
        replies = self.getRepliesFromAllNodes(identifier, reqId)
        if not replies:
            raise KeyError(full_req_id)
        proved_reply = self.take_one_proved(replies, full_req_id)
        if proved_reply:
            logger.debug("Found proved reply for {}".format(full_req_id))
            return proved_reply
        quorumed_reply = self.take_one_quorumed(replies, full_req_id)
        if quorumed_reply:
            logger.debug("Reply quorum for {} achieved"
                         .format(full_req_id))
            return quorumed_reply

    def take_one_quorumed(self, replies, full_req_id):
        """
        Checks whether a sufficient number of equal replies was received from
        different nodes. It uses the following logic:

        1. Check that there are sufficient replies received at all.
           If not - return None.
        2. Check that all these replies are equal.
           If yes - return one of them.
        3. Check that there is a group of equal replies which is large enough.
           If yes - return one reply from this group.
        4. Return None

        """
        if not self.quorums.reply.is_reached(len(replies)):
            return None

        # excluding state proofs from check since they can be different
        def without_state_proof(result):
            if STATE_PROOF in result:
                result.pop(STATE_PROOF)
            return result

        results = [without_state_proof(reply["result"])
                   for reply in replies.values()]

        first = results[0]
        if all(result == first for result in results):
            return first
        logger.debug("Received a different result from "
                     "at least one node for {}"
                     .format(full_req_id))

        result, freq = mostCommonElement(results)
        if not self.quorums.reply.is_reached(freq):
            return None
        return result

    def take_one_proved(self, replies, full_req_id):
        """
        Returns one reply with valid state proof
        """
        for sender, reply in replies.items():
            result = reply['result']
            if STATE_PROOF not in result or result[STATE_PROOF] is None:
                logger.debug("There is no state proof in "
                             "reply for {} from {}"
                             .format(full_req_id, sender))
                continue
            if not self.validate_multi_signature(result[STATE_PROOF]):
                logger.debug("{} got reply for {} with bad "
                             "multi signature from {}"
                             .format(self.name, full_req_id, sender))
                # TODO: do something with this node
                continue
            if not self.validate_proof(result):
                logger.debug("{} got reply for {} with invalid "
                             "state proof from {}"
                             .format(self.name, full_req_id, sender))
                # TODO: do something with this node
                continue
            return result

    def validate_multi_signature(self, state_proof):
        """
        Validates multi signature
        """
        multi_signature = state_proof[MULTI_SIGNATURE]
        if not multi_signature:
            logger.debug("There is a state proof, but no multi signature")
            return False

        participants = multi_signature[MULTI_SIGNATURE_PARTICIPANTS]
        signature = multi_signature[MULTI_SIGNATURE_SIGNATURE]
        value = MultiSignatureValue(
            **(multi_signature[MULTI_SIGNATURE_VALUE])
        ).as_single_value()
        if not self.quorums.bls_signatures.is_reached(len(participants)):
            logger.debug("There is not enough participants of "
                         "multi-signature")
            return False
        public_keys = []
        for node_name in participants:
            key = self._bls_register.get_key_by_name(node_name)
            if key is None:
                logger.debug("There is no bls key for node {}"
                             .format(node_name))
                return False
            public_keys.append(key)
        return self._multi_sig_verifier.verify_multi_sig(signature,
                                                         value,
                                                         public_keys)

    def validate_proof(self, result):
        """
        Validates state proof
        """
        state_root_hash = result[STATE_PROOF]['root_hash']
        state_root_hash = state_roots_serializer.deserialize(state_root_hash)
        proof_nodes = result[STATE_PROOF]['proof_nodes']
        if isinstance(proof_nodes, str):
            proof_nodes = proof_nodes.encode()
        proof_nodes = proof_nodes_serializer.deserialize(proof_nodes)
        key, value = self.prepare_for_state(result)
        valid = PruningState.verify_state_proof(state_root_hash,
                                                key,
                                                value,
                                                proof_nodes,
                                                serialized=True)
        return valid

    def prepare_for_state(self, result) -> tuple:
        # this should be overridden
        pass

    def showReplyDetails(self, identifier: str, reqId: int):
        """
        Accepts a request ID and prints the reply details

        :param identifier: Client's identifier
        :param reqId: Request ID
        """
        replies = self.getRepliesFromAllNodes(identifier, reqId)
        replyInfo = "Node {} replied with result {}"
        if replies:
            for frm, reply in replies.items():
                print(replyInfo.format(frm, reply['result']))
        else:
            print("No replies received from Nodes!")

    def onConnsChanged(self, joined: Set[str], left: Set[str]):
        """
        Modify the current status of the client based on the status of the
        connections changed.
        """
        if self.isGoing():
            if len(self.nodestack.conns) == len(self.nodeReg):
                self.status = Status.started
            elif len(self.nodestack.conns) >= self.minNodesToConnect:
                self.status = Status.started_hungry
            self.flushMsgsPendingConnection()
        if self.ledger:
            for n in joined:
                self.sendLedgerStatus(n)

    @property
    def hasSufficientConnections(self):
        return len(self.nodestack.conns) >= self.minNodesToConnect

    @property
    def hasAnyConnections(self):
        return len(self.nodestack.conns) > 0

    def can_send_write_requests(self):
        if not Mode.is_done_discovering(self.mode):
            return False
        if not self.hasSufficientConnections:
            return False
        return True

    def can_send_read_requests(self):
        if not Mode.is_done_discovering(self.mode):
            return False
        if not self.hasAnyConnections:
            return False
        return True

    def can_send_request(self, request):
        if not Mode.is_done_discovering(self.mode):
            return False
        if self.hasSufficientConnections:
            return True
        if not self.hasAnyConnections:
            return False
        if request.isForced():
            return True
        is_read_only = request.txn_type in self._read_only_requests
        if is_read_only:
            return True
        return False

    def pendReqsTillConnection(self, request, signer=None):
        """
        Enqueue requests that need to be submitted until the client has
        sufficient connections to nodes
        :return:
        """
        self.reqsPendingConnection.append((request, signer))
        logger.debug("{} enqueuing request since not enough connections "
                     "with nodes: {}".format(self, request))

    def flushMsgsPendingConnection(self):
        queueSize = len(self.reqsPendingConnection)
        if queueSize > 0:
            logger.debug("Flushing pending message queue of size {}"
                         .format(queueSize))
            tmp = deque()
            while self.reqsPendingConnection:
                req, signer = self.reqsPendingConnection.popleft()
                if self.can_send_request(req):
                    self.send(req, signer=signer)
                else:
                    tmp.append((req, signer))
            self.reqsPendingConnection.extend(tmp)

    def _expect_replies(self, request: Request,
                        nodes: Optional[Set[str]] = None):
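        # Record which nodes still owe a REQACK and a REPLY for this request,
        # as (expected nodes, last try time, retry count) tuples, and start
        # the periodic retry check.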
        nodes = nodes if nodes else self._connected_node_names
        now = time.perf_counter()
        self.expectingAcksFor[request.key] = (nodes, now, 0)
        self.expectingRepliesFor[request.key] = (copy.copy(nodes), now, 0)
        self.startRepeating(self._retry_for_expected,
                            self.config.CLIENT_REQACK_TIMEOUT)

    @property
    def _connected_node_names(self):
        return {
            remote.name
            for remote in self.nodestack.remotes.values()
            if self.nodestack.isRemoteConnected(remote)
        }

    def _got_expected(self, msg, sender):
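        # Remove `sender` from the sets of nodes we are still expecting a
        # REQACK/REPLY from; once nothing is pending, stop the retry timer.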

        def drop(identifier, reqId, register):
            key = (identifier, reqId)
            if key in register:
                received = register[key][0]
                if sender in received:
                    received.remove(sender)
                if not received:
                    register.pop(key)

        if msg[OP_FIELD_NAME] == REQACK:
            drop(get_reply_itentifier(msg), get_reply_reqId(msg), self.expectingAcksFor)
        elif msg[OP_FIELD_NAME] == REPLY:
            drop(get_reply_itentifier(msg[f.RESULT.nm]), get_reply_reqId(msg[f.RESULT.nm]), self.expectingAcksFor)
            drop(get_reply_itentifier(msg[f.RESULT.nm]), get_reply_reqId(msg[f.RESULT.nm]), self.expectingRepliesFor)
        elif msg[OP_FIELD_NAME] in (REQNACK, REJECT):
            drop(get_reply_itentifier(msg), get_reply_reqId(msg), self.expectingAcksFor)
            drop(get_reply_itentifier(msg), get_reply_reqId(msg), self.expectingRepliesFor)
        else:
            raise RuntimeError("{} cannot retry {}".format(self, msg))

        if not self.expectingAcksFor and not self.expectingRepliesFor:
            self._stop_expecting()

    def _stop_expecting(self):
        self.stopRepeating(self._retry_for_expected, strict=False)

    def _filter_expected(self, now, queue, retry_timeout, max_retry):
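        # Split pending requests into: dead_requests (already retried
        # max_retry times), alive_requests (due for another retry, mapped to
        # the nodes that have not answered yet) and the union of those
        # silent nodes.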
        dead_requests = []
        alive_requests = {}
        not_answered_nodes = set()
        for requestKey, (expected_from, last_tried, retries) in queue.items():
            if now < last_tried + retry_timeout:
                continue
            if retries >= max_retry:
                dead_requests.append(requestKey)
                continue
            if requestKey not in alive_requests:
                alive_requests[requestKey] = set()
            alive_requests[requestKey].update(expected_from)
            not_answered_nodes.update(expected_from)
        return dead_requests, alive_requests, not_answered_nodes

    def _retry_for_expected(self):
        now = time.perf_counter()

        requests_with_no_ack, alive_requests, not_acked_nodes = \
            self._filter_expected(now,
                                  self.expectingAcksFor,
                                  self.config.CLIENT_REQACK_TIMEOUT,
                                  self.config.CLIENT_MAX_RETRY_ACK)

        requests_with_no_reply, alive_requests, not_replied_nodes = \
            self._filter_expected(now,
                                  self.expectingRepliesFor,
                                  self.config.CLIENT_REPLY_TIMEOUT,
                                  self.config.CLIENT_MAX_RETRY_REPLY)

        for request_key in requests_with_no_ack:
            logger.debug('{} has got no ACKs for {} and will not try again'
                         .format(self, request_key))
            self.expectingAcksFor.pop(request_key)

        for request_key in requests_with_no_reply:
            logger.debug('{} has got no REPLYs for {} and will not try again'
                         .format(self, request_key))
            self.expectingRepliesFor.pop(request_key)

        if not_acked_nodes:
            logger.debug('{} going to retry for {}'
                         .format(self, self.expectingAcksFor.keys()))

        for node_name in not_acked_nodes:
            try:
                remote = self.nodestack.getRemote(node_name)
            except RemoteNotFound:
                logger.warning('{}{} could not find remote {}'
                               .format(CONNECTION_PREFIX, self, node_name))
                continue
            logger.debug('Remote {} of {} being joined since REQACK not '
                         'received for a request'.format(remote, self))

            # connect() would make the client reconnect even if the pool is
            # just busy and cannot answer quickly, that's why
            # maintainConnections is used instead of:
            # self.nodestack.connect(name=remote.name)
            self.nodestack.maintainConnections(force=True)

        if alive_requests:
            # Need a delay in case connection has to be established with some
            # nodes, a better way is not to assume the delay value but only
            # send requests once the connection is established. Also it is
            # assumed that connection is not established if a node is not sending
            # REQACK/REQNACK/REJECT/REPLY, but a little better way is to compare
            # the value in stats of the stack and look for changes in count of
            # `message_reject_rx` but that is not very helpful either since
            # it does not record which node rejected
            delay = 3 if not_acked_nodes else 0
            self._schedule(partial(self.resendRequests, alive_requests), delay)

    def resendRequests(self, keys, force_expect=False):
        for key, nodes in keys.items():
            if not nodes:
                continue
            request = self.reqRepStore.getRequest(*key)
            logger.debug('{} resending request {} to {}'.
                         format(self, request, nodes))
            self.sendToNodes(request, nodes)
            now = time.perf_counter()
            for queue in [self.expectingAcksFor, self.expectingRepliesFor]:
                if key in queue:
                    _, _, retries = queue[key]
                    queue[key] = (nodes, now, retries + 1)
                elif force_expect:
                    queue[key] = (nodes, now, 1)

    def sendLedgerStatus(self, nodeName: str):
        ledgerStatus = LedgerStatus(
            POOL_LEDGER_ID,
            self.ledger.size,
            None,
            None,
            self.ledger.root_hash)
        rid = self.nodestack.getRemote(nodeName).uid
        self.send(ledgerStatus, rid)

    def send(self, msg: Any, *rids: Iterable[int], signer: Signer = None):
        return self.nodestack.send(msg, *rids, signer=signer)

    def sendToNodes(self, msg: Any, names: Iterable[str]):
        rids = [rid for rid, r in self.nodestack.remotes.items()
                if r.name in names]
        return self.send(msg, *rids)

    @staticmethod
    def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
        """
        Verifies the correctness of the merkle proof provided in the reply from
        the node. Returns True if verified to be correct, throws an exception
        otherwise.

        :param replies: One or more replies for which Merkle Proofs have to be
        verified
        :raises ProofError: The proof is invalid
        :return: True
        """
        verifier = MerkleVerifier()
        serializer = ledger_txn_serializer
        ignored = {F.auditPath.name, F.seqNo.name, F.rootHash.name}
        for r in replies:
            seqNo = r[f.RESULT.nm][F.seqNo.name]
            rootHash = Ledger.strToHash(
                r[f.RESULT.nm][F.rootHash.name])
            auditPath = [Ledger.strToHash(a) for a in
                         r[f.RESULT.nm][F.auditPath.name]]
            filtered = dict((k, v) for (k, v) in r[f.RESULT.nm].items()
                            if k not in ignored)
            result = serializer.serialize(filtered)
            verifier.verify_leaf_inclusion(result, seqNo - 1,
                                           auditPath,
                                           STH(tree_size=seqNo,
                                               sha256_root_hash=rootHash))
        return True

    def registerObserver(self, observer: Callable, name=None):
        if not name:
            name = uuid.uuid4()
        if name in self._observers or observer in self._observerSet:
            raise RuntimeError("Observer {} already registered".format(name))
        self._observers[name] = observer
        self._observerSet.add(observer)

    def deregisterObserver(self, name):
        if name not in self._observers:
            raise RuntimeError("Observer {} not registered".format(name))
        self._observerSet.remove(self._observers[name])
        del self._observers[name]

    def hasObserver(self, observer):
        return observer in self._observerSet
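
`take_one_quorumed` in the class above boils down to: drop the node-specific state proofs, then either every reply matches or the largest group of identical replies must itself reach the reply quorum. A self-contained sketch of that selection step, using hashable string results for simplicity (the real code compares result dictionaries via `mostCommonElement` and `Quorums.reply`):

from collections import Counter

def take_one_quorumed_sketch(results, quorum):
    """Return a result agreed on by at least `quorum` replies, else None."""
    if len(results) < quorum:
        return None
    if all(r == results[0] for r in results):
        return results[0]
    # Not all equal: accept the most common result only if its group alone
    # reaches the quorum
    winner, freq = Counter(results).most_common(1)[0]
    return winner if freq >= quorum else None

# e.g. a 4-node pool with a reply quorum of f + 1 = 2 (assumed for the example)
replies = ["state-A", "state-A", "state-B"]
assert take_one_quorumed_sketch(replies, quorum=2) == "state-A"
assert take_one_quorumed_sketch(["state-A"], quorum=2) is None
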
Beispiel #23
0
class Client(Motor,
             MessageProcessor,
             HasFileStorage,
             HasPoolManager,
             HasActionQueue):
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA]=None,
                 ha: Union[HA, Tuple[str, int]]=None,
                 basedirpath: str=None,
                 config=None,
                 sighex: str=None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        signer = Signer(sighex)
        sighex = signer.keyraw
        verkey = rawToFriendly(signer.verraw)

        self.stackName = verkey
        # TODO: Have a way for a client to have a user friendly name. Does it
        # matter now? It used to matter in some CLI examples in the past.
        # self.name = name
        self.name = self.stackName or 'Client~' + str(id(self))

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(self.stackName, basedirpath):
            cha = self.nodeStackClass.getHaFromLocal(self.stackName, basedirpath)
            if cha:
                cha = HA(*cha)
                logger.debug("Client {} ignoring given ha {} and using {}".
                             format(self.name, ha, cha))
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self, self.name, baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        # TODO: Find a proper name
        self.alias = name

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(POOL_LEDGER_ID, self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        HasActionQueue.__init__(self)

        self.setF()

        stackargs = dict(name=self.stackName,
                         ha=cha,
                         main=False,  # stops incoming vacuous joins
                         auth_mode=AuthMode.ALLOW_ANY.value)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        # TODO I think this is a bug here, sighex is getting passed in the seed parameter
        self.nodestack = self.nodeStackClass(stackargs,
                                             self.handleOneNodeMsg,
                                             self.nodeReg,
                                             sighex)
        self.nodestack.onConnsChanged = self.onConnsChanged

        if self.nodeReg:
            logger.info("Client {} initialized with the following node registry:"
                        .format(self.alias))
            lengths = [max(x) for x in zip(*[
                (len(name), len(host), len(str(port)))
                for name, (host, port) in self.nodeReg.items()])]
            fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
                *lengths)
            for name, (host, port) in self.nodeReg.items():
                logger.info(fmt.format(name, host, port))
        else:
            logger.info(
                "Client {} found an empty node registry:".format(self.alias))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger

        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REQACK
        self.expectingAcksFor = {}

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REPLY
        self.expectingRepliesFor = {}

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))

    def getReqRepStore(self):
        return ClientReqRepStoreFile(self.name, self.basedirpath)

    def getTxnLogStore(self):
        return ClientTxnLog(self.name, self.basedirpath)

    def __repr__(self):
        return self.name

    def postPoolLedgerCaughtUp(self):
        self.mode = Mode.discovered
        # Covers the scenario where the client has already connected to the
        # nodes by reading the genesis pool transactions, and that is enough
        self.flushMsgsPendingConnection()

    def postTxnFromCatchupAddedToLedger(self, ledgerType: int, txn: Any):
        if ledgerType != 0:
            logger.error("{} got unknown ledger type {}".
                         format(self, ledgerType))
            return
        self.processPoolTxn(txn)

    # noinspection PyAttributeOutsideInit
    def setF(self):
        nodeCount = len(self.nodeReg)
        self.f = getMaxFailures(nodeCount)
        self.minNodesToConnect = self.f + 1
        self.totalNodes = nodeCount
        self.quorums = Quorums(nodeCount)
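        # Worked example: for a 4-node pool, getMaxFailures(4) == 1, so
        # f == 1, minNodesToConnect == f + 1 == 2, and Quorums(4) derives its
        # quorum sizes from the same node count.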

    @staticmethod
    def exists(name, basedirpath):
        return os.path.exists(basedirpath) and \
               os.path.exists(os.path.join(basedirpath, name))

    @property
    def nodeStackClass(self) -> NetworkInterface:
        return nodeStackClass

    def start(self, loop):
        oldstatus = self.status
        if oldstatus in Status.going():
            logger.info("{} is already {}, so start has no effect".
                        format(self.alias, self.status.name))
        else:
            super().start(loop)
            self.nodestack.start()
            self.nodestack.maintainConnections(force=True)
            if self._ledger:
                self.ledgerManager.setLedgerCanSync(0, True)
                self.mode = Mode.starting

    async def prod(self, limit) -> int:
        """
        Async function that services the node stack and the action queue

        :param limit: The maximum number of messages to be processed
        :return: The number of events processed, up to the prescribed `limit`
        """
        s = 0
        if self.isGoing():
            s = await self.nodestack.service(limit)
            self.nodestack.serviceLifecycle()
        self.nodestack.flushOutBoxes()
        s += self._serviceActions()
        # TODO: This if condition has to be removed. `_ledger`, once set, is
        # never reset, so the `prod` method should be patched in `__init__`.
        if self._ledger:
            s += self.ledgerManager._serviceActions()
        return s
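    # Hedged usage note: `prod` is meant to be called repeatedly by whatever
    # drives the client (typically a Looper in plenum's test utilities); a
    # bare sketch, with the loop shape assumed purely for illustration:
    #
    #   while client.isGoing():
    #       processed = await client.prod(limit=100)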

    def submitReqs(self, *reqs: Request) -> List[Request]:
        requests = []
        for request in reqs:
            if (self.mode == Mode.discovered and self.hasSufficientConnections) or \
               (request.isForced() and self.hasAnyConnections):
                logger.debug('Client {} sending request {}'.format(self, request))
                self.send(request)
                self.expectingFor(request)
            else:
                logger.debug("{} pending request since in mode {} and "
                             "connected to {} nodes".
                             format(self, self.mode, self.nodestack.connecteds))
                self.pendReqsTillConnection(request)
            requests.append(request)
        for r in requests:
            self.reqRepStore.addRequest(r)
        return requests

    def handleOneNodeMsg(self, wrappedMsg, excludeFromCli=None) -> None:
        """
        Handles single message from a node, and appends it to a queue
        :param wrappedMsg: Reply received by the client from the node
        """
        self.inBox.append(wrappedMsg)
        msg, frm = wrappedMsg
        # Do not print result of transaction type `POOL_LEDGER_TXNS` on the CLI
        ledgerTxnTypes = (POOL_LEDGER_TXNS, LEDGER_STATUS, CONSISTENCY_PROOF,
                          CATCHUP_REP)
        printOnCli = not excludeFromCli and \
            msg.get(OP_FIELD_NAME) not in ledgerTxnTypes
        logger.info("Client {} got msg from node {}: {}".
                    format(self.name, frm, msg),
                    extra={"cli": printOnCli})
        if OP_FIELD_NAME in msg:
            if msg[OP_FIELD_NAME] in ledgerTxnTypes and self._ledger:
                cMsg = node_message_factory.get_instance(**msg)
                if msg[OP_FIELD_NAME] == POOL_LEDGER_TXNS:
                    self.poolTxnReceived(cMsg, frm)
                if msg[OP_FIELD_NAME] == LEDGER_STATUS:
                    self.ledgerManager.processLedgerStatus(cMsg, frm)
                if msg[OP_FIELD_NAME] == CONSISTENCY_PROOF:
                    self.ledgerManager.processConsistencyProof(cMsg, frm)
                if msg[OP_FIELD_NAME] == CATCHUP_REP:
                    self.ledgerManager.processCatchupRep(cMsg, frm)
            elif msg[OP_FIELD_NAME] == REQACK:
                self.reqRepStore.addAck(msg, frm)
                self.gotExpected(msg, frm)
            elif msg[OP_FIELD_NAME] == REQNACK:
                self.reqRepStore.addNack(msg, frm)
                self.gotExpected(msg, frm)
            elif msg[OP_FIELD_NAME] == REJECT:
                self.reqRepStore.addReject(msg, frm)
                self.gotExpected(msg, frm)
            elif msg[OP_FIELD_NAME] == REPLY:
                result = msg[f.RESULT.nm]
                identifier = msg[f.RESULT.nm][f.IDENTIFIER.nm]
                reqId = msg[f.RESULT.nm][f.REQ_ID.nm]
                numReplies = self.reqRepStore.addReply(identifier, reqId, frm,
                                                       result)
                self.gotExpected(msg, frm)
                self.postReplyRecvd(identifier, reqId, frm, result, numReplies)

    def postReplyRecvd(self, identifier, reqId, frm, result, numReplies):
        if not self.txnLog.hasTxn(identifier, reqId) and numReplies > self.f:
            replies = self.reqRepStore.getReplies(identifier, reqId).values()
            reply = checkIfMoreThanFSameItems(replies, self.f)
            if reply:
                self.txnLog.append(identifier, reqId, reply)
                return reply

    def _statusChanged(self, old, new):
        # do nothing for now
        pass

    def onStopping(self, *args, **kwargs):
        logger.debug('Stopping client {}'.format(self))
        self.nodestack.nextCheck = 0
        self.nodestack.stop()
        if self._ledger:
            self.ledgerManager.setLedgerState(POOL_LEDGER_ID, LedgerState.not_synced)
            self.mode = None
            self._ledger.stop()
            if self.hashStore and not self.hashStore.closed:
                self.hashStore.close()
        self.txnLog.close()

    def getReply(self, identifier: str, reqId: int) -> Optional[Reply]:
        """
        Returns the reply for the given request, along with its status

        :param identifier: identifier of the entity making the request
        :param reqId: Request Id
        :return: a tuple of reply and status:
        (None, NOT_FOUND) when no replies have been received
        (None, UNCONFIRMED) when fewer than f+1 matching replies are received
        (reply, CONFIRMED) when f+1 matching replies are received
        """
        try:
            cons = self.hasConsensus(identifier, reqId)
        except KeyError:
            return None, "NOT_FOUND"
        if cons:
            return cons, "CONFIRMED"
        return None, "UNCONFIRMED"
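    # Illustrative handling of the three outcomes above (the helper names
    # `process`, `wait_and_retry` and `resubmit_or_wait` are assumptions for
    # the sketch, not part of this snippet):
    #
    #   reply, status = client.getReply(idr, req_id)
    #   if status == "CONFIRMED":       # f+1 matching replies were received
    #       process(reply)
    #   elif status == "UNCONFIRMED":   # replies exist, but no f+1 agreement yet
    #       wait_and_retry()
    #   else:                           # "NOT_FOUND": no replies at all yet
    #       resubmit_or_wait()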

    def getRepliesFromAllNodes(self, identifier: str, reqId: int):
        """
        Accepts a request ID and returns the replies received from all the
        nodes for that request

        :param identifier: identifier of the entity making the request
        :param reqId: Request ID
        :return: dict of replies keyed by the name of the node that sent them
        """
        return {frm: msg for msg, frm in self.inBox
                if msg[OP_FIELD_NAME] == REPLY and
                msg[f.RESULT.nm][f.REQ_ID.nm] == reqId and
                msg[f.RESULT.nm][f.IDENTIFIER.nm] == identifier}

    def hasConsensus(self, identifier: str, reqId: int) -> Optional[str]:
        """
        Accepts a request ID and returns the agreed result if consensus
        (at least f+1 matching replies) was reached for the request, or else
        False; raises KeyError if no replies have been received yet

        :param identifier: identifier of the entity making the request
        :param reqId: Request ID
        """
        replies = self.getRepliesFromAllNodes(identifier, reqId)
        if not replies:
            raise KeyError('{}{}'.format(identifier, reqId))  # NOT_FOUND
        # Check if at least f+1 replies are received or not.
        if self.quorums.reply.is_reached(len(replies)):
            onlyResults = {frm: reply["result"] for frm, reply in
                           replies.items()}
            resultsList = list(onlyResults.values())
            # if all the elements in the resultList are equal - consensus
            # is reached.
            if all(result == resultsList[0] for result in resultsList):
                return resultsList[0]  # CONFIRMED
            else:
                logger.error(
                    "Received a different result from at least one of the nodes..")
                return checkIfMoreThanFSameItems(resultsList, self.f)
        else:
            return False  # UNCONFIRMED
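    # Summary of the branch above: once the reply quorum (f + 1) is met, a
    # unanimous result is returned directly; with mixed results the method
    # falls back to checkIfMoreThanFSameItems, which only yields a result if
    # more than f of the replies agree.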

    def showReplyDetails(self, identifier: str, reqId: int):
        """
        Accepts a request ID and prints the reply details

        :param identifier: Client's identifier
        :param reqId: Request ID
        """
        replies = self.getRepliesFromAllNodes(identifier, reqId)
        replyInfo = "Node {} replied with result {}"
        if replies:
            for frm, reply in replies.items():
                print(replyInfo.format(frm, reply['result']))
        else:
            print("No replies received from Nodes!")

    def onConnsChanged(self, joined: Set[str], left: Set[str]):
        """
        Modify the current status of the client based on how the node
        connections have changed.
        """
        if self.isGoing():
            if len(self.nodestack.conns) == len(self.nodeReg):
                self.status = Status.started
            elif len(self.nodestack.conns) >= self.minNodesToConnect:
                self.status = Status.started_hungry
            self.flushMsgsPendingConnection()
        if self._ledger:
            for n in joined:
                self.sendLedgerStatus(n)

    def replyIfConsensus(self, identifier, reqId: int):
        replies, errors = self.reqRepStore.getAllReplies(identifier, reqId)
        r = list(replies.values())[0] if len(replies) > self.f else None
        e = list(errors.values())[0] if len(errors) > self.f else None
        return r, e

    @property
    def hasSufficientConnections(self):
        return len(self.nodestack.conns) >= self.minNodesToConnect

    @property
    def hasAnyConnections(self):
        return len(self.nodestack.conns) > 0

    def hasMadeRequest(self, identifier, reqId: int):
        return self.reqRepStore.hasRequest(identifier, reqId)

    def isRequestSuccessful(self, identifier, reqId):
        acks = self.reqRepStore.getAcks(identifier, reqId)
        nacks = self.reqRepStore.getNacks(identifier, reqId)
        f = getMaxFailures(len(self.nodeReg))
        if len(acks) > f:
            return True, "Done"
        elif len(nacks) > f:
            # TODO: What if the nacks were different from each node?
            return False, list(nacks.values())[0]
        else:
            return None
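    # Note on the return values above: (True, "Done") once more than f ACKs
    # arrive, (False, <first nack reason>) once more than f NACKs arrive, and
    # None while neither threshold has been crossed.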

    def pendReqsTillConnection(self, request, signer=None):
        """
        Enqueue a request to be submitted once the client has sufficient
        connections to nodes
        """
        self.reqsPendingConnection.append((request, signer))
        logger.debug("{} enqueuing request since not enough connections "
                     "with nodes: {}".format(self, request))

    def flushMsgsPendingConnection(self):
        queueSize = len(self.reqsPendingConnection)
        if queueSize > 0:
            logger.debug("Flushing pending message queue of size {}"
                         .format(queueSize))
            tmp = deque()
            while self.reqsPendingConnection:
                req, signer = self.reqsPendingConnection.popleft()
                if (self.hasSufficientConnections and self.mode == Mode.discovered) or \
                   (req.isForced() and self.hasAnyConnections):
                    self.send(req, signer=signer)
                else:
                    tmp.append((req, signer))
            self.reqsPendingConnection.extend(tmp)

    def expectingFor(self, request: Request, nodes: Optional[Set[str]]=None):
        nodes = nodes or {r.name for r in self.nodestack.remotes.values()
                          if self.nodestack.isRemoteConnected(r)}
        now = time.perf_counter()
        self.expectingAcksFor[request.key] = (nodes, now, 0)
        self.expectingRepliesFor[request.key] = (copy.copy(nodes), now, 0)
        self.startRepeating(self.retryForExpected,
                            self.config.CLIENT_REQACK_TIMEOUT)
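    # Shape of the bookkeeping above, for one outstanding request (the values
    # shown are illustrative):
    #
    #   expectingAcksFor[(<identifier>, <reqId>)] == (
    #       {'Node1', 'Node2', 'Node3', 'Node4'},   # nodes yet to answer
    #       1523.37,                                 # perf_counter() at send time
    #       0)                                       # retries so far
    #
    # expectingRepliesFor holds an independent copy of the same node set.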

    def gotExpected(self, msg, frm):
        if msg[OP_FIELD_NAME] == REQACK:
            container = msg
            colls = (self.expectingAcksFor, )
        elif msg[OP_FIELD_NAME] == REPLY:
            container = msg[f.RESULT.nm]
            # If a REQACK sent by a node was lost, sending the request again
            # would fetch the reply. Alternatively the client might have lost
            # only the REQACK and not the REPLY, so once a REPLY is received
            # the request does not need to be resent
            colls = (self.expectingAcksFor, self.expectingRepliesFor)
        elif msg[OP_FIELD_NAME] in (REQNACK, REJECT):
            container = msg
            colls = (self.expectingAcksFor, self.expectingRepliesFor)
        else:
            raise RuntimeError("{} cannot retry {}".format(self, msg))

        idr = container.get(f.IDENTIFIER.nm)
        reqId = container.get(f.REQ_ID.nm)
        key = (idr, reqId)
        for coll in colls:
            if key in coll:
                if frm in coll[key][0]:
                    coll[key][0].remove(frm)
                if not coll[key][0]:
                    coll.pop(key)

        if not (self.expectingAcksFor or self.expectingRepliesFor):
            self.stopRetrying()

    def stopRetrying(self):
        self.stopRepeating(self.retryForExpected, strict=False)

    def _filterExpected(self, now, queue, retryTimeout, maxRetry):
        deadRequests = []
        aliveRequests = {}
        notAnsweredNodes = set()
        for requestKey, (expectedFrom, lastTried, retries) in queue.items():
            if now < lastTried + retryTimeout:
                continue
            if retries >= maxRetry:
                deadRequests.append(requestKey)
                continue
            if requestKey not in aliveRequests:
                aliveRequests[requestKey] = set()
            aliveRequests[requestKey].update(expectedFrom)
            notAnsweredNodes.update(expectedFrom)
        return deadRequests, aliveRequests, notAnsweredNodes
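    # The helper above partitions the queue into: request keys that exhausted
    # maxRetry (deadRequests), request keys still worth retrying mapped to the
    # nodes that have not answered (aliveRequests), and the union of those
    # nodes (notAnsweredNodes). Entries tried more recently than retryTimeout
    # ago are skipped for this round.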

    def retryForExpected(self):
        now = time.perf_counter()

        requestsWithNoAck, aliveRequests, notAckedNodes = \
            self._filterExpected(now,
                                 self.expectingAcksFor,
                                 self.config.CLIENT_REQACK_TIMEOUT,
                                 self.config.CLIENT_MAX_RETRY_ACK)

        requestsWithNoReply, aliveRequests, notRepliedNodes = \
            self._filterExpected(now,
                                 self.expectingRepliesFor,
                                 self.config.CLIENT_REPLY_TIMEOUT,
                                 self.config.CLIENT_MAX_RETRY_REPLY)

        for requestKey in requestsWithNoAck:
            logger.debug('{} has got no ACKs for {} and will not try again'
                         .format(self, requestKey))
            self.expectingAcksFor.pop(requestKey)

        for requestKey in requestsWithNoReply:
            logger.debug('{} has got no REPLYs for {} and will not try again'
                         .format(self, requestKey))
            self.expectingRepliesFor.pop(requestKey)

        if notAckedNodes:
            logger.debug('{} going to retry for {}'
                         .format(self, self.expectingAcksFor.keys()))
        for nm in notAckedNodes:
            try:
                remote = self.nodestack.getRemote(nm)
            except RemoteNotFound:
                logger.warning('{} could not find remote {}'.format(self, nm))
                continue
            logger.debug('Remote {} of {} being joined since REQACK not '
                         'received for request'.format(remote, self))

            # Calling connect() directly would make the client reconnect even
            # if the pool is just busy and cannot answer quickly; that's why
            # maintainConnections is used instead
            # self.nodestack.connect(name=remote.name)
            self.nodestack.maintainConnections(force=True)

        if aliveRequests:
            # Need a delay in case a connection has to be established with
            # some nodes. A better way would be not to assume the delay value
            # but to send requests only once the connection is established.
            # It is also assumed that a connection is not established if a
            # node is not sending REQACK/REQNACK/REJECT/REPLY; a slightly
            # better way would be to compare the value in the stack's stats
            # and look for changes in the count of `message_reject_rx`, but
            # that is not very helpful either since it does not record which
            # node rejected
            delay = 3 if notAckedNodes else 0
            self._schedule(partial(self.resendRequests, aliveRequests), delay)

    def resendRequests(self, keys):
        for key, nodes in keys.items():
            if not nodes:
                continue
            request = self.reqRepStore.getRequest(*key)
            logger.debug('{} resending request {} to {}'.
                         format(self, request, nodes))
            self.sendToNodes(request, nodes)
            now = time.perf_counter()
            for queue in [self.expectingAcksFor, self.expectingRepliesFor]:
                if key in queue:
                    _, _, retries = queue[key]
                    queue[key] = (nodes, now, retries + 1)
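    # After a resend the bookkeeping entry keeps its key but narrows the
    # expected-node set to the nodes just retried, refreshes the timestamp and
    # bumps the retry counter, so `_filterExpected` can give up once
    # CLIENT_MAX_RETRY_ACK / CLIENT_MAX_RETRY_REPLY is reached.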

    def sendLedgerStatus(self, nodeName: str):
        ledgerStatus = LedgerStatus(POOL_LEDGER_ID, self.ledger.size, None, None,
                                    self.ledger.root_hash)
        rid = self.nodestack.getRemote(nodeName).uid
        self.send(ledgerStatus, rid)

    def send(self, msg: Any, *rids: Iterable[int], signer: Signer = None):
        self.nodestack.send(msg, *rids, signer=signer)

    def sendToNodes(self, msg: Any, names: Iterable[str]):
        rids = [rid for rid, r in self.nodestack.remotes.items() if r.name in names]
        self.send(msg, *rids)

    @staticmethod
    def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
        """
        Verifies the correctness of the merkle proof provided in the reply from
        the node. Returns True if verified to be correct, throws an exception
        otherwise.

        :param replies: One or more replies for which Merkle Proofs have to be
        verified
        :raises ProofError: The proof is invalid
        :return: True
        """
        verifier = MerkleVerifier()
        fields = getTxnOrderedFields()
        serializer = CompactSerializer(fields=fields)
        ignored = {F.auditPath.name, F.seqNo.name, F.rootHash.name, TXN_TIME}
        for r in replies:
            seqNo = r[f.RESULT.nm][F.seqNo.name]
            rootHash = Ledger.strToHash(
                r[f.RESULT.nm][F.rootHash.name])
            auditPath = [Ledger.strToHash(a) for a in
                         r[f.RESULT.nm][F.auditPath.name]]
            filtered = dict((k, v) for (k, v) in r[f.RESULT.nm].items()
                            if k not in ignored)
            result = serializer.serialize(filtered)
            verifier.verify_leaf_inclusion(result, seqNo - 1,
                                           auditPath,
                                           STH(tree_size=seqNo,
                                               sha256_root_hash=rootHash))
        return True
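# A minimal end-to-end sketch of using this Client. Hedged: `nodeReg`,
# `looper`, `request`, the ports and the directory are assumptions made up for
# illustration, not values taken from this snippet.
#
#   nodeReg = {'Node1': HA('127.0.0.1', 9701), 'Node2': HA('127.0.0.1', 9703),
#              'Node3': HA('127.0.0.1', 9705), 'Node4': HA('127.0.0.1', 9707)}
#   client = Client('client1', nodeReg=nodeReg, ha=('127.0.0.1', 9760),
#                   basedirpath=tmpdir)
#   looper.add(client)                    # starts the client and its stack
#   req, = client.submitReqs(request)     # pended until enough connections
#   reply, status = client.getReply(req.identifier, req.reqId)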
Beispiel #24
0
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 genesis_dir: str = None,
                 ledger_dir: str = None,
                 keys_dir: str = None,
                 plugins_dir: str = None,
                 config=None,
                 sighex: str = None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()

        dataDir = self.config.clientDataDir or "data/clients"
        self.basedirpath = basedirpath or self.config.CLI_BASE_DIR
        self.basedirpath = os.path.expanduser(self.basedirpath)

        signer = Signer(sighex)
        sighex = signer.keyraw
        verkey = rawToFriendly(signer.verraw)

        self.stackName = verkey
        # TODO: Have a way for a client to have a user-friendly name. Does it
        # matter now? It used to matter in some CLI examples in the past.
        # self.name = name
        self.name = self.stackName or 'Client~' + str(id(self))

        self.genesis_dir = genesis_dir or self.basedirpath
        self.ledger_dir = ledger_dir or os.path.join(self.basedirpath, dataDir, self.name)
        self.plugins_dir = plugins_dir or self.basedirpath
        _keys_dir = keys_dir or self.basedirpath
        self.keys_dir = os.path.join(_keys_dir, "keys")
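        # Directory layout resolved above (all defaults are relative to
        # basedirpath unless overridden): genesis_dir -> basedirpath,
        # ledger_dir -> basedirpath/<dataDir>/<name>, plugins_dir ->
        # basedirpath, keys_dir -> <keys_dir or basedirpath>/keys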

        cha = None
        if self.exists(self.stackName, self.keys_dir):
            cha = self.nodeStackClass.getHaFromLocal(
                self.stackName, self.keys_dir)
            if cha:
                cha = HA(*cha)
                logger.debug("Client {} ignoring given ha {} and using {}".
                             format(self.name, ha, cha))
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        HasFileStorage.__init__(self, self.ledger_dir)

        # TODO: Find a proper name
        self.alias = name

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(
                POOL_LEDGER_ID,
                self.ledger,
                preCatchupStartClbk=self.prePoolLedgerCatchup,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        HasActionQueue.__init__(self)

        self.setPoolParams()

        stackargs = dict(name=self.stackName,
                         ha=cha,
                         main=False,  # stops incoming vacuous joins
                         auth_mode=AuthMode.ALLOW_ANY.value)
        stackargs['basedirpath'] = self.keys_dir
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        # TODO I think this is a bug here, sighex is getting passed in the seed
        # parameter
        self.nodestack = self.nodeStackClass(stackargs,
                                             self.handleOneNodeMsg,
                                             self.nodeReg,
                                             sighex)
        self.nodestack.onConnsChanged = self.onConnsChanged

        if self.nodeReg:
            logger.info(
                "Client {} initialized with the following node registry:".format(
                    self.alias))
            lengths = [max(x) for x in zip(*[
                (len(name), len(host), len(str(port)))
                for name, (host, port) in self.nodeReg.items()])]
            fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
                *lengths)
            for name, (host, port) in self.nodeReg.items():
                logger.info(fmt.format(name, host, port))
        else:
            logger.info(
                "Client {} found an empty node registry:".format(self.alias))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger

        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REQACK
        self.expectingAcksFor = {}

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REPLY
        self.expectingRepliesFor = {}

        self._observers = {}  # type: Dict[str, Callable]
        self._observerSet = set()  # makes it easier to guard against duplicates

        plugins_to_load = self.config.PluginsToLoad if hasattr(self.config, "PluginsToLoad") else None
        tp = loadPlugins(self.plugins_dir, plugins_to_load)

        logger.debug("total plugins loaded in client: {}".format(tp))

        self._multi_sig_verifier = self._create_multi_sig_verifier()
        self._read_only_requests = set()
Beispiel #25
0
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None,
                 sighex: str = None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        signer = Signer(sighex)
        sighex = signer.keyraw
        verkey = rawToFriendly(signer.verraw)

        self.stackName = verkey
        # TODO: Have a way for a client to have a user-friendly name. Does it
        # matter now? It used to matter in some CLI examples in the past.
        # self.name = name
        self.name = self.stackName

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(self.stackName, basedirpath):
            cha = getHaFromLocalEstate(self.stackName, basedirpath)
            if cha:
                cha = HA(*cha)
                logger.debug(
                    "Client {} ignoring given ha {} and using {}".format(
                        self.name, ha, cha))
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self,
                                self.name,
                                baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        # TODO: Find a proper name
        self.alias = name

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(
                0,
                self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        HasActionQueue.__init__(self)

        self.setF()

        stackargs = dict(
            name=self.stackName,
            ha=cha,
            main=False,  # stops incoming vacuous joins
            auto=2)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        self.nodestack = self.nodeStackClass(stackargs, self.handleOneNodeMsg,
                                             self.nodeReg, sighex)
        self.nodestack.onConnsChanged = self.onConnsChanged

        if self.nodeReg:
            logger.info(
                "Client {} initialized with the following node registry:".
                format(self.alias))
            lengths = [
                max(x)
                for x in zip(*[(len(name), len(host), len(str(port)))
                               for name, (host, port) in self.nodeReg.items()])
            ]
            fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
                *lengths)
            for name, (host, port) in self.nodeReg.items():
                logger.info(fmt.format(name, host, port))
        else:
            logger.info("Client {} found an empty node registry:".format(
                self.alias))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger

        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REQACK
        self.expectingAcksFor = {}

        # Tuple of identifier and reqId as key and value as tuple of set of
        # nodes which are expected to send REPLY
        self.expectingRepliesFor = {}

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))