def getIdrCache(self):
    if self.idrCache is None:
        self.idrCache = IdrCache(self.name,
                                 initKeyValueStorage(self.config.idrCacheStorage,
                                                     self.dataLocation,
                                                     self.config.idrCacheDbName))
    return self.idrCache

def idr_cache_none_role(req_auth):
    cache = IdrCache("Cache", KeyValueStorageInMemory())
    cache.set(req_auth.identifier, 1, int(time.time()),
              verkey="SomeVerkey", isCommitted=False)
    return cache

def idr_cache():
    cache = IdrCache("Cache", KeyValueStorageInMemory())
    cache.set("trustee_identifier", 1, int(time.time()), role=TRUSTEE,
              verkey="trustee_identifier_verkey", isCommitted=False)
    cache.set("steward_identifier", 2, int(time.time()), role=STEWARD,
              verkey="steward_identifier_verkey", isCommitted=False)
    cache.set("trust_anchor_identifier", 3, int(time.time()), role=TRUST_ANCHOR,
              verkey="trust_anchor_identifier_verkey", isCommitted=False)
    cache.set(OTHER_IDENTIFIER, 4, int(time.time()), role='OtherRole',
              verkey="other_verkey", isCommitted=False)
    return cache

def idr_cache(identity_owners, trustees, endorsers):
    cache = IdrCache("Cache", KeyValueStorageInMemory())
    seq_no = 1
    for identifier in identity_owners:
        cache.set(identifier, seq_no, int(time.time()), role="",
                  verkey="owner_identifier_verkey", isCommitted=False)
    for identifier in trustees:
        cache.set(identifier, seq_no, int(time.time()), role=TRUSTEE,
                  verkey="trustee_identifier_verkey", isCommitted=False)
    for identifier in endorsers:
        cache.set(identifier, seq_no, int(time.time()), role=ENDORSER,
                  verkey="endorser_identifier_verkey", isCommitted=False)
    return cache

def idr_cache(tconf, tdir):
    name = 'name'
    idr_cache = IdrCache(name,
                         initKeyValueStorage(KeyValueStorageType.Rocksdb,
                                             tdir,
                                             tconf.idrCacheDbName,
                                             db_config=tconf.db_idr_cache_db_config))
    return idr_cache

def init_idr_cache_storage(self):
    idr_cache = IdrCache(self.node.name,
                         initKeyValueStorage(self.node.config.idrCacheStorage,
                                             self.node.dataLocation,
                                             self.node.config.idrCacheDbName,
                                             db_config=self.node.config.db_idr_cache_db_config))
    self.node.db_manager.register_new_store(IDR_CACHE_LABEL, idr_cache)

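Once the cache is registered under IDR_CACHE_LABEL, consumers are expected to resolve it through the DatabaseManager rather than keep a direct reference. A minimal sketch of that lookup, assuming plenum's DatabaseManager pairs register_new_store with a get_store(label) accessor (the helper name below is hypothetical):

def lookup_idr_cache(db_manager):
    # Hypothetical helper: resolve the IdrCache by the label it was
    # registered under in init_idr_cache_storage above.
    return db_manager.get_store(IDR_CACHE_LABEL)
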
def request_handler(bls_store):
    state = PruningState(KeyValueStorageInMemory())
    cache = IdrCache('Cache', KeyValueStorageInMemory())
    attr_store = AttributeStore(KeyValueStorageInMemory())
    return DomainReqHandler(ledger=None,
                            state=state,
                            requestProcessor=None,
                            idrCache=cache,
                            attributeStore=attr_store,
                            bls_store=bls_store)

def db_manager(tconf, tdir):
    db_manager = DatabaseManager()
    name = 'name'
    idr_cache = IdrCache(name,
                         initKeyValueStorage(KeyValueStorageType.Rocksdb,
                                             tdir,
                                             tconf.idrCacheDbName,
                                             db_config=tconf.db_idr_cache_db_config))
    db_manager.register_new_store('idr', idr_cache)
    db_manager.register_new_database(DOMAIN_LEDGER_ID, get_fake_ledger(), State())
    return db_manager

def db_manager(tconf, tdir):
    db_manager = DatabaseManager()
    state = State()
    state.txn_list = {}
    state.get = lambda key, isCommitted=True: state.txn_list.get(key, None)
    state.set = lambda key, value: state.txn_list.update({key: value})
    name = 'name'
    idr_cache = IdrCache(name,
                         initKeyValueStorage(KeyValueStorageType.Rocksdb,
                                             tdir,
                                             tconf.idrCacheDbName,
                                             db_config=tconf.db_idr_cache_db_config))
    db_manager.register_new_store(IDR_CACHE_LABEL, idr_cache)
    db_manager.register_new_database(DOMAIN_LEDGER_ID, get_fake_ledger(), state)
    return db_manager

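The lambda-stubbed State in this fixture only has to honour the get/set calls a handler makes against domain state, which keeps the test free of a real trie. A hedged usage sketch, assuming DatabaseManager exposes a get_state(ledger_id) accessor to match register_new_database:

# Illustrative only: drive the stubbed state from the fixture above.
manager = db_manager(tconf, tdir)
state = manager.get_state(DOMAIN_LEDGER_ID)  # accessor name is assumed
state.set(b'some_did', b'serialized_nym_value')
assert state.get(b'some_did', isCommitted=False) == b'serialized_nym_value'
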
def idr_cache(req_auth):
    cache = IdrCache("Cache", KeyValueStorageInMemory())
    cache.set(req_auth.identifier, 1, int(time.time()), role=STEWARD,
              verkey="SomeVerkey", isCommitted=False)
    cache.set("some_identifier2", 1, int(time.time()), role=STEWARD,
              verkey="SomeVerkey2", isCommitted=False)
    return cache

def request_handler(bls_store):
    state = PruningState(KeyValueStorageInMemory())
    config_state = PruningState(KeyValueStorageInMemory())
    state_serializer = ConstraintsSerializer(domain_state_serializer)
    cache = IdrCache('Cache', KeyValueStorageInMemory())
    attr_store = AttributeStore(KeyValueStorageInMemory())
    write_req_validator = WriteRequestValidator(
        config=FakeSomething(authPolicy=CONFIG_LEDGER_AUTH_POLICY),
        auth_map=auth_map,
        cache=cache,
        config_state=config_state,
        state_serializer=state_serializer)
    return DomainReqHandler(ledger=None,
                            state=state,
                            config=None,
                            requestProcessor=None,
                            idrCache=cache,
                            attributeStore=attr_store,
                            bls_store=bls_store,
                            write_req_validator=write_req_validator,
                            ts_store=None)

def make_idr_cache():
    kvs = KeyValueStorageInMemory()
    cache = IdrCache("TestCache", kvs)
    return cache

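For context, a minimal sketch of how a test might exercise this fixture, assuming the getVerkey/getRole read accessors that indy-node's IdrCache provides alongside set; entries staged with isCommitted=False are only visible to uncommitted reads:

import time

def check_idr_cache_roundtrip():
    # Sketch under the assumptions above, not a test from the source.
    cache = make_idr_cache()
    # Role codes are plain strings on the ledger (e.g. '101' for ENDORSER).
    cache.set("some_did", 1, int(time.time()),
              role="101", verkey="SomeVerkey", isCommitted=False)
    assert cache.getVerkey("some_did", isCommitted=False) == "SomeVerkey"
    assert cache.getRole("some_did", isCommitted=False) == "101"
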
class Node(PlenumNode):
    keygenScript = "init_indy_keys"
    client_request_class = SafeRequest
    TxnUtilConfig.client_request_class = Request
    _info_tool_class = ValidatorNodeInfoTool

    def __init__(self,
                 name,
                 clientAuthNr=None,
                 ha=None,
                 cliname=None,
                 cliha=None,
                 config_helper=None,
                 ledger_dir: str = None,
                 keys_dir: str = None,
                 genesis_dir: str = None,
                 plugins_dir: str = None,
                 node_info_dir: str = None,
                 primaryDecider=None,
                 pluginPaths: Iterable[str] = None,
                 storage=None,
                 config=None):
        config = config or getConfig()
        config_helper = config_helper or NodeConfigHelper(name, config)
        ledger_dir = ledger_dir or config_helper.ledger_dir
        keys_dir = keys_dir or config_helper.keys_dir
        genesis_dir = genesis_dir or config_helper.genesis_dir
        plugins_dir = plugins_dir or config_helper.plugins_dir
        node_info_dir = node_info_dir or config_helper.node_info_dir
        # TODO: 4 ugly lines ahead, don't know how to avoid
        self.idrCache = None
        self.attributeStore = None
        self.upgrader = None
        self.restarter = None
        self.poolCfg = None
        super().__init__(name=name,
                         clientAuthNr=clientAuthNr,
                         ha=ha,
                         cliname=cliname,
                         cliha=cliha,
                         config_helper=config_helper,
                         ledger_dir=ledger_dir,
                         keys_dir=keys_dir,
                         genesis_dir=genesis_dir,
                         plugins_dir=plugins_dir,
                         node_info_dir=node_info_dir,
                         primaryDecider=primaryDecider,
                         pluginPaths=pluginPaths,
                         storage=storage,
                         config=config)
        self.upgrader = self.init_upgrader()
        self.restarter = self.init_restarter()
        self.poolCfg = self.init_pool_config()
        # TODO: ugly line ahead, don't know how to avoid
        self.clientAuthNr = clientAuthNr or self.defaultAuthNr()
        self.nodeMsgRouter.routes[Request] = self.processNodeRequest
        self.nodeAuthNr = self.defaultNodeAuthNr()
        # Will be refactored soon
        self.get_req_handler(CONFIG_LEDGER_ID).upgrader = self.upgrader
        self.get_req_handler(CONFIG_LEDGER_ID).poolCfg = self.poolCfg
        self.actionReqHandler.poolCfg = self.poolCfg
        self.actionReqHandler.restarter = self.restarter

    def init_pool_config(self):
        return PoolConfig(self.configLedger)

    def on_inconsistent_3pc_state(self):
        timeout = self.config.INCONSISTENCY_WATCHER_NETWORK_TIMEOUT
        logger.warning("Suspecting inconsistent 3PC state, going to restart in {} seconds"
                       .format(timeout))
        now = get_utc_datetime()
        when = now + timedelta(seconds=timeout)
        self.restarter.requestRestart(when)

    def init_domain_ledger(self):
        """
        This is usually an implementation of Ledger
        """
        if self.config.primaryStorage is None:
            genesis_txn_initiator = GenesisTxnInitiatorFromFile(
                self.genesis_dir, self.config.domainTransactionsFile)
            return Ledger(
                CompactMerkleTree(hashStore=self.getHashStore('domain')),
                dataDir=self.dataLocation,
                fileName=self.config.domainTransactionsFile,
                ensureDurability=self.config.EnsureLedgerDurability,
                genesis_txn_initiator=genesis_txn_initiator)
        else:
            return initStorage(self.config.primaryStorage,
                               name=self.name + NODE_PRIMARY_STORAGE_SUFFIX,
                               dataDir=self.dataLocation,
                               config=self.config)

    def init_upgrader(self):
        return Upgrader(self.id,
                        self.name,
                        self.dataLocation,
                        self.config,
                        self.configLedger,
                        actionFailedCallback=self.postConfigLedgerCaughtUp,
                        action_start_callback=self.notify_upgrade_start)

    def init_restarter(self):
        return Restarter(self.id,
                         self.name,
                         self.dataLocation,
                         self.config)

    def init_pool_req_handler(self):
        return PoolRequestHandler(self.poolLedger,
                                  self.states[POOL_LEDGER_ID],
                                  self.states,
                                  self.getIdrCache(),
                                  self.write_req_validator)

    def init_domain_req_handler(self):
        if self.attributeStore is None:
            self.attributeStore = self.init_attribute_store()
        return DomainReqHandler(self.domainLedger,
                                self.states[DOMAIN_LEDGER_ID],
                                self.config,
                                self.reqProcessors,
                                self.getIdrCache(),
                                self.attributeStore,
                                self.bls_bft.bls_store,
                                self.write_req_validator,
                                self.getStateTsDbStorage())

    def init_config_req_handler(self):
        return ConfigReqHandler(self.configLedger,
                                self.states[CONFIG_LEDGER_ID],
                                self.getIdrCache(),
                                self.upgrader,
                                self.poolManager,
                                self.poolCfg,
                                self.write_req_validator)

    def getIdrCache(self):
        if self.idrCache is None:
            self.idrCache = IdrCache(
                self.name,
                initKeyValueStorage(self.config.idrCacheStorage,
                                    self.dataLocation,
                                    self.config.idrCacheDbName,
                                    db_config=self.config.db_idr_cache_db_config))
        return self.idrCache

    def init_attribute_store(self):
        return AttributeStore(
            initKeyValueStorage(self.config.attrStorage,
                                self.dataLocation,
                                self.config.attrDbName,
                                db_config=self.config.db_attr_db_config))

    def init_action_req_handler(self):
        return ActionReqHandler(self.getIdrCache(),
                                self.restarter,
                                self.poolManager,
                                self.poolCfg,
                                self._info_tool,
                                self.write_req_validator)

    def post_txn_from_catchup_added_to_domain_ledger(self, txn):
        pass

    def postPoolLedgerCaughtUp(self, **kwargs):
        # The only reason to override this is to set the correct node id in
        # the upgrader: when the upgrader is initialized, the node might not
        # have its id yet because it may be missing the complete pool ledger.
        self.upgrader.nodeId = self.id
        super().postPoolLedgerCaughtUp(**kwargs)

    def postConfigLedgerCaughtUp(self, **kwargs):
        self.poolCfg.processLedger()
        self.upgrader.processLedger()
        super().postConfigLedgerCaughtUp(**kwargs)
        self.acknowledge_upgrade()

    def acknowledge_upgrade(self):
        if not self.upgrader.should_notify_about_upgrade_result():
            return
        lastUpgradeVersion = self.upgrader.lastActionEventInfo[2]
        action = COMPLETE if self.upgrader.didLastExecutedUpgradeSucceeded else FAIL
        logger.info('{} found the first run after upgrade, sending NODE_UPGRADE {} to version {}'
                    .format(self, action, lastUpgradeVersion))
        op = {
            TXN_TYPE: NODE_UPGRADE,
            DATA: {
                ACTION: action,
                VERSION: lastUpgradeVersion
            }
        }
        op[f.SIG.nm] = self.wallet.signMsg(op[DATA])
        request = self.wallet.signRequest(
            Request(operation=op, protocolVersion=CURRENT_PROTOCOL_VERSION))
        self.startedProcessingReq(request.key, self.nodestack.name)
        self.send(request)
        self.upgrader.notified_about_action_result()

    def notify_upgrade_start(self):
        scheduled_upgrade_version = self.upgrader.scheduledAction[0]
        action = IN_PROGRESS
        logger.info('{} is about to be upgraded, '
                    'sending NODE_UPGRADE {} to version {}'.format(
                        self, action, scheduled_upgrade_version))
        op = {
            TXN_TYPE: NODE_UPGRADE,
            DATA: {
                ACTION: action,
                VERSION: scheduled_upgrade_version
            }
        }
        op[f.SIG.nm] = self.wallet.signMsg(op[DATA])
        # do not send protocol version before all Nodes support it after Upgrade
        request = self.wallet.signRequest(
            Request(operation=op, protocolVersion=CURRENT_PROTOCOL_VERSION))
        self.startedProcessingReq(request.key, self.nodestack.name)
        self.send(request)

    def processNodeRequest(self, request: Request, frm: str):
        if request.operation[TXN_TYPE] == NODE_UPGRADE:
            try:
                self.nodeAuthNr.authenticate(request.operation[DATA],
                                             request.identifier,
                                             request.operation[f.SIG.nm])
            except BaseException as ex:
                logger.warning('The request {} failed to authenticate {}'
                               .format(request, repr(ex)))
                return
        if not self.isProcessingReq(request.key):
            self.startedProcessingReq(request.key, frm)
        # If not already got the propagate request(PROPAGATE) for the
        # corresponding client request(REQUEST)
        self.recordAndPropagate(request, frm)

    def validateNodeMsg(self, wrappedMsg):
        msg, frm = wrappedMsg
        if all(attr in msg.keys()
               for attr in [OPERATION, f.IDENTIFIER.nm, f.REQ_ID.nm]) \
                and msg.get(OPERATION, {}).get(TXN_TYPE) == NODE_UPGRADE:
            cls = self.client_request_class
            cMsg = cls(**msg)
            return cMsg, frm
        else:
            return super().validateNodeMsg(wrappedMsg)

    def authNr(self, req):
        # TODO: Assumption that NODE_UPGRADE can be sent by nodes only
        if req.get(OPERATION, {}).get(TXN_TYPE) == NODE_UPGRADE:
            return self.nodeAuthNr
        else:
            return super().authNr(req)

    def init_core_authenticator(self):
        return LedgerBasedAuthNr(self.idrCache)

    def defaultNodeAuthNr(self):
        return NodeAuthNr(self.poolLedger)

    async def prod(self, limit: int = None) -> int:
        c = await super().prod(limit)
        c += self.upgrader.service()
        c += self.restarter.service()
        return c

    def can_write_txn(self, txn_type):
        return self.poolCfg.isWritable() or txn_type in [POOL_UPGRADE,
                                                         POOL_CONFIG,
                                                         AUTH_RULE]

    def execute_domain_txns(self, ppTime, reqs: List[Request], stateRoot,
                            txnRoot) -> List:
        """
        Execute the REQUESTs sent to this Node

        :param ppTime: the time at which PRE-PREPARE was sent
        :param reqs: the client REQUESTs
        """
        return self.default_executer(DOMAIN_LEDGER_ID, ppTime, reqs,
                                     stateRoot, txnRoot)

    def update_txn_with_extra_data(self, txn):
        """
        Not all transaction data is stored in the ledger, so the extra data
        that is omitted from the ledger needs to be fetched from the
        appropriate data store

        :param txn:
        :return:
        """
        # For RAW and ENC attributes, only the hash is stored in the ledger.
        if get_type(txn) == ATTRIB:
            txn_data = get_payload_data(txn)
            # The key needs to be present and not None
            key = RAW if (RAW in txn_data and txn_data[RAW] is not None) else \
                ENC if (ENC in txn_data and txn_data[ENC] is not None) else None
            if key:
                txn_data[key] = self.attributeStore.get(txn_data[key])
        return txn

    def closeAllKVStores(self):
        super().closeAllKVStores()
        if self.idrCache:
            self.idrCache.close()
        if self.attributeStore:
            self.attributeStore.close()

    def is_request_need_quorum(self, msg_dict: dict):
        txn_type = msg_dict.get(OPERATION).get(TXN_TYPE, None) \
            if OPERATION in msg_dict \
            else None
        is_force = OPERATION in msg_dict and msg_dict.get(OPERATION).get(FORCE, False)
        is_force_upgrade = str(is_force) == 'True' and txn_type == POOL_UPGRADE
        return txn_type and not is_force_upgrade and \
            super().is_request_need_quorum(msg_dict)

    @staticmethod
    def add_auth_rules_to_config_state(state: PruningState,
                                       auth_map: dict,
                                       serializer: AbstractConstraintSerializer):
        for rule_id, auth_constraint in auth_map.items():
            serialized_key = rule_id.encode()
            serialized_value = serializer.serialize(auth_constraint)
            if not state.get(serialized_key, isCommitted=False):
                state.set(serialized_key, serialized_value)

    def _init_write_request_validator(self):
        constraint_serializer = ConstraintsSerializer(domain_state_serializer)
        config_state = self.states[CONFIG_LEDGER_ID]
        self.write_req_validator = WriteRequestValidator(
            config=self.config,
            auth_map=auth_map,
            cache=self.getIdrCache(),
            config_state=config_state,
            state_serializer=constraint_serializer,
            anyone_can_write_map=anyone_can_write_map,
            metrics=self.metrics)

class Node(PlenumNode, HasPoolManager): keygenScript = "init_indy_keys" _client_request_class = SafeRequest _info_tool_class = ValidatorNodeInfoTool ledger_ids = PlenumNode.ledger_ids + [CONFIG_LEDGER_ID] def __init__(self, name, nodeRegistry=None, clientAuthNr=None, ha=None, cliname=None, cliha=None, basedirpath=None, base_data_dir=None, primaryDecider=None, pluginPaths: Iterable[str] = None, storage=None, config=None): self.config = config or getConfig() # TODO: 3 ugly lines ahead, don't know how to avoid # self.stateTreeStore = None self.idrCache = None self.attributeStore = None super().__init__(name=name, nodeRegistry=nodeRegistry, clientAuthNr=clientAuthNr, ha=ha, cliname=cliname, cliha=cliha, basedirpath=basedirpath, base_data_dir=base_data_dir, primaryDecider=primaryDecider, pluginPaths=pluginPaths, storage=storage, config=self.config) # TODO: ugly line ahead, don't know how to avoid self.clientAuthNr = clientAuthNr or self.defaultAuthNr() self.configLedger = self.getConfigLedger() self.ledgerManager.addLedger( CONFIG_LEDGER_ID, self.configLedger, postCatchupCompleteClbk=self.postConfigLedgerCaughtUp, postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger) self.on_new_ledger_added(CONFIG_LEDGER_ID) self.states[CONFIG_LEDGER_ID] = self.loadConfigState() self.upgrader = self.getUpgrader() self.poolCfg = self.getPoolConfig() self.configReqHandler = self.getConfigReqHandler() self.initConfigState() self.requestExecuter[CONFIG_LEDGER_ID] = self.executeConfigTxns self.nodeMsgRouter.routes[Request] = self.processNodeRequest self.nodeAuthNr = self.defaultNodeAuthNr() def getPoolConfig(self): return PoolConfig(self.configLedger) def initPoolManager(self, nodeRegistry, ha, cliname, cliha): HasPoolManager.__init__(self, nodeRegistry, ha, cliname, cliha) def getPrimaryStorage(self): """ This is usually an implementation of Ledger """ if self.config.primaryStorage is None: genesis_txn_initiator = GenesisTxnInitiatorFromFile( self.basedirpath, self.config.domainTransactionsFile) return Ledger( CompactMerkleTree(hashStore=self.getHashStore('domain')), dataDir=self.dataLocation, fileName=self.config.domainTransactionsFile, ensureDurability=self.config.EnsureLedgerDurability, genesis_txn_initiator=genesis_txn_initiator) else: return initStorage(self.config.primaryStorage, name=self.name + NODE_PRIMARY_STORAGE_SUFFIX, dataDir=self.dataLocation, config=self.config) def send_ledger_status_to_newly_connected_node(self, node_name): super().send_ledger_status_to_newly_connected_node(node_name) # If the domain ledger is already synced send config ledger status # else after the domain ledger is caught up, config ledger status # will be sent if self.ledgerManager.ledgerRegistry[ DOMAIN_LEDGER_ID].state == LedgerState.synced: self.sendConfigLedgerStatus(node_name) def getUpgrader(self): return Upgrader(self.id, self.name, self.dataLocation, self.config, self.configLedger, upgradeFailedCallback=self.postConfigLedgerCaughtUp, upgrade_start_callback=self.notify_upgrade_start) def getDomainReqHandler(self): if self.attributeStore is None: self.attributeStore = self.loadAttributeStore() return DomainReqHandler(self.domainLedger, self.states[DOMAIN_LEDGER_ID], self.reqProcessors, self.getIdrCache(), self.attributeStore, self.bls_bft.bls_store) def getIdrCache(self): if self.idrCache is None: self.idrCache = IdrCache( self.name, initKeyValueStorage(self.config.idrCacheStorage, self.dataLocation, self.config.idrCacheDbName)) return self.idrCache def getConfigLedger(self): hashStore = 
LevelDbHashStore(dataDir=self.dataLocation, fileNamePrefix='config') return Ledger(CompactMerkleTree(hashStore=hashStore), dataDir=self.dataLocation, fileName=self.config.configTransactionsFile, ensureDurability=self.config.EnsureLedgerDurability) def loadConfigState(self): return PruningState( initKeyValueStorage(self.config.configStateStorage, self.dataLocation, self.config.configStateDbName)) def loadAttributeStore(self): return AttributeStore( initKeyValueStorage(self.config.attrStorage, self.dataLocation, self.config.attrDbName)) def getConfigReqHandler(self): return ConfigReqHandler(self.configLedger, self.states[CONFIG_LEDGER_ID], self.getIdrCache(), self.upgrader, self.poolManager, self.poolCfg) def initConfigState(self): self.initStateFromLedger(self.states[CONFIG_LEDGER_ID], self.configLedger, self.configReqHandler) def start_config_ledger_sync(self): self._sync_ledger(CONFIG_LEDGER_ID) self.ledgerManager.processStashedLedgerStatuses(CONFIG_LEDGER_ID) def post_txn_from_catchup_added_to_domain_ledger(self, txn): pass def sendConfigLedgerStatus(self, nodeName): self.sendLedgerStatus(nodeName, CONFIG_LEDGER_ID) @property def configLedgerStatus(self): return self.build_ledger_status(CONFIG_LEDGER_ID) def getLedgerStatus(self, ledgerId: int): if ledgerId == CONFIG_LEDGER_ID: return self.configLedgerStatus else: return super().getLedgerStatus(ledgerId) def postPoolLedgerCaughtUp(self, **kwargs): # The only reason to override this is to set the correct node id in # the upgrader since when the upgrader is initialized, node might not # have its id since it maybe missing the complete pool ledger. self.upgrader.nodeId = self.id super().postPoolLedgerCaughtUp(**kwargs) def catchup_next_ledger_after_pool(self): self.start_config_ledger_sync() def postConfigLedgerCaughtUp(self, **kwargs): self.poolCfg.processLedger() self.upgrader.processLedger() self.start_domain_ledger_sync() self.acknowledge_upgrade() def acknowledge_upgrade(self): if self.upgrader.should_notify_about_upgrade_result(): logger.debug('{} found the first run after upgrade, ' 'sending NODE_UPGRADE'.format(self)) lastUpgradeVersion = self.upgrader.lastUpgradeEventInfo[2] action = COMPLETE if self.upgrader.didLastExecutedUpgradeSucceeded else FAIL op = { TXN_TYPE: NODE_UPGRADE, DATA: { ACTION: action, VERSION: lastUpgradeVersion } } op[f.SIG.nm] = self.wallet.signMsg(op[DATA]) request = self.wallet.signOp(op) self.startedProcessingReq(*request.key, self.nodestack.name) self.send(request) def notify_upgrade_start(self): logger.info('{} is about to be upgraded, ' 'sending NODE_UPGRADE'.format(self)) scheduled_upgrade_version = self.upgrader.scheduledUpgrade[0] action = IN_PROGRESS op = { TXN_TYPE: NODE_UPGRADE, DATA: { ACTION: action, VERSION: scheduled_upgrade_version } } op[f.SIG.nm] = self.wallet.signMsg(op[DATA]) request = self.wallet.signOp(op) self.startedProcessingReq(*request.key, self.nodestack.name) self.send(request) def processNodeRequest(self, request: Request, frm: str): if request.operation[TXN_TYPE] == NODE_UPGRADE: try: self.nodeAuthNr.authenticate(request.operation[DATA], request.identifier, request.operation[f.SIG.nm]) except BaseException as ex: logger.warning( 'The request {} failed to authenticate {}'.format( request, repr(ex))) return if not self.isProcessingReq(*request.key): self.startedProcessingReq(*request.key, frm) # If not already got the propagate request(PROPAGATE) for the # corresponding client request(REQUEST) self.recordAndPropagate(request, frm) def postRecvTxnFromCatchup(self, ledgerId: int, 
txn: Any): if ledgerId == CONFIG_LEDGER_ID: # Since no config ledger transactions are applied to the state return None else: return super().postRecvTxnFromCatchup(ledgerId, txn) def validateNodeMsg(self, wrappedMsg): msg, frm = wrappedMsg if all(attr in msg.keys() for attr in [OPERATION, f.IDENTIFIER.nm, f.REQ_ID.nm]) \ and msg.get(OPERATION, {}).get(TXN_TYPE) == NODE_UPGRADE: cls = self._client_request_class cMsg = cls(**msg) return cMsg, frm else: return super().validateNodeMsg(wrappedMsg) def authNr(self, req): # TODO: Assumption that NODE_UPGRADE can be sent by nodes only if req.get(OPERATION, {}).get(TXN_TYPE) == NODE_UPGRADE: return self.nodeAuthNr else: return super().authNr(req) def isSignatureVerificationNeeded(self, msg: Any): op = msg.get(OPERATION) if op: if op.get(TXN_TYPE) in openTxns: return False return True def doStaticValidation(self, identifier, reqId, operation): super().doStaticValidation(identifier, reqId, operation) unknownKeys = set(operation.keys()).difference(set(allOpKeys)) if unknownKeys: raise InvalidClientRequest( identifier, reqId, 'invalid keys "{}"'.format(",".join(unknownKeys))) missingKeys = set(reqOpKeys).difference(set(operation.keys())) if missingKeys: raise InvalidClientRequest( identifier, reqId, 'missing required keys "{}"'.format(",".join(missingKeys))) if operation[TXN_TYPE] not in validTxnTypes: raise InvalidClientRequest( identifier, reqId, 'invalid {}: {}'.format(TXN_TYPE, operation[TXN_TYPE])) typ = operation.get(TXN_TYPE) ledgerId = self.ledgerId(typ) if ledgerId == DOMAIN_LEDGER_ID: self.reqHandler.doStaticValidation(identifier, reqId, operation) return if ledgerId == CONFIG_LEDGER_ID: self.configReqHandler.doStaticValidation(identifier, reqId, operation) def doDynamicValidation(self, request: Request): """ State based validation """ if self.ledgerIdForRequest(request) == CONFIG_LEDGER_ID: self.configReqHandler.validate(request, self.config) else: super().doDynamicValidation(request) def defaultAuthNr(self): return TxnBasedAuthNr(self.idrCache) def defaultNodeAuthNr(self): return NodeAuthNr(self.poolLedger) async def prod(self, limit: int = None) -> int: c = await super().prod(limit) c += self.upgrader.service() return c def processRequest(self, request: Request, frm: str): if request.operation[TXN_TYPE] == GET_NYM: self.send_ack_to_client(request.key, frm) result = self.reqHandler.handleGetNymReq(request, frm) self.transmitToClient(Reply(result), frm) self.total_read_request_number += 1 elif request.operation[TXN_TYPE] == GET_SCHEMA: self.send_ack_to_client(request.key, frm) # TODO: `handleGetSchemaReq` should be changed to # `get_reply_for_schema_req`, the rationale being that the method # is not completely handling the request but fetching a response. 
# Similar reasoning follows for other methods below result = self.reqHandler.handleGetSchemaReq(request, frm) self.transmitToClient(Reply(result), frm) self.total_read_request_number += 1 elif request.operation[TXN_TYPE] == GET_ATTR: self.send_ack_to_client(request.key, frm) result = self.reqHandler.handleGetAttrsReq(request, frm) self.transmitToClient(Reply(result), frm) self.total_read_request_number += 1 elif request.operation[TXN_TYPE] == GET_CLAIM_DEF: self.send_ack_to_client(request.key, frm) result = self.reqHandler.handleGetClaimDefReq(request, frm) self.transmitToClient(Reply(result), frm) self.total_read_request_number += 1 elif request.operation[TXN_TYPE] == GET_TXNS: super().processRequest(request, frm) else: # forced request should be processed before consensus if (request.operation[TXN_TYPE] in [POOL_UPGRADE, POOL_CONFIG ]) and request.isForced(): self.configReqHandler.validate(request) self.configReqHandler.applyForced(request) # here we should have write transactions that should be processed # only on writable pool if self.poolCfg.isWritable() or (request.operation[TXN_TYPE] in [POOL_UPGRADE, POOL_CONFIG]): super().processRequest(request, frm) else: raise InvalidClientRequest( request.identifier, request.reqId, 'Pool is in readonly mode, try again in 60 seconds') @classmethod def ledgerId(cls, txnType: str): # It was called ledgerTypeForTxn before if txnType in POOL_TXN_TYPES: return POOL_LEDGER_ID if txnType in IDENTITY_TXN_TYPES: return DOMAIN_LEDGER_ID if txnType in CONFIG_TXN_TYPES: return CONFIG_LEDGER_ID @property def ledgers(self): ledgers = super().ledgers ledgers.append(self.configLedger) return ledgers def applyReq(self, request: Request, cons_time): """ Apply request to appropriate ledger and state """ if self.__class__.ledgerIdForRequest(request) == CONFIG_LEDGER_ID: return self.configReqHandler.apply(request, cons_time) else: return super().applyReq(request, cons_time) def executeDomainTxns(self, ppTime, reqs: List[Request], stateRoot, txnRoot) -> List: """ Execute the REQUEST sent to this Node :param ppTime: the time at which PRE-PREPARE was sent :param req: the client REQUEST """ return self.commitAndSendReplies(self.reqHandler, ppTime, reqs, stateRoot, txnRoot) def executeConfigTxns(self, ppTime, reqs: List[Request], stateRoot, txnRoot) -> List: return self.commitAndSendReplies(self.configReqHandler, ppTime, reqs, stateRoot, txnRoot) def update_txn_with_extra_data(self, txn): """ All the data of the transaction might not be stored in ledger so the extra data that is omitted from ledger needs to be fetched from the appropriate data store :param txn: :return: """ # For RAW and ENC attributes, only hash is stored in the ledger. if txn[TXN_TYPE] == ATTRIB: # The key needs to be present and not None key = RAW if (RAW in txn and txn[RAW] is not None) else \ ENC if (ENC in txn and txn[ENC] is not None) else \ None if key: txn[key] = self.attributeStore.get(txn[key]) return txn def closeAllKVStores(self): super().closeAllKVStores() if self.idrCache: self.idrCache.close() if self.attributeStore: self.attributeStore.close() def onBatchCreated(self, ledgerId, stateRoot): if ledgerId == CONFIG_LEDGER_ID: self.configReqHandler.onBatchCreated(stateRoot) else: super().onBatchCreated(ledgerId, stateRoot) def onBatchRejected(self, ledgerId): if ledgerId == CONFIG_LEDGER_ID: self.configReqHandler.onBatchRejected() else: super().onBatchRejected(ledgerId)
def getIdrCache(self):
    return IdrCache(
        self.name,
        initKeyValueStorage(self.config.idrCacheStorage,
                            self.dataLocation,
                            self.config.idrCacheDbName))

def idr_cache(): cache = IdrCache("Cache", KeyValueStorageInMemory()) # authors cache.set("author_did_no_role", 1, int(time.time()), role=IDENTITY_OWNER, verkey="SomeVerkey", isCommitted=False) cache.set("author_did_trustee", 1, int(time.time()), role=TRUSTEE, verkey="SomeVerkey1", isCommitted=False) cache.set("author_did_steward", 1, int(time.time()), role=STEWARD, verkey="SomeVerkey2", isCommitted=False) cache.set("author_did_endorser", 1, int(time.time()), role=ENDORSER, verkey="SomeVerkey3", isCommitted=False) cache.set("author_did_network_monitor", 1, int(time.time()), role=NETWORK_MONITOR, verkey="SomeVerkey5", isCommitted=False) # endorsers cache.set("endorser_did_no_role", 1, int(time.time()), role=IDENTITY_OWNER, verkey="SomeVerkey4", isCommitted=False) cache.set("endorser_did_trustee", 1, int(time.time()), role=TRUSTEE, verkey="SomeVerkey1", isCommitted=False) cache.set("endorser_did_steward", 1, int(time.time()), role=STEWARD, verkey="SomeVerkey2", isCommitted=False) cache.set("endorser_did_endorser", 1, int(time.time()), role=ENDORSER, verkey="SomeVerkey3", isCommitted=False) cache.set("endorser_did_network_monitor", 1, int(time.time()), role=NETWORK_MONITOR, verkey="SomeVerkey5", isCommitted=False) return cache
class Node(PlenumNode, HasPoolManager): keygenScript = "init_indy_keys" _client_request_class = SafeRequest _info_tool_class = ValidatorNodeInfoTool def __init__(self, name, nodeRegistry=None, clientAuthNr=None, ha=None, cliname=None, cliha=None, config_helper=None, ledger_dir: str = None, keys_dir: str = None, genesis_dir: str = None, plugins_dir: str = None, node_info_dir: str = None, primaryDecider=None, pluginPaths: Iterable[str]=None, storage=None, config=None): config = config or getConfig() config_helper = config_helper or NodeConfigHelper(name, config) ledger_dir = ledger_dir or config_helper.ledger_dir keys_dir = keys_dir or config_helper.keys_dir genesis_dir = genesis_dir or config_helper.genesis_dir plugins_dir = plugins_dir or config_helper.plugins_dir node_info_dir = node_info_dir or config_helper.node_info_dir # TODO: 4 ugly lines ahead, don't know how to avoid self.idrCache = None self.attributeStore = None self.upgrader = None self.poolCfg = None super().__init__(name=name, nodeRegistry=nodeRegistry, clientAuthNr=clientAuthNr, ha=ha, cliname=cliname, cliha=cliha, config_helper=config_helper, ledger_dir=ledger_dir, keys_dir=keys_dir, genesis_dir=genesis_dir, plugins_dir=plugins_dir, node_info_dir=node_info_dir, primaryDecider=primaryDecider, pluginPaths=pluginPaths, storage=storage, config=config) # TODO: ugly line ahead, don't know how to avoid self.clientAuthNr = clientAuthNr or self.defaultAuthNr() self.nodeMsgRouter.routes[Request] = self.processNodeRequest self.nodeAuthNr = self.defaultNodeAuthNr() def getPoolConfig(self): return PoolConfig(self.configLedger) def initPoolManager(self, nodeRegistry, ha, cliname, cliha): HasPoolManager.__init__(self, nodeRegistry, ha, cliname, cliha) def getPrimaryStorage(self): """ This is usually an implementation of Ledger """ if self.config.primaryStorage is None: genesis_txn_initiator = GenesisTxnInitiatorFromFile( self.genesis_dir, self.config.domainTransactionsFile) return Ledger( CompactMerkleTree( hashStore=self.getHashStore('domain')), dataDir=self.dataLocation, fileName=self.config.domainTransactionsFile, ensureDurability=self.config.EnsureLedgerDurability, genesis_txn_initiator=genesis_txn_initiator) else: return initStorage(self.config.primaryStorage, name=self.name + NODE_PRIMARY_STORAGE_SUFFIX, dataDir=self.dataLocation, config=self.config) def getUpgrader(self): return Upgrader(self.id, self.name, self.dataLocation, self.config, self.configLedger, upgradeFailedCallback=self.postConfigLedgerCaughtUp, upgrade_start_callback=self.notify_upgrade_start) def getDomainReqHandler(self): if self.attributeStore is None: self.attributeStore = self.loadAttributeStore() return DomainReqHandler(self.domainLedger, self.states[DOMAIN_LEDGER_ID], self.config, self.reqProcessors, self.getIdrCache(), self.attributeStore, self.bls_bft.bls_store) def getIdrCache(self): if self.idrCache is None: self.idrCache = IdrCache(self.name, initKeyValueStorage(self.config.idrCacheStorage, self.dataLocation, self.config.idrCacheDbName) ) return self.idrCache def loadAttributeStore(self): return AttributeStore( initKeyValueStorage( self.config.attrStorage, self.dataLocation, self.config.attrDbName) ) def setup_config_req_handler(self): self.upgrader = self.getUpgrader() self.poolCfg = self.getPoolConfig() super().setup_config_req_handler() def getConfigReqHandler(self): return ConfigReqHandler(self.configLedger, self.states[CONFIG_LEDGER_ID], self.getIdrCache(), self.upgrader, self.poolManager, self.poolCfg) def 
post_txn_from_catchup_added_to_domain_ledger(self, txn): pass def postPoolLedgerCaughtUp(self, **kwargs): # The only reason to override this is to set the correct node id in # the upgrader since when the upgrader is initialized, node might not # have its id since it maybe missing the complete pool ledger. self.upgrader.nodeId = self.id super().postPoolLedgerCaughtUp(**kwargs) def postConfigLedgerCaughtUp(self, **kwargs): self.poolCfg.processLedger() self.upgrader.processLedger() super().postConfigLedgerCaughtUp(**kwargs) self.acknowledge_upgrade() def acknowledge_upgrade(self): if not self.upgrader.should_notify_about_upgrade_result(): return lastUpgradeVersion = self.upgrader.lastUpgradeEventInfo[2] action = COMPLETE if self.upgrader.didLastExecutedUpgradeSucceeded else FAIL logger.info('{} found the first run after upgrade, sending NODE_UPGRADE {} to version {}'.format( self, action, lastUpgradeVersion)) op = { TXN_TYPE: NODE_UPGRADE, DATA: { ACTION: action, VERSION: lastUpgradeVersion } } op[f.SIG.nm] = self.wallet.signMsg(op[DATA]) # do not send protocol version before all Nodes support it after Upgrade request = self.wallet.signRequest( Request(operation=op, protocolVersion=None)) self.startedProcessingReq(*request.key, self.nodestack.name) self.send(request) self.upgrader.notified_about_upgrade_result() def notify_upgrade_start(self): scheduled_upgrade_version = self.upgrader.scheduledUpgrade[0] action = IN_PROGRESS logger.info('{} is about to be upgraded, ' 'sending NODE_UPGRADE {} to version {}'.format(self, action, scheduled_upgrade_version)) op = { TXN_TYPE: NODE_UPGRADE, DATA: { ACTION: action, VERSION: scheduled_upgrade_version } } op[f.SIG.nm] = self.wallet.signMsg(op[DATA]) # do not send protocol version before all Nodes support it after Upgrade request = self.wallet.signRequest( Request(operation=op, protocolVersion=None)) self.startedProcessingReq(*request.key, self.nodestack.name) self.send(request) def processNodeRequest(self, request: Request, frm: str): if request.operation[TXN_TYPE] == NODE_UPGRADE: try: self.nodeAuthNr.authenticate(request.operation[DATA], request.identifier, request.operation[f.SIG.nm]) except BaseException as ex: logger.warning('The request {} failed to authenticate {}' .format(request, repr(ex))) return if not self.isProcessingReq(*request.key): self.startedProcessingReq(*request.key, frm) # If not already got the propagate request(PROPAGATE) for the # corresponding client request(REQUEST) self.recordAndPropagate(request, frm) def validateNodeMsg(self, wrappedMsg): msg, frm = wrappedMsg if all(attr in msg.keys() for attr in [OPERATION, f.IDENTIFIER.nm, f.REQ_ID.nm]) \ and msg.get(OPERATION, {}).get(TXN_TYPE) == NODE_UPGRADE: cls = self._client_request_class cMsg = cls(**msg) return cMsg, frm else: return super().validateNodeMsg(wrappedMsg) def authNr(self, req): # TODO: Assumption that NODE_UPGRADE can be sent by nodes only if req.get(OPERATION, {}).get(TXN_TYPE) == NODE_UPGRADE: return self.nodeAuthNr else: return super().authNr(req) def init_core_authenticator(self): return LedgerBasedAuthNr(self.idrCache) def defaultNodeAuthNr(self): return NodeAuthNr(self.poolLedger) async def prod(self, limit: int = None) -> int: c = await super().prod(limit) c += self.upgrader.service() return c def processRequest(self, request: Request, frm: str): if self.is_query(request.operation[TXN_TYPE]): self.process_query(request, frm) self.total_read_request_number += 1 else: # forced request should be processed before consensus if (request.operation[TXN_TYPE] in [ 
POOL_UPGRADE, POOL_CONFIG]) and request.isForced(): self.configReqHandler.validate(request) self.configReqHandler.applyForced(request) # here we should have write transactions that should be processed # only on writable pool if self.poolCfg.isWritable() or (request.operation[TXN_TYPE] in [ POOL_UPGRADE, POOL_CONFIG]): super().processRequest(request, frm) else: raise InvalidClientRequest( request.identifier, request.reqId, 'Pool is in readonly mode, try again in 60 seconds') def executeDomainTxns(self, ppTime, reqs: List[Request], stateRoot, txnRoot) -> List: """ Execute the REQUEST sent to this Node :param ppTime: the time at which PRE-PREPARE was sent :param req: the client REQUEST """ return self.default_executer(DOMAIN_LEDGER_ID, ppTime, reqs, stateRoot, txnRoot) def update_txn_with_extra_data(self, txn): """ All the data of the transaction might not be stored in ledger so the extra data that is omitted from ledger needs to be fetched from the appropriate data store :param txn: :return: """ # For RAW and ENC attributes, only hash is stored in the ledger. if txn[TXN_TYPE] == ATTRIB: # The key needs to be present and not None key = RAW if (RAW in txn and txn[RAW] is not None) else \ ENC if (ENC in txn and txn[ENC] is not None) else \ None if key: txn[key] = self.attributeStore.get(txn[key]) return txn def closeAllKVStores(self): super().closeAllKVStores() if self.idrCache: self.idrCache.close() if self.attributeStore: self.attributeStore.close()
def idr_cache(): cache = IdrCache("Cache", KeyValueStorageInMemory()) i = 0 for id in IDENTIFIERS[TRUSTEE]: i += 1 cache.set(id, i, int(time.time()), role=TRUSTEE, verkey="trustee_identifier_verkey", isCommitted=False) for id in IDENTIFIERS[STEWARD]: i += 1 cache.set(id, i, int(time.time()), role=STEWARD, verkey="steward_identifier_verkey", isCommitted=False) for id in IDENTIFIERS[ENDORSER]: i += 1 cache.set(id, i, int(time.time()), role=ENDORSER, verkey="endorser_identifier_verkey", isCommitted=False) for id in IDENTIFIERS[NETWORK_MONITOR]: i += 1 cache.set(id, i, int(time.time()), role=NETWORK_MONITOR, verkey="network_monitor_identifier_verkey", isCommitted=False) for id in IDENTIFIERS["OtherRole"]: i += 1 cache.set(id, i, int(time.time()), role='OtherRole', verkey="other_verkey", isCommitted=False) for id in IDENTIFIERS[None]: i += 1 cache.set(id, i, int(time.time()), role=None, verkey="identity_owner_verkey", isCommitted=False) return cache