def runAgent(agent, looper=None, bootstrap=None):
    """Attach *agent* to an event loop and run it.

    When *looper* is None a fresh Looper is created and run until
    interrupted; otherwise the agent is added to the supplied looper.
    With *bootstrap* given, the pool connection is awaited and the
    bootstrap coroutine executed before normal operation.
    """
    assert agent

    def _raise_if_not_connected(agent):
        # The client must have caught up with the pool ledger and hold
        # enough connections before it may send write requests.
        client = agent.client
        if client.mode is None or not client.can_send_write_requests():
            raise NotConnectedToNetwork("Client hasn't finished catch-up with Pool Ledger yet or "
                                        "doesn't have sufficient number of connections")

    async def _await_connection(agent):
        from stp_core.loop.eventually import eventually
        await eventually(_raise_if_not_connected, agent,
                         timeout=CONNECTION_TIMEOUT, retryWait=2)

    def _start(active_looper):
        agent.loop = active_looper.loop
        active_looper.add(agent)
        logger.info("Running {} now (port: {})".format(agent.name, agent.port))
        if bootstrap:
            active_looper.run(_await_connection(agent))
            active_looper.run(runBootstrap(bootstrap))

    if looper:
        _start(looper)
    else:
        with Looper(debug=getConfig().LOOPER_DEBUG, loop=agent.loop) as looper:
            _start(looper)
            looper.run()
def __init__(self,
             name: str,
             basedirpath: str,
             client: Client = None,
             wallet: Wallet = None,
             port: int = None,
             loop=None,
             config=None,
             endpointArgs=None):
    """Initialise the agent: resolve config and base dir, delegate to the
    base class, then set up claim versioning, invite tracking, SIGTERM
    handling and file logging."""
    config = config or getConfig()
    basedirpath = basedirpath or os.path.expanduser(config.baseDir)
    # A port passed via command-line args takes precedence over `port`.
    portParam, _ = self.getPassedArgs()

    self.logger = getlogger()

    super().__init__(name, basedirpath, client, wallet,
                     portParam or port, loop=loop, config=config,
                     endpointArgs=endpointArgs)

    # Version stamp for issued claims, persisted via the claim version file.
    self.claimVersionNumber = 0.01

    self._invites = {}

    self.updateClaimVersionFile(self.getClaimVersionFileName())

    # Shut down cleanly when the process receives SIGTERM.
    signal.signal(signal.SIGTERM, self.exit_gracefully)

    self.setupLogging(self.getLoggerFilePath())
def explorer():
    """Read the first 100 txns from the domain ledger and return them as
    a list of JSON strings.

    For hash-store types without real read-only support a temporary copy
    of the ledger is made and always removed afterwards.
    """
    # args = read_args()
    config = getConfig()
    result = []
    ledger_data_dir = get_ledger_dir("", "")
    read_copy_ledger_data_dir = None
    try:
        # RocksDB supports real read-only mode and does not need to have a ledger copy.
        if config.hashStore['type'].lower() != HS_ROCKSDB:
            config.db_transactions_config = None
            # NOTE: such approach works well only for small ledgers.
            tmp = make_copy_of_ledger(ledger_data_dir)
            # Let's be paranoid to avoid removing of ledger instead of its copy.
            ledger_path = Path(ledger_data_dir)
            ledger_copy_path = Path(tmp)
            assert ledger_path != ledger_copy_path
            assert ledger_copy_path not in ledger_path.parents
            read_copy_ledger_data_dir = tmp
            ledger_data_dir = read_copy_ledger_data_dir
        elif config.db_transactions_config is not None:
            # This allows to avoid debug logs creation on each read_ledger run
            config.db_transactions_config['db_log_dir'] = '/dev/null'

        storage = get_storage("domain", ledger_data_dir)

        # BUGFIX: iterate while the (possibly copied) ledger directory still
        # exists. Previously this loop ran after the `finally` block had
        # already removed the copy, and `storage` could be unbound if an
        # exception occurred above.
        serializer = JsonSerializer()
        for seqNo, txn in storage.iterator(start=0, end=100):
            txn = ledger_txn_serializer.deserialize(txn)
            result.append(serializer.serialize(txn, toBytes=False))
    finally:
        if read_copy_ledger_data_dir:
            shutil.rmtree(read_copy_ledger_data_dir)
    return result
def runAgent(agent, looper=None, bootstrap=None):
    """Run *agent* on an event loop, optionally waiting for pool
    connectivity and executing a bootstrap coroutine first.

    A Looper is created and driven to completion when none is supplied.
    """
    assert agent

    def check_connection(agent):
        # Write requests require a finished catch-up and enough connections.
        client = agent.client
        if (client.mode is None) or (not client.can_send_write_requests()):
            raise NotConnectedToNetwork(
                "Client hasn't finished catch-up with Pool Ledger yet or "
                "doesn't have sufficient number of connections")

    async def ensure_connected(agent):
        from stp_core.loop.eventually import eventually
        await eventually(check_connection, agent,
                         timeout=CONNECTION_TIMEOUT, retryWait=2)

    def launch(lp):
        agent.loop = lp.loop
        lp.add(agent)
        logger.info("Running {} now (port: {})".format(agent.name, agent.port))
        if bootstrap:
            lp.run(ensure_connected(agent))
            lp.run(runBootstrap(bootstrap))

    if not looper:
        with Looper(debug=getConfig().LOOPER_DEBUG, loop=agent.loop) as looper:
            launch(looper)
            looper.run()
    else:
        launch(looper)
def __init__(self, ledger, state, config, requestProcessor, idrCache,
             attributeStore, bls_store, ts_store=None):
    """Domain request handler: wires in the identity cache, the attribute
    store and the write-request authorisation validator on top of the
    base handler, then registers the default per-txn-type hooks."""
    super().__init__(ledger, state, config, requestProcessor, bls_store,
                     ts_store=ts_store)
    self.idrCache = idrCache
    self.attributeStore = attributeStore
    # Per-txn-type hook registries, populated by _add_default_handlers().
    self.static_validation_handlers = {}
    self.dynamic_validation_handlers = {}
    self.state_update_handlers = {}
    self.query_handlers = {}
    self.post_batch_creation_handlers = []
    self.post_batch_commit_handlers = []
    self.post_batch_rejection_handlers = []
    self.write_req_validator = WriteRequestValidator(
        config=getConfig(),
        auth_map=auth_map,
        cache=self.idrCache,
        anyone_can_write_map=anyone_can_write_map)
    self._add_default_handlers()
def put_load():
    """Fire `numReqs` random requests at the pool in `splits` batches and
    print per-batch and total wait times. Exits the process when done."""
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    wallet.addIdentifier(
        signer=DidSigner(seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            # BUGFIX: the slice previously extended one element past the
            # split boundary (`+ 1`), so consecutive batches overlapped by
            # one request and each wait covered an extra request.
            reqs = requests[i:i + numReqs // splits]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                customTimeoutPerReq=100,
                                                override_timeout_limit=True)
            print('>>> Got replies for {} requests << in {}'.
                  format(numReqs // splits, perf_counter() - s))
        end = perf_counter()
        print('>>>Total {} in {}<<<'.format(numReqs, end - start))
        exit(0)
def __init__(self,
             name: str = None,
             basedirpath: str = None,
             client: Client = None,
             port: int = None,
             loop=None,
             config=None,
             endpointArgs=None):
    """Base agent initialisation: event loop, naming, config, base
    directory and the (optional) client used to talk to the pool."""
    self.endpoint = None
    if port:
        # Fail fast if the requested endpoint port is already taken.
        checkPortAvailable(HA("0.0.0.0", port))
    Motor.__init__(self)
    self.loop = loop or asyncio.get_event_loop()
    self._eventListeners = {}  # Dict[str, set(Callable)]
    self._name = name or 'Agent'
    self._port = port

    self.config = config or getConfig()
    self.basedirpath = basedirpath or os.path.expanduser(
        self.config.baseDir)
    self.endpointArgs = endpointArgs

    # Client used to connect to Indy and forward on owner's txns
    self._client = client  # type: Client

    # known identifiers of this agent's owner
    self.ownerIdentifiers = {}  # type: Dict[Identifier, Identity]

    self.logger = logger
def testScheduleNodeUpgrade(nodeSet):
    """
    Tests that upgrade scheduling works. For that it starts mock control
    service, schedules upgrade for near future and then checks that service
    received notification.
    """
    from indy_common.config_util import getConfig
    config = getConfig()
    loop = asyncio.get_event_loop()
    # Start the mock control service; `indicator` completes once it
    # receives a message, which then stops the server.
    server, indicator = loop.run_until_complete(
        _createServer(host=config.controlServiceHost,
                      port=config.controlServicePort))
    indicator.add_done_callback(_stopServer(server))

    node = nodeSet[0]
    # ATTENTION! nodeId and ledger must not be None, but there
    # we do not call methods that use them, so we can pass None
    # We do it because node from nodeSet is some testable object, not real
    # node, so it has no nodeId and ledger that we can use
    upgrader = Upgrader(nodeId=None,
                        nodeName=None,
                        dataDir=node.dataLocation,
                        config=config,
                        ledger=None)
    upgrader._callUpgradeAgent(time.time(), "1.2", failTimeout=1000,
                               upgrade_id=None)
    result = loop.run_until_complete(eventuallySoon(_checkFuture(indicator)))
    # NOTE(review): `version` is presumably a module-level constant/fixture
    # not visible here -- confirm it corresponds to the "1.2" scheduled above.
    expectedResult = UpgradeMessage(version)
    assert result == expectedResult.toJson()
def migrate_all():
    """Remove the config-state and config-timestamp storages on a
    builder-net node.

    Returns True on success and False when any precondition fails or a
    removal does not succeed; progress is reported via the logger.
    """
    node_name = get_node_name()
    if node_name is None:
        return False

    config = getConfig()
    config_helper = NodeConfigHelper(node_name, config)

    if BUILDER_NET_NETWORK_NAME != config.NETWORK_NAME:
        logger.info("This script can be used only for {} network".format(BUILDER_NET_NETWORK_NAME))
        return False

    ledger_dir = config_helper.ledger_dir
    path_to_config_state = os.path.join(ledger_dir, config.configStateDbName)
    path_to_config_ts_db = os.path.join(ledger_dir, config.configStateTsDbName)

    # Both storages must exist before anything is touched.
    existence_checks = (
        (path_to_config_ts_db, "Path {} to config's timestamp storage does not exist"),
        (path_to_config_state, "Path {} to config_state storage does not exist"),
    )
    for path, message in existence_checks:
        if not os.path.exists(path):
            logger.error(message.format(path))
            return False

    # Remove the timestamp storage first, then the state storage.
    for path in (path_to_config_ts_db, path_to_config_state):
        if not remove_dir(path):
            logger.error("Failed to remove {}".format(path))
            return False

    logger.info("Config state storage was successfully removed. Path was {}".format(path_to_config_state))
    return True
def __init__(self,
             name: str = None,
             nodeReg: Dict[str, HA] = None,
             ha: Union[HA, Tuple[str, int]] = None,
             peerHA: Union[HA, Tuple[str, int]] = None,
             basedirpath: str = None,
             config=None,
             sighex: str = None):
    """Client initialisation; when *peerHA* is given, a peer-to-peer
    stack is set up for anoncreds interactions. Also prepares observer
    bookkeeping."""
    config = config or getConfig()
    super().__init__(name, nodeReg, ha, basedirpath, config, sighex)
    self.autoDiscloseAttributes = False
    self.requestedPendingTxns = False
    # Anoncreds support is enabled only when a peer address is supplied.
    self.hasAnonCreds = bool(peerHA)
    if self.hasAnonCreds:
        self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)
        stackargs = dict(name=self.stackName,
                         ha=peerHA,
                         main=True,
                         auth_mode=AuthMode.ALLOW_ANY.value)
        self.peerMsgRoutes = []
        self.peerMsgRouter = Router(*self.peerMsgRoutes)
        self.peerStack = self.peerStackClass(
            stackargs, msgHandler=self.handlePeerMessage)
        self.peerStack.sign = self.sign
        self.peerInbox = deque()
    self._observers = {}  # type Dict[str, Callable]
    self._observerSet = set(
    )  # makes it easier to guard against duplicates
def put_load():
    """Send `numReqs` random requests to the pool and report the wait
    time for each of `splits` batches plus the total. Exits when done."""
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    wallet.addIdentifier(signer=DidSigner(
        seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            # BUGFIX: drop the stray `+ 1` -- the old slice overlapped the
            # next batch by one request, inflating each batch's wait.
            reqs = requests[i:i + numReqs // splits]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                customTimeoutPerReq=100,
                                                override_timeout_limit=True)
            print('>>> Got replies for {} requests << in {}'.format(
                numReqs // splits, perf_counter() - s))
        end = perf_counter()
        print('>>>Total {} in {}<<<'.format(numReqs, end - start))
        exit(0)
def __init__(self,
             name: str=None,
             basedirpath: str=None,
             client: Client=None,
             port: int=None,
             loop=None,
             config=None,
             endpointArgs=None):
    """Base agent initialisation: event loop, naming, config and the
    CLI base directory; stores the optional pool client."""
    self.endpoint = None
    if port:
        # Fail early if the endpoint port is already in use.
        checkPortAvailable(HA("0.0.0.0", port))
    Motor.__init__(self)
    self.loop = loop or asyncio.get_event_loop()
    self._eventListeners = {}  # Dict[str, set(Callable)]
    self._name = name or 'Agent'
    self._port = port

    self.config = config or getConfig()
    self.basedirpath = basedirpath or os.path.expanduser(
        self.config.CLI_BASE_DIR)
    self.endpointArgs = endpointArgs

    # Client used to connect to Indy and forward on owner's txns
    self._client = client  # type: Client

    # known identifiers of this agent's owner
    self.ownerIdentifiers = {}  # type: Dict[Identifier, Identity]

    self.logger = logger
def __init__(self,
             name: str = None,
             nodeReg: Dict[str, HA] = None,
             ha: Union[HA, Tuple[str, int]] = None,
             peerHA: Union[HA, Tuple[str, int]] = None,
             basedirpath: str = None,
             config=None,
             sighex: str = None):
    """Client with optional anoncreds peer stack; also declares which
    GET_* request types may be sent to a single node only."""
    config = config or getConfig()
    super().__init__(name, nodeReg, ha, basedirpath, config, sighex)
    self.autoDiscloseAttributes = False
    self.requestedPendingTxns = False
    # Anoncreds support is enabled only when a peer address is supplied.
    self.hasAnonCreds = bool(peerHA)
    if self.hasAnonCreds:
        self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)
        stackargs = dict(name=self.stackName,
                         ha=peerHA,
                         main=True,
                         auth_mode=AuthMode.ALLOW_ANY.value)
        self.peerMsgRoutes = []
        self.peerMsgRouter = Router(*self.peerMsgRoutes)
        self.peerStack = self.peerStackClass(
            stackargs, msgHandler=self.handlePeerMessage)
        self.peerStack.sign = self.sign
        self.peerInbox = deque()

    # To let client send this transactions to just one node
    self._read_only_requests = {GET_NYM,
                                GET_ATTR,
                                GET_CLAIM_DEF,
                                GET_SCHEMA}
def createAgent(agentClass, name, wallet=None, basedirpath=None, port=None,
                loop=None, clientClass=Client):
    """Assemble an *agentClass* instance: default wallet with a random
    DID, base dir from config, a free port and a fresh client."""
    config = getConfig()

    if not wallet:
        wallet = Wallet(name)
        seed = randomString(32).encode('utf-8')
        wallet.addIdentifier(signer=DidSigner(seed=seed))

    basedirpath = basedirpath or config.baseDir

    if not port:
        _, port = genHa()

    client = create_client(base_dir_path=basedirpath,
                           client_class=clientClass)

    return agentClass(basedirpath=basedirpath, client=client,
                      wallet=wallet, port=port, loop=loop)
def _tconf(general_config, client_temp_dir):
    """Build a test config: apply the shared overrides and point the CLI
    base/network dirs at a temporary area."""
    config = getConfig(general_config_dir=general_config)
    for key in overriddenConfigValues:
        setattr(config, key, overriddenConfigValues[key])
    config.MinSepBetweenNodeUpgrades = 5
    config.CLI_BASE_DIR = client_temp_dir
    config.CLI_NETWORK_DIR = os.path.join(config.CLI_BASE_DIR, 'networks')
    return config
def __init__(self,
             timeout: int = TIMEOUT,
             backup_format: str = BACKUP_FORMAT,
             test_mode: bool = False,
             deps: List[str] = DEPS,
             backup_target: str = None,
             files_to_preserve: List[str] = None,
             backup_dir: str = None,
             backup_name_prefix: str = None,
             backup_num: int = BACKUP_NUM,
             hold_ext: str = '',
             config=None):
    """Node control tool: prepare backup/upgrade settings and open a
    non-blocking local TCP server socket listening for control commands."""
    self.config = config or getConfig()

    assert self.config.UPGRADE_ENTRY, "UPGRADE_ENTRY config parameter must be set"
    self.upgrade_entry = self.config.UPGRADE_ENTRY

    self.test_mode = test_mode
    self.timeout = timeout or TIMEOUT

    config_helper = ConfigHelper(self.config)
    self.backup_dir = backup_dir or config_helper.backup_dir
    # By default the genesis directory is what gets backed up.
    self.backup_target = backup_target or config_helper.genesis_dir

    self.tmp_dir = TMP_DIR
    self.backup_format = backup_format
    # External package version; not known yet at construction time.
    self.ext_ver = None
    self.deps = deps

    # Version/log bookkeeping files that must survive an upgrade.
    _files_to_preserve = [self.config.lastRunVersionFile,
                          self.config.nextVersionFile,
                          self.config.upgradeLogFile,
                          self.config.lastVersionFilePath,
                          self.config.restartLogFile]
    self.files_to_preserve = files_to_preserve or _files_to_preserve
    self.backup_num = backup_num

    _backup_name_prefix = '{}_backup_'.format(self.config.NETWORK_NAME)
    self.backup_name_prefix = backup_name_prefix or _backup_name_prefix

    # Packages pinned (held) during upgrade, plus any extras requested.
    self.packages_to_hold = ' '.join([PACKAGES_TO_HOLD, hold_ext])

    # Create a TCP/IP socket
    self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.server.setblocking(0)

    # Bind the socket to the port
    self.server_address = ('localhost', 30003)

    logger.info('Node control tool is starting up on {} port {}'.format(
        *self.server_address))
    self.server.bind(self.server_address)

    # Listen for incoming connections
    self.server.listen(1)
def __init__(self, ledger: Ledger, state: State, states, idrCache: IdrCache):
    """Pool request handler: adds the pool state serializer, the identity
    cache and a write-request authorisation validator."""
    super().__init__(ledger, state, states)
    self.stateSerializer = pool_state_serializer
    self.idrCache = idrCache
    self.write_req_validator = WriteRequestValidator(
        config=getConfig(),
        auth_map=authMap,
        cache=self.idrCache,
        anyone_can_write_map=anyoneCanWriteMap)
def _tconf(general_config):
    """Build a test config: apply the shared overrides plus short retry
    limits suited to fast test runs."""
    config = getConfig(general_config_dir=general_config)
    for key, value in overriddenConfigValues.items():
        setattr(config, key, value)

    config.MinSepBetweenNodeUpgrades = 5
    config.RETRY_TIMEOUT_RESTRICTED = 15
    config.RETRY_TIMEOUT_NOT_RESTRICTED = 15
    config.MAX_RECONNECT_RETRY_ON_SAME_SOCKET = 1

    return config
def __init__(self,
             name,
             clientAuthNr=None,
             ha=None,
             cliname=None,
             cliha=None,
             config_helper=None,
             ledger_dir: str = None,
             keys_dir: str = None,
             genesis_dir: str = None,
             plugins_dir: str = None,
             node_info_dir: str = None,
             primaryDecider=None,
             pluginPaths: Iterable[str] = None,
             storage=None,
             config=None,
             bootstrap_cls=NodeBootstrap):
    """Indy node: resolve the directory layout from config, initialise
    the base node, then install Indy-specific auth and request routing.

    NOTE(review): `primaryDecider` is accepted but not forwarded to
    super().__init__ -- confirm this is intentional.
    """
    config = config or getConfig()
    config_helper = config_helper or NodeConfigHelper(name, config)
    ledger_dir = ledger_dir or config_helper.ledger_dir
    keys_dir = keys_dir or config_helper.keys_dir
    genesis_dir = genesis_dir or config_helper.genesis_dir
    plugins_dir = plugins_dir or config_helper.plugins_dir
    node_info_dir = node_info_dir or config_helper.node_info_dir

    # TODO: 4 ugly lines ahead, don't know how to avoid
    # self.idrCache = None
    # self.attributeStore = None
    self.upgrader = None
    self.restarter = None
    self.poolCfg = None

    super().__init__(name=name,
                     clientAuthNr=clientAuthNr,
                     ha=ha,
                     cliname=cliname,
                     cliha=cliha,
                     config_helper=config_helper,
                     ledger_dir=ledger_dir,
                     keys_dir=keys_dir,
                     genesis_dir=genesis_dir,
                     plugins_dir=plugins_dir,
                     node_info_dir=node_info_dir,
                     pluginPaths=pluginPaths,
                     storage=storage,
                     config=config,
                     bootstrap_cls=bootstrap_cls)

    # TODO: ugly line ahead, don't know how to avoid
    self.clientAuthNr = clientAuthNr or self.defaultAuthNr()

    # Node-to-node requests go through Indy-specific processing.
    self.nodeMsgRouter.routes[Request] = self.processNodeRequest
    self.nodeAuthNr = self.defaultNodeAuthNr()

    self.db_manager.set_txn_version_controller(TxnVersionController())
def __init__(self,
             name,
             nodeRegistry=None,
             clientAuthNr=None,
             ha=None,
             cliname=None,
             cliha=None,
             config_helper=None,
             ledger_dir: str = None,
             keys_dir: str = None,
             genesis_dir: str = None,
             plugins_dir: str = None,
             node_info_dir: str = None,
             primaryDecider=None,
             pluginPaths: Iterable[str] = None,
             storage=None,
             config=None):
    """Indy node: resolve the directory layout from config, initialise
    the base node, then install Indy-specific auth and request routing."""
    config = config or getConfig()
    config_helper = config_helper or NodeConfigHelper(name, config)
    ledger_dir = ledger_dir or config_helper.ledger_dir
    keys_dir = keys_dir or config_helper.keys_dir
    genesis_dir = genesis_dir or config_helper.genesis_dir
    plugins_dir = plugins_dir or config_helper.plugins_dir
    node_info_dir = node_info_dir or config_helper.node_info_dir

    # TODO: 4 ugly lines ahead, don't know how to avoid
    self.idrCache = None
    self.attributeStore = None
    self.stateTsDbStorage = None
    self.upgrader = None
    self.poolCfg = None

    super().__init__(name=name,
                     nodeRegistry=nodeRegistry,
                     clientAuthNr=clientAuthNr,
                     ha=ha,
                     cliname=cliname,
                     cliha=cliha,
                     config_helper=config_helper,
                     ledger_dir=ledger_dir,
                     keys_dir=keys_dir,
                     genesis_dir=genesis_dir,
                     plugins_dir=plugins_dir,
                     node_info_dir=node_info_dir,
                     primaryDecider=primaryDecider,
                     pluginPaths=pluginPaths,
                     storage=storage,
                     config=config)

    # TODO: ugly line ahead, don't know how to avoid
    self.clientAuthNr = clientAuthNr or self.defaultAuthNr()

    # Node-to-node requests go through Indy-specific processing.
    self.nodeMsgRouter.routes[Request] = self.processNodeRequest
    self.nodeAuthNr = self.defaultNodeAuthNr()
def __init__(self,
             name,
             nodeRegistry=None,
             clientAuthNr=None,
             ha=None,
             cliname=None,
             cliha=None,
             config_helper=None,
             ledger_dir: str = None,
             keys_dir: str = None,
             genesis_dir: str = None,
             plugins_dir: str = None,
             node_info_dir: str = None,
             primaryDecider=None,
             pluginPaths: Iterable[str]=None,
             storage=None,
             config=None):
    """Indy node: resolve the directory layout from config, initialise
    the base node, then install Indy-specific auth and request routing."""
    config = config or getConfig()
    config_helper = config_helper or NodeConfigHelper(name, config)
    ledger_dir = ledger_dir or config_helper.ledger_dir
    keys_dir = keys_dir or config_helper.keys_dir
    genesis_dir = genesis_dir or config_helper.genesis_dir
    plugins_dir = plugins_dir or config_helper.plugins_dir
    node_info_dir = node_info_dir or config_helper.node_info_dir

    # TODO: 4 ugly lines ahead, don't know how to avoid
    self.idrCache = None
    self.attributeStore = None
    self.upgrader = None
    self.poolCfg = None

    super().__init__(name=name,
                     nodeRegistry=nodeRegistry,
                     clientAuthNr=clientAuthNr,
                     ha=ha,
                     cliname=cliname,
                     cliha=cliha,
                     config_helper=config_helper,
                     ledger_dir=ledger_dir,
                     keys_dir=keys_dir,
                     genesis_dir=genesis_dir,
                     plugins_dir=plugins_dir,
                     node_info_dir=node_info_dir,
                     primaryDecider=primaryDecider,
                     pluginPaths=pluginPaths,
                     storage=storage,
                     config=config)

    # TODO: ugly line ahead, don't know how to avoid
    self.clientAuthNr = clientAuthNr or self.defaultAuthNr()

    # Node-to-node requests go through Indy-specific processing.
    self.nodeMsgRouter.routes[Request] = self.processNodeRequest
    self.nodeAuthNr = self.defaultNodeAuthNr()
def create_client(base_dir_path=None, client_class=Client):
    """Instantiate *client_class* with a random 6-char name, bound to
    0.0.0.0 on a freshly allocated port."""
    config = getConfig()
    base_dir_path = base_dir_path or config.baseDir
    _, clientPort = genHa()
    return client_class(randomString(6),
                        ha=("0.0.0.0", clientPort),
                        basedirpath=base_dir_path)
def create_client(base_dir_path=None, client_class=Client):
    """Build a client with a random name on a free port, defaulting the
    base dir to the configured CLI base directory."""
    config = getConfig()
    if not base_dir_path:
        base_dir_path = config.CLI_BASE_DIR

    _, free_port = genHa()
    client = client_class(randomString(6),
                          ha=("0.0.0.0", free_port),
                          basedirpath=base_dir_path)
    return client
def dynamic_validation(self, request: Request):
    """Validate a POOL_UPGRADE request against the installed packages,
    the upgrade txn history and write-authorisation rules.

    Raises InvalidClientRequest when the target package is missing, does
    not belong to the pool, is not upgradable to the requested version,
    or when an identical upgrade is already scheduled.
    """
    self._validate_request_type(request)
    identifier, req_id, operation = get_request_data(request)
    status = '*'

    # Default to the configured upgrade entry when no package is named.
    pkt_to_upgrade = operation.get(PACKAGE, getConfig().UPGRADE_ENTRY)
    if pkt_to_upgrade:
        currentVersion, cur_deps = self.curr_pkt_info(pkt_to_upgrade)
        if not currentVersion:
            raise InvalidClientRequest(
                identifier, req_id,
                "Packet {} is not installed and cannot be upgraded".format(pkt_to_upgrade))
        # The package must (transitively) depend on the pool application.
        if all([APP_NAME not in d for d in cur_deps]):
            raise InvalidClientRequest(
                identifier, req_id,
                "Packet {} doesn't belong to pool".format(pkt_to_upgrade))
    else:
        raise InvalidClientRequest(identifier, req_id,
                                   "Upgrade packet name is empty")

    targetVersion = operation[VERSION]
    reinstall = operation.get(REINSTALL, False)
    if not Upgrader.is_version_upgradable(currentVersion,
                                          targetVersion,
                                          reinstall):
        # currentVersion > targetVersion
        raise InvalidClientRequest(identifier, req_id,
                                   "Version is not upgradable")

    action = operation.get(ACTION)
    # TODO: Some validation needed for making sure name and version
    # present
    # Most recent upgrade txn for this name+version, if any.
    txn = self.upgrader.get_upgrade_txn(
        lambda txn: get_payload_data(txn).get(NAME, None) == operation.get(NAME, None) and
        get_payload_data(txn).get(VERSION) == operation.get(VERSION),
        reverse=True)
    if txn:
        status = get_payload_data(txn).get(ACTION, '*')

    if status == START and action == START:
        raise InvalidClientRequest(
            identifier, req_id,
            "Upgrade '{}' is already scheduled".format(operation.get(NAME)))

    # A fresh upgrade is an ADD auth action; changing an existing one is EDIT.
    if status == '*':
        auth_action = AuthActionAdd(txn_type=POOL_UPGRADE,
                                    field=ACTION,
                                    value=action)
    else:
        auth_action = AuthActionEdit(txn_type=POOL_UPGRADE,
                                     field=ACTION,
                                     old_value=status,
                                     new_value=action)
    self.write_request_validator.validate(request, [auth_action])
def get_pool_ledger(node_name):
    """Open and return the pool ledger for *node_name*, using the node's
    configured genesis and ledger directories."""
    config = getConfig()
    helper = NodeConfigHelper(node_name, config)

    initiator = GenesisTxnInitiatorFromFile(helper.genesis_dir,
                                            config.poolTransactionsFile)
    hash_store = initHashStore(helper.ledger_dir, "pool", config)
    merkle_tree = CompactMerkleTree(hashStore=hash_store)

    return Ledger(merkle_tree,
                  dataDir=helper.ledger_dir,
                  fileName=config.poolTransactionsFile,
                  ensureDurability=config.EnsureLedgerDurability,
                  genesis_txn_initiator=initiator)
def run_agent(cls, agent: Agent, looper=None, bootstrap=None,
              with_cli=False):
    """Start *agent*, either under the agent CLI or directly.

    Returns the agent on success; on any failure the error is logged and
    None is returned implicitly.
    """
    try:
        config = getConfig()
        if with_cli:
            runAgentCli(agent, config, looper=looper, bootstrap=bootstrap)
        else:
            runAgent(agent, looper, bootstrap)
        return agent
    except Exception as exc:
        logger.error(getFormattedErrorMsg(
            "Agent startup failed: [cause : {}]".format(str(exc))))
def __init__(self, ledger, state, idrCache: IdrCache, upgrader: Upgrader,
             poolManager, poolCfg: PoolConfig):
    """Config request handler: keeps references to the identity cache,
    upgrader, pool manager and pool config, and builds the write-request
    authorisation validator."""
    super().__init__(ledger, state)
    self.idrCache = idrCache
    self.upgrader = upgrader
    self.poolManager = poolManager
    self.poolCfg = poolCfg
    self.write_req_validator = WriteRequestValidator(
        config=getConfig(),
        auth_map=authMap,
        cache=self.idrCache,
        anyone_can_write_map=anyoneCanWriteMap)
def __init__(self, idrCache: IdrCache, restarter: Restarter,
             poolManager, poolCfg: PoolConfig,
             info_tool: ValidatorNodeInfoTool):
    """Action request handler: stores its collaborators and builds the
    write-request authorisation validator."""
    self.idrCache = idrCache
    self.restarter = restarter
    self.info_tool = info_tool
    self.poolManager = poolManager
    self.poolCfg = poolCfg
    self.write_req_validator = WriteRequestValidator(
        config=getConfig(),
        auth_map=authMap,
        cache=self.idrCache,
        anyone_can_write_map=anyoneCanWriteMap)
def __init__(self, name, nodeRegistry=None, clientAuthNr=None, ha=None, cliname=None, cliha=None, basedirpath=None, base_data_dir=None, primaryDecider=None, pluginPaths: Iterable[str] = None, storage=None, config=None): self.config = config or getConfig() # TODO: 3 ugly lines ahead, don't know how to avoid # self.stateTreeStore = None self.idrCache = None self.attributeStore = None super().__init__(name=name, nodeRegistry=nodeRegistry, clientAuthNr=clientAuthNr, ha=ha, cliname=cliname, cliha=cliha, basedirpath=basedirpath, base_data_dir=base_data_dir, primaryDecider=primaryDecider, pluginPaths=pluginPaths, storage=storage, config=self.config) # TODO: ugly line ahead, don't know how to avoid self.clientAuthNr = clientAuthNr or self.defaultAuthNr() self.configLedger = self.getConfigLedger() self.ledgerManager.addLedger( CONFIG_LEDGER_ID, self.configLedger, postCatchupCompleteClbk=self.postConfigLedgerCaughtUp, postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger) self.on_new_ledger_added(CONFIG_LEDGER_ID) self.states[CONFIG_LEDGER_ID] = self.loadConfigState() self.upgrader = self.getUpgrader() self.poolCfg = self.getPoolConfig() self.configReqHandler = self.getConfigReqHandler() self.initConfigState() self.requestExecuter[CONFIG_LEDGER_ID] = self.executeConfigTxns self.nodeMsgRouter.routes[Request] = self.processNodeRequest self.nodeAuthNr = self.defaultNodeAuthNr()
def __init__(
        self,
        timeout: int = TIMEOUT,
        backup_format: str = BACKUP_FORMAT,
        test_mode: bool = False,
        deps: List[str] = DEPS,
        backup_target: str = None,
        files_to_preserve: List[str] = None,
        backup_dir: str = None,
        backup_name_prefix: str = None,
        backup_num: int = BACKUP_NUM,
        hold_ext: str = '',
        config=None):
    """Node control tool: prepare backup settings and open a non-blocking
    local TCP server socket listening for control commands."""
    self.config = config or getConfig()

    self.test_mode = test_mode
    self.timeout = timeout or TIMEOUT

    config_helper = ConfigHelper(self.config)
    self.backup_dir = backup_dir or config_helper.backup_dir
    # By default the genesis directory is what gets backed up.
    self.backup_target = backup_target or config_helper.genesis_dir

    self.tmp_dir = TMP_DIR
    self.backup_format = backup_format
    self.deps = deps

    # Version/log bookkeeping files that must survive an upgrade.
    _files_to_preserve = [self.config.lastRunVersionFile,
                          self.config.nextVersionFile,
                          self.config.upgradeLogFile,
                          self.config.lastVersionFilePath,
                          self.config.restartLogFile]
    self.files_to_preserve = files_to_preserve or _files_to_preserve
    self.backup_num = backup_num

    _backup_name_prefix = '{}_backup_'.format(self.config.NETWORK_NAME)
    self.backup_name_prefix = backup_name_prefix or _backup_name_prefix

    # Packages pinned (held) during upgrade, plus any extras requested.
    self.packages_to_hold = ' '.join([PACKAGES_TO_HOLD, hold_ext])

    # Create a TCP/IP socket
    self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.server.setblocking(0)

    # Bind the socket to the port
    self.server_address = ('localhost', 30003)

    logger.info('Node control tool is starting up on {} port {}'.format(
        *self.server_address))
    self.server.bind(self.server_address)

    # Listen for incoming connections
    self.server.listen(1)
def additional_dynamic_validation(self, request: Request, req_pp_time: Optional[int]):
    """Validate a POOL_UPGRADE request: ensure the target package/version
    is upgradable, no identical upgrade is already scheduled, and the
    sender is authorised for the resulting ADD/EDIT action.

    Raises InvalidClientRequest on any violation.
    """
    self._validate_request_type(request)
    identifier, req_id, operation = get_request_data(request)
    status = '*'

    # Default to the configured upgrade entry when no package is named.
    pkg_to_upgrade = operation.get(PACKAGE, getConfig().UPGRADE_ENTRY)
    targetVersion = operation[VERSION]
    reinstall = operation.get(REINSTALL, False)

    if not pkg_to_upgrade:
        raise InvalidClientRequest(identifier, req_id,
                                   "Upgrade package name is empty")

    try:
        res = self.upgrader.check_upgrade_possible(pkg_to_upgrade,
                                                   targetVersion,
                                                   reinstall)
    except Exception as exc:
        # Any checker failure is reported back to the client as the reason.
        res = str(exc)

    if res:
        raise InvalidClientRequest(identifier, req_id, res)

    action = operation.get(ACTION)
    # TODO: Some validation needed for making sure name and version
    # present
    # Most recent upgrade txn for this name+version, if any.
    txn = self.upgrader.get_upgrade_txn(
        lambda txn: get_payload_data(txn).get(NAME, None) == operation.get(NAME, None) and
        get_payload_data(txn).get(VERSION) == operation.get(VERSION),
        reverse=True)
    if txn:
        status = get_payload_data(txn).get(ACTION, '*')

    if status == START and action == START:
        raise InvalidClientRequest(
            identifier, req_id,
            "Upgrade '{}' is already scheduled".format(operation.get(NAME)))

    # A fresh upgrade is an ADD auth action; changing an existing one is EDIT.
    if status == '*':
        auth_action = AuthActionAdd(txn_type=POOL_UPGRADE,
                                    field=ACTION,
                                    value=action)
    else:
        auth_action = AuthActionEdit(txn_type=POOL_UPGRADE,
                                     field=ACTION,
                                     old_value=status,
                                     new_value=action)
    self.write_req_validator.validate(request, [auth_action])
def runAgent(agent, looper=None, bootstrap=None):
    """Add *agent* to a looper (creating and running one if none is
    supplied) and optionally execute the bootstrap coroutine."""
    assert agent

    def _attach(lp):
        agent.loop = lp.loop
        lp.add(agent)
        logger.info("Running {} now (port: {})".format(agent.name, agent.port))
        if bootstrap:
            lp.run(runBootstrap(bootstrap))

    if not looper:
        with Looper(debug=getConfig().LOOPER_DEBUG, loop=agent.loop) as looper:
            _attach(looper)
            looper.run()
    else:
        _attach(looper)
def create_local_pool(node_base_dir, cli_base_dir, config=None, node_size=4):
    """Generate steward wallets, node configs and genesis txns for a
    local pool of *node_size* nodes.

    NOTE(review): `pool` is assigned None (the LocalPool construction is
    commented out), so the `pool.add(...)` and `pool.runFor(5)` calls
    below will raise AttributeError -- this function looks
    unfinished/disabled. Also, `cli_base_dir` is never used.
    """
    conf = config or getConfig()

    stewards = []
    node_conf = []
    nodes = []
    genesis_txns = []
    for i in range(node_size):
        w = Wallet("steward")
        s = Steward(wallet=w)
        s.wallet.addIdentifier()
        stewards.append(s)

        node_config_helper = NodeConfigHelper(conf.name, conf,
                                              chroot=node_base_dir)
        n_config = adict(name='Node' + str(i + 1),
                         basedirpath=node_config_helper.ledger_dir,
                         ha=('127.0.0.1', 9700 + (i * 2)),
                         cliha=('127.0.0.1', 9700 + (i * 2) + 1))

        n_verkey, n_bls_key, n_bls_key_proof = \
            initialize_node_environment(name=n_config.name,
                                        node_config_helper=node_config_helper,
                                        override_keep=True,
                                        sigseed=randomSeed())

        # NOTE(review): 'blsley_proof' looks like a typo for a BLS key
        # proof kwarg -- confirm against Steward.set_node's signature.
        s.set_node(n_config, verkey=n_verkey, blskey=n_bls_key,
                   blsley_proof=n_bls_key_proof)

        node_conf.append(n_config)
        genesis_txns += s.generate_genesis_txns()

    pool = None  # LocalPool(genesis_txns, pool_dir, steward=stewards[0])

    for c in node_conf:
        n = Node(**c)
        pool.add(n)
        nodes.append(n)

    pool.runFor(5)
    return pool
def __init__(self,
             name: str = None,
             basedirpath: str = None,
             client: Client = None,
             wallet: Wallet = None,
             port: int = None,
             loop=None,
             attrRepo=None,
             config=None,
             endpointArgs=None):
    """Walleted agent: initialise the base Agent, then restore or create
    the wallet and attribute repo, and set up the anoncreds
    issuer/prover/verifier when a client is available."""
    Agent.__init__(self, name, basedirpath, client, port, loop=loop,
                   config=config, endpointArgs=endpointArgs)

    self.config = getConfig(basedirpath)

    self._wallet = None
    self._walletSaver = None

    # Migrate legacy wallet directory names if required.
    updateWalletsBaseDirNameIfOutdated(self.config)

    # restore any active wallet belonging to this agent
    self._restoreWallet()

    # if no persisted wallet is restored and a wallet is passed,
    # then use given wallet, else ignore the given wallet
    if not self.wallet and wallet:
        self.wallet = wallet

    # if wallet is not yet set, then create a wallet
    if not self.wallet:
        self.wallet = Wallet(name)

    self._attrRepo = attrRepo or AttributeRepoInMemory()

    Walleted.__init__(self)

    if self.client:
        self._initIssuerProverVerifier()

    self._restoreIssuerWallet()
def build_wallet_core(wallet_name, seed_file):
    """Create a wallet named *wallet_name* with one DID.

    The identifier seed is read from <CLI base dir>/<seed_file> when that
    file exists; otherwise the wallet name zero-padded to 32 characters
    is used.
    """
    config = getConfig()
    base_dir = os.path.expanduser(config.CLI_BASE_DIR)
    seed_path = '{}/{}'.format(base_dir, seed_file)

    # Default seed: the wallet name zero-padded to 32 characters.
    seed = wallet_name + '0' * (32 - len(wallet_name))

    # if seed file is available, read seed from it
    if os.path.isfile(seed_path):
        with open(seed_path, mode='r+') as fh:
            seed = fh.read().strip(' \t\n\r')

    wallet = Wallet(wallet_name)
    wallet.addIdentifier(signer=DidSigner(seed=bytes(seed, encoding='utf-8')))
    return wallet
def create_local_pool(base_dir, node_size=4):
    """Bootstrap a local pool of *node_size* nodes under *base_dir*,
    run it briefly, and return the LocalPool instance."""
    conf = getConfig(base_dir)
    pool_dir = os.path.join(base_dir, "pool")

    # TODO: Need to come back to this why we need this cleanup
    shutil.rmtree(pool_dir, ignore_errors=True)

    stewards = []
    node_conf = []
    nodes = []
    genesis_txns = []
    for idx in range(node_size):
        steward = Steward(wallet=Wallet("steward"))
        steward.wallet.addIdentifier()
        stewards.append(steward)

        # Consecutive port pairs: node HA on an even port, CLI HA next to it.
        cfg = adict(name='Node' + str(idx + 1),
                    basedirpath=pool_dir,
                    ha=('127.0.0.1', 9700 + (idx * 2)),
                    cliha=('127.0.0.1', 9700 + (idx * 2) + 1))

        verkey, bls_key = initialize_node_environment(
            name=cfg.name,
            base_dir=cfg.basedirpath,
            override_keep=True,
            config=conf,
            sigseed=randomSeed())
        steward.set_node(cfg, verkey=verkey, blskey=bls_key)

        node_conf.append(cfg)
        genesis_txns += steward.generate_genesis_txns()

    pool = LocalPool(genesis_txns, pool_dir, steward=stewards[0])

    for cfg in node_conf:
        node = Node(**cfg)
        pool.add(node)
        nodes.append(node)

    pool.runFor(5)
    return pool
def __init__(self, genesis_txns, base_dir, config=None, loop=None,
             steward: Steward = None):
    """Local pool helper: write the genesis files under *base_dir* and,
    when a steward is supplied, attach a WalletedAgent acting for it."""
    super().__init__(loop=loop)
    self.base_dir = base_dir
    self.genesis_txns = genesis_txns
    self.config = config or getConfig(self.base_dir)
    self._generate_genesis_files()
    self._steward = steward
    if steward is not None:
        # Agent on behalf of the steward: client on port 5005,
        # agent endpoint on port 8781.
        self._steward_agent = WalletedAgent(name="steward1",
                                            basedirpath=self.base_dir,
                                            client=self.create_client(
                                                5005),
                                            wallet=steward.wallet,
                                            port=8781)
        self.add(self._steward_agent)
def addNyms():
    """Spawn a steward client and create a TRUST_ANCHOR nym for every
    seed in `trustAnchorSeeds`, printing successes and failures."""
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        from indy_client.test.helper import createNym

        # Starting clients
        print("Spawning client")
        client, wallet = spawnClient(stewardName, 5678, stewardSeed)
        client.registerObserver(wallet.handleIncomingReply)
        print("Adding it to looper")
        looper.add(client)
        print("Running it")
        looper.run(ensureConnectedToNodes(client))

        # Creating request
        print("Creating request")
        failed_seeds = []
        for seed in trustAnchorSeeds:
            signer = DidSigner(seed=seed.encode())
            nym = signer.identifier
            verkey = signer.verkey

            # Sending requests
            print("Creating nym for seed {}".format(seed))
            try:
                createNym(looper=looper,
                          nym=nym,
                          creatorClient=client,
                          creatorWallet=wallet,
                          verkey=verkey,
                          role=TRUST_ANCHOR)
                print("Successfully created nym for {}".format(seed))
            except Exception:
                failed_seeds.append(seed)
                print("Failed to create nym for {}".format(seed))

        print("=======================")
        if failed_seeds:
            print("Failed to created nyms for:")
            for nym in failed_seeds:
                print("-", nym)
        else:
            print("All nyms created successfully")
        print("=======================")
def main():
    """Bootstrap a local pool with demo agents and run the Indy CLI shell."""
    config = getConfig()
    base_dir = config.CLI_BASE_DIR
    # Same effect as the explicit exists() check: create the dir if missing.
    os.makedirs(base_dir, exist_ok=True)
    loadPlugins(base_dir)

    pool = create_local_pool(base_dir)
    demo_start_agents(pool, pool, pool.base_dir)

    log_file_path = os.path.join(os.getcwd(), config.logFilePath)
    cli = IndyCli(looper=pool,
                  basedirpath=pool.base_dir,
                  logFileName=log_file_path,
                  withNode=False)

    pool.run(cli.shell())
def createAgent(agentClass, name, wallet=None, basedirpath=None, port=None,
                loop=None, clientClass=Client):
    """Instantiate ``agentClass`` with sensible defaults.

    Defaults: a fresh wallet with a random DID signer, the CLI base dir,
    a free port from genHa(), and a client of ``clientClass``.

    :return: the constructed agent instance
    """
    config = getConfig()

    if not wallet:
        wallet = Wallet(name)
        seed = randomString(32).encode('utf-8')
        wallet.addIdentifier(signer=DidSigner(seed=seed))
    if not basedirpath:
        basedirpath = config.CLI_BASE_DIR
    if not port:
        _, port = genHa()

    client = create_client(base_dir_path=basedirpath,
                           client_class=clientClass)

    agent = agentClass(basedirpath=basedirpath,
                       client=client,
                       wallet=wallet,
                       port=port,
                       loop=loop)
    return agent
def __init__(self,
             name: str=None,
             nodeReg: Dict[str, HA]=None,
             ha: Union[HA, Tuple[str, int]]=None,
             peerHA: Union[HA, Tuple[str, int]]=None,
             basedirpath: str=None,
             config=None,
             sighex: str=None):
    """
    Client constructor with optional anoncreds peer-to-peer stack.

    :param name: stack name for this client
    :param nodeReg: registry of node name -> HA, passed to the parent
    :param ha: address this client listens on
    :param peerHA: if given, a peer stack is set up on this address and
        anoncreds support is enabled
    :param basedirpath: base directory; defaults to
        <CLI_NETWORK_DIR>/<NETWORK_NAME> from config
    :param config: config object; defaults to getConfig()
    :param sighex: hex seed for the signing key, passed to the parent
    """
    self.config = config or getConfig()
    self.setupAnoncreds()
    basedirpath = basedirpath or os.path.join(self.config.CLI_NETWORK_DIR,
                                              self.config.NETWORK_NAME)
    # NOTE(review): the parent receives the raw `config` argument rather
    # than self.config, so it may resolve its own default — confirm intended.
    super().__init__(name, nodeReg, ha, basedirpath,
                     config=config, sighex=sighex)
    self.autoDiscloseAttributes = False
    self.requestedPendingTxns = False
    # Anoncreds are active only when a peer address was supplied.
    self.hasAnonCreds = bool(peerHA)
    if self.hasAnonCreds:
        self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)
        stackargs = dict(name=self.stackName,
                         ha=peerHA,
                         main=True,
                         auth_mode=AuthMode.ALLOW_ANY.value)
        self.peerMsgRoutes = []
        self.peerMsgRouter = Router(*self.peerMsgRoutes)
        self.peerStack = self.peerStackClass(stackargs,
                                             msgHandler=self.handlePeerMessage)
        # Outgoing peer messages are signed with this client's sign method.
        self.peerStack.sign = self.sign
        self.peerInbox = deque()

    # To let client send this transactions to just one node
    self._read_only_requests = {GET_NYM,
                                GET_ATTR,
                                GET_CLAIM_DEF,
                                GET_SCHEMA}
def create_local_pool(node_base_dir, cli_base_dir, config=None, node_size=4):
    """Create and run a local pool of ``node_size`` nodes.

    Node files live under ``node_base_dir`` (via NodeConfigHelper); the pool
    and its steward agent live under ``cli_base_dir``.

    :param node_base_dir: chroot for per-node config/ledger directories
    :param cli_base_dir: base directory for the LocalPool itself
    :param config: configuration; defaults to getConfig()
    :param node_size: number of nodes (and stewards) to create
    :return: the running LocalPool
    """
    conf = config or getConfig()

    stewards = []
    node_conf = []
    nodes = []
    genesis_txns = []
    for i in range(node_size):
        w = Wallet("steward")
        s = Steward(wallet=w)
        s.wallet.addIdentifier()
        stewards.append(s)

        node_config_helper = NodeConfigHelper(conf.name, conf,
                                              chroot=node_base_dir)
        n_config = adict(name='Node' + str(i + 1),
                         basedirpath=node_config_helper.ledger_dir,
                         ha=('127.0.0.1', 9700 + (i * 2)),
                         cliha=('127.0.0.1', 9700 + (i * 2) + 1))

        n_verkey, n_bls_key = initialize_node_environment(
            name=n_config.name,
            node_config_helper=node_config_helper,
            override_keep=True,
            sigseed=randomSeed())

        s.set_node(n_config, verkey=n_verkey, blskey=n_bls_key)
        node_conf.append(n_config)
        genesis_txns += s.generate_genesis_txns()

    # BUG FIX: `pool` was left as None (the construction was commented out),
    # so pool.add(n) below raised AttributeError unconditionally.  Build the
    # pool using LocalPool's (genesis_txns, base_dir, config=, steward=)
    # signature; cli_base_dir was otherwise unused and is the natural home.
    pool = LocalPool(genesis_txns, cli_base_dir, config=conf,
                     steward=stewards[0])
    for c in node_conf:
        n = Node(**c)
        pool.add(n)
        nodes.append(n)

    pool.runFor(5)
    return pool
def __init__(self,
             name: str = None,
             basedirpath: str = None,
             client: Client = None,
             wallet: Wallet = None,
             port: int = None,
             loop=None,
             attrRepo=None,
             config=None,
             endpointArgs=None):
    """
    Agent with wallet support.  Both bases are initialized explicitly —
    Agent first so that agent state exists before the Walleted mixin runs.

    :param name: agent name; also used for a freshly created wallet
    :param basedirpath: base directory for config and persisted wallets
    :param client: ledger client; issuer/prover/verifier are only set up
        when a client is present
    :param wallet: wallet to use if no persisted wallet is restored
    :param port: agent endpoint port, passed to Agent
    :param attrRepo: attribute repository; defaults to in-memory
    """
    # NOTE(review): the base Agent is not given `wallet`; all wallet
    # selection happens below in this constructor.
    Agent.__init__(self, name, basedirpath, client, port, loop=loop,
                   config=config, endpointArgs=endpointArgs)

    self.config = getConfig(basedirpath)

    self._wallet = None
    self._walletSaver = None

    # restore any active wallet belonging to this agent

    self._restoreWallet()

    # if no persisted wallet is restored and a wallet is passed,
    # then use given wallet, else ignore the given wallet
    if not self.wallet and wallet:
        self.wallet = wallet

    # if wallet is not yet set, then create a wallet
    if not self.wallet:
        self.wallet = Wallet(name)

    self._attrRepo = attrRepo or AttributeRepoInMemory()

    # Walleted must run after the wallet and attribute repo are in place.
    Walleted.__init__(self)

    if self.client:
        self._initIssuerProverVerifier()

    self._restoreIssuerWallet()
def migrate_all():
    """
    Migrate this node's ledger storages from LevelDB to RocksDB.

    The RocksDB ledger is built in a temporary sibling directory, the old
    LevelDB ledger is archived (best effort), then the directories are
    swapped.  The temporary directory is removed on every failure path.

    :return: True on success, False on any failure
    """
    node_name = get_node_name()
    if node_name is None:
        logger.error("Could not get node name")
        return False

    config = getConfig()
    config_helper = NodeConfigHelper(node_name, config)

    leveldb_ledger_dir = config_helper.ledger_dir
    rocksdb_ledger_dir = os.path.join(config_helper.ledger_data_dir,
                                      node_name + "_rocksdb")

    if os.path.exists(rocksdb_ledger_dir):
        logger.error("Temporary directory for RocksDB-based ledger exists, please remove: {}"
                     .format(rocksdb_ledger_dir))
        return False

    try:
        os.mkdir(rocksdb_ledger_dir)
    except Exception:
        # BUG FIX: traceback.print_exc() writes to stderr and returns None,
        # so the old code logged the literal string "None".  format_exc()
        # returns the traceback text so it actually lands in the log.
        logger.error(traceback.format_exc())
        logger.error("Could not create temporary directory for RocksDB-based ledger: {}"
                     .format(rocksdb_ledger_dir))
        return False

    logger.info("Starting migration of storages from LevelDB to RocksDB...")
    if migrate_storages(leveldb_ledger_dir, rocksdb_ledger_dir):
        logger.info("All storages migrated successfully from LevelDB to RocksDB")
    else:
        logger.error("Storages migration from LevelDB to RocksDB failed!")
        shutil.rmtree(rocksdb_ledger_dir)
        return False

    # Archiving LevelDB-based ledger (best effort, failure is non-fatal)
    try:
        archive_leveldb_ledger(node_name, leveldb_ledger_dir)
    except Exception:
        logger.warning("Could not create an archive of LevelDB-based ledger, proceed anyway")

    # TODO: it would be nice to open new RocksDB-based ledger
    # and compare root hashes with LevelDB-based ledger here

    # Remove LevelDB-based ledger
    try:
        shutil.rmtree(leveldb_ledger_dir)
    except Exception:
        logger.error(traceback.format_exc())  # was print_exc(): logged None
        logger.error("Could not remove LevelDB-based ledger: {}"
                     .format(leveldb_ledger_dir))
        shutil.rmtree(rocksdb_ledger_dir)
        return False

    # Move the new ledger into the old ledger's place.
    ledger_dir = leveldb_ledger_dir
    try:
        shutil.move(rocksdb_ledger_dir, ledger_dir)
    except Exception:
        logger.error(traceback.format_exc())  # was print_exc(): logged None
        logger.error("Could not rename temporary RocksDB-based ledger from '{}' to '{}'"
                     .format(rocksdb_ledger_dir, ledger_dir))
        shutil.rmtree(rocksdb_ledger_dir)
        return False

    set_own_perm("indy", ledger_dir)

    return True
from stp_core.loop.looper import Looper
from plenum.common.signer_did import DidSigner
from plenum.common.types import HA
from stp_core.common.log import getlogger
from plenum.test.helper import eventually, eventuallyAll
from indy_common.config_util import getConfig
from indy_common.constants import TRUST_ANCHOR
from indy_client.client.client import Client
from indy_client.client.wallet.wallet import Wallet

logger = getlogger()

# loading cluster configuration
config = getConfig()

# Request time-to-live in seconds.
requestTTL = 10  # seconds

# load test configuration
# Usage: <script> <stewardName> <stewardSeed> [trustAnchorSeed ...]
assert len(sys.argv) >= 3

stewardName = sys.argv[1]
stewardSeed = str.encode(sys.argv[2])
trustAnchorSeeds = sys.argv[3:]

# If no seeds were given on the command line, read them from
# ./load_test_clients.list — one "name:seed" entry per line.
if not trustAnchorSeeds:
    seed_file_path = "{}/load_test_clients.list".format(os.getcwd())
    trustAnchorSeeds = []
    with open(seed_file_path, "r") as file:
        trustAnchorSeeds = [line.strip().split(":")[1] for line in file]