def log(logger: JinaLogger):
    logger.debug('this is test debug message')
    logger.info('this is test info message')
    logger.success('this is test success message')
    logger.warning('this is test warning message')
    logger.error('this is test error message')
    logger.critical('this is test critical message')
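# A minimal usage sketch (assumption: run in the same module where `log` above is defined).
# It exercises every JinaLogger level helper shown above, then releases the handlers via `close()`.
from jina.logging.logger import JinaLogger

logger = JinaLogger('test-logger')
log(logger)
logger.close()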
class ExecMerger(Executor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from jina.logging.logger import JinaLogger

        self.logger = JinaLogger(self.__class__.__name__)

    @requests
    def debug(self, docs_matrix: List[DocumentArray], **kwargs):
        self.logger.debug(
            f'received doc matrix in exec-merger with length {len(docs_matrix)}.'
        )
        result = DocumentArray()
        for docs in zip(*docs_matrix):
            traversed_executors = [doc.tags['traversed-executors'] for doc in docs]
            shard_ids = [doc.tags['shard_id'] for doc in docs]
            shards = [doc.tags['shards'] for doc in docs]
            parallels = [doc.tags['parallel'] for doc in docs]
            traversed_executors = list(chain(*traversed_executors))
            doc = Document()
            doc.tags['traversed-executors'] = traversed_executors
            doc.tags['shard_id'] = shard_ids
            doc.tags['shards'] = shards
            doc.tags['parallel'] = parallels
            doc.tags['merged'] = True
            result.append(doc)
        return result
def _fetch_docker_auth(logger: JinaLogger) -> Tuple[str, str]:
    """Use the Hub API to get Docker credentials.

    :param logger: the logger instance
    :return: a tuple of username and password
    """
    with open(os.path.join(__resources_path__, 'hubapi.yml')) as fp:
        hubapi_yml = JAML.load(fp)
        hubapi_url = hubapi_yml['hubapi']['url'] + hubapi_yml['hubapi']['docker_auth']

    with ImportExtensions(
        required=True,
        help_text='Missing "requests" dependency, please do pip install "jina[http]"',
    ):
        import requests

    headers = {
        'Accept': 'application/json',
        'authorizationToken': _fetch_access_token(logger),
    }
    response = requests.get(url=f'{hubapi_url}', headers=headers)
    if response.status_code != requests.codes.ok:
        raise HubLoginRequired(
            f'❌ Failed to fetch docker credentials. status code {response.status_code}'
        )
    json_response = json.loads(response.text)
    username = base64.b64decode(json_response['docker_username']).decode('ascii')
    password = base64.b64decode(json_response['docker_password']).decode('ascii')
    logger.debug('✅ Successfully fetched docker creds for user')
    return username, password
def restart_deployment(
    name: str,
    namespace: str,
    image_name: str,
    container_cmd: str,
    container_args: str,
    logger: JinaLogger,
    replicas: int,
    pull_policy: str,
    custom_resource_dir: Optional[str] = None,
    port_expose: Optional[int] = None,
) -> str:
    """Restart a deployment on Kubernetes.

    :param name: name of the service and deployment
    :param namespace: k8s namespace of the service and deployment
    :param image_name: image for the k8s deployment
    :param container_cmd: command executed on the k8s pods
    :param container_args: arguments used for the k8s pod
    :param logger: used logger
    :param replicas: number of replicas
    :param pull_policy: pull policy used for fetching the Docker images from the registry.
    :param custom_resource_dir: Path to a folder containing the kubernetes yml template files.
        Defaults to the standard location jina.resources if not specified.
    :param port_expose: port which will be exposed by the deployed containers
    :return: dns name of the created service
    """
    # we can always assume the ports are the same for all executors since they run on different k8s pods
    # port expose can be defined by the user
    if not port_expose:
        port_expose = 8080
    port_in = 8081
    port_out = 8082
    port_ctrl = 8083

    logger.debug(
        f'🔋\tReplace Deployment for "{name}" with exposed port "{port_expose}"'
    )
    kubernetes_tools.replace(
        deployment_name=name,
        namespace_name=namespace,
        template='deployment',
        params={
            'name': name,
            'namespace': namespace,
            'image': image_name,
            'replicas': replicas,
            'command': container_cmd,
            'args': container_args,
            'port_expose': port_expose,
            'port_in': port_in,
            'port_out': port_out,
            'port_ctrl': port_ctrl,
            'pull_policy': pull_policy,
        },
        custom_resource_dir=custom_resource_dir,
    )
    return f'{name}.{namespace}.svc'
class SlowInitExecutor(Executor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from jina.logging.logger import JinaLogger

        self.logger = JinaLogger(self.__class__.__name__)
        self.logger.debug('Start sleep in SlowInitExecutor')
        time.sleep(10.0)
        self.logger.debug('Sleep over in SlowInitExecutor')
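# Hedged usage sketch: because the constructor above sleeps for 10 seconds, a Flow using this
# Executor may need a larger readiness timeout. `timeout_ready` (in ms) is assumed to be the
# relevant knob here, mirroring the `--timeout-ready` hint used elsewhere in this code base.
from jina import Flow

f = Flow().add(uses=SlowInitExecutor, timeout_ready=20000)
with f:
    pass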
def dummy_dumper_image(logger: JinaLogger):
    image, build_logs = client.images.build(
        path=os.path.join(cur_dir, 'dummy-dumper'), tag='dummy-dumper:0.1.1'
    )
    for chunk in build_logs:
        if 'stream' in chunk:
            for line in chunk['stream'].splitlines():
                logger.debug(line)
    return image.tags[-1]
def test_executor_image(logger: JinaLogger):
    image, build_logs = client.images.build(
        path=os.path.join(cur_dir, 'test-executor'), tag='test-executor:0.13.1'
    )
    for chunk in build_logs:
        if 'stream' in chunk:
            for line in chunk['stream'].splitlines():
                logger.debug(line)
    return image.tags[-1]
def build_docker_image(image_name, image_name_tag_map):
    logger = JinaLogger('kubernetes-testing')
    image_tag = image_name + ':' + image_name_tag_map[image_name]
    image, build_logs = client.images.build(
        path=os.path.join(cur_dir, image_name), tag=image_tag
    )
    for chunk in build_logs:
        if 'stream' in chunk:
            for line in chunk['stream'].splitlines():
                logger.debug(line)
    return image.tags[-1]
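# Hypothetical invocation of the helper above; the tag map mirrors the versions used in the
# image-building fixtures in this section (names and versions are illustrative only).
image_name_tag_map = {'test-executor': '0.13.1', 'dummy-dumper': '0.1.1'}
tag = build_docker_image('test-executor', image_name_tag_map)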
class MongoDBHandler: """ Mongodb Handler to connect to the database & insert documents in the collection MongoDB has no access control by default, hence can be used without username:password. If username & password are passed, we need to create it (can be changed to existing un:pw) """ def __init__(self, hostname: str = '127.0.0.1', port: int = 27017, username: str = None, password: str = None, database: str = 'defaultdb', collection: str = 'defaultcol'): self.logger = JinaLogger(self.__class__.__name__) self.hostname = hostname self.port = port self.username = username self.password = password self.database_name = database self.collection_name = collection if self.username and self.password: self.connection_string = \ f'mongodb://{self.username}:{self.password}@{self.hostname}:{self.port}' else: self.connection_string = \ f'mongodb://{self.hostname}:{self.port}' def __enter__(self): return self.connect() def connect(self) -> 'MongoDBHandler': import pymongo try: self.client = pymongo.MongoClient(self.connection_string) self.client.admin.command('ismaster') self.logger.info('Successfully connected to the database') except pymongo.errors.ConnectionFailure: raise MongoDBException('Database server is not available') except pymongo.errors.ConfigurationError: raise MongoDBException('Credentials passed are not correct!') except pymongo.errors.PyMongoError as exp: raise MongoDBException(exp) except Exception as exp: raise MongoDBException(exp) return self @property def database(self): return self.client[self.database_name] @property def collection(self): return self.database[self.collection_name] def find(self, key: int) -> Optional[bytes]: import pymongo try: cursor = self.collection.find({'_id': key}) cursor_contents = list(cursor) if cursor_contents: return cursor_contents[0] return None except pymongo.errors.PyMongoError as exp: self.logger.error( f'Got an error while finding a document in the db {exp}') def insert(self, documents: Iterator[Dict]) -> Optional[str]: import pymongo try: result = self.collection.insert_many(documents) self.logger.debug( f'inserted {len(result.inserted_ids)} documents in the database' ) return result.inserted_ids except pymongo.errors.PyMongoError as exp: self.logger.error( f'got an error while inserting a document in the db {exp}') def __exit__(self, exc_type, exc_val, exc_tb): import pymongo try: self.client.close() except pymongo.errors.PyMongoError as exp: raise MongoDBException(exp) def delete(self, keys: Iterator[int], *args, **kwargs): import pymongo try: count = self.collection.delete_many({ '_id': { '$in': list(keys) } }).deleted_count self.logger.debug(f'deleted {count} documents in the database') except pymongo.errors.PyMongoError as exp: self.logger.error( f'got an error while deleting a document in the db {exp}') def update(self, keys: Iterator[int], values: Iterator[bytes], *args, **kwargs): import pymongo try: # update_many updates several keys with the same op. / data. # we need this instead count = 0 for k, new_doc in zip(keys, values): new_doc = {'_id': k, 'values': new_doc} inserted_doc = self.collection.find_one_and_replace({'_id': k}, new_doc) if inserted_doc == new_doc: count += 1 self.logger.debug(f'updated {count} documents in the database') return except pymongo.errors.PyMongoError as exp: self.logger.error( f'got an error while updating documents in the db {exp}')
def run( args: 'argparse.Namespace', name: str, container_name: str, net_mode: Optional[str], runtime_ctrl_address: str, envs: Dict, is_started: Union['multiprocessing.Event', 'threading.Event'], is_shutdown: Union['multiprocessing.Event', 'threading.Event'], is_ready: Union['multiprocessing.Event', 'threading.Event'], ): """Method to be run in a process that stream logs from a Container This method is the target for the Pod's `thread` or `process` .. note:: :meth:`run` is running in subprocess/thread, the exception can not be propagated to the main process. Hence, please do not raise any exception here. .. note:: Please note that env variables are process-specific. Subprocess inherits envs from the main process. But Subprocess's envs do NOT affect the main process. It does NOT mess up user local system envs. :param args: namespace args from the Pod :param name: name of the Pod to have proper logging :param container_name: name to set the Container to :param net_mode: The network mode where to run the container :param runtime_ctrl_address: The control address of the runtime in the container :param envs: Dictionary of environment variables to be set in the docker image :param is_started: concurrency event to communicate runtime is properly started. Used for better logging :param is_shutdown: concurrency event to communicate runtime is terminated :param is_ready: concurrency event to communicate runtime is ready to receive messages """ import docker log_kwargs = copy.deepcopy(vars(args)) log_kwargs['log_config'] = 'docker' logger = JinaLogger(name, **log_kwargs) cancel = threading.Event() fail_to_start = threading.Event() if not __windows__: try: for signame in {signal.SIGINT, signal.SIGTERM}: signal.signal(signame, lambda *args, **kwargs: cancel.set()) except (ValueError, RuntimeError) as exc: logger.warning( f' The process starting the container for {name} will not be able to handle termination signals. ' f' {repr(exc)}') else: with ImportExtensions( required=True, logger=logger, help_text= '''If you see a 'DLL load failed' error, please reinstall `pywin32`. 
If you're using conda, please use the command `conda install -c anaconda pywin32`''', ): import win32api win32api.SetConsoleCtrlHandler(lambda *args, **kwargs: cancel.set(), True) client = docker.from_env() try: container = _docker_run( client=client, args=args, container_name=container_name, envs=envs, net_mode=net_mode, logger=logger, ) client.close() def _is_ready(): return AsyncNewLoopRuntime.is_ready(runtime_ctrl_address) def _is_container_alive(container) -> bool: import docker.errors try: container.reload() except docker.errors.NotFound: return False return True async def _check_readiness(container): while (_is_container_alive(container) and not _is_ready() and not cancel.is_set()): await asyncio.sleep(0.1) if _is_container_alive(container): is_started.set() is_ready.set() else: fail_to_start.set() async def _stream_starting_logs(container): for line in container.logs(stream=True): if (not is_started.is_set() and not fail_to_start.is_set() and not cancel.is_set()): await asyncio.sleep(0.01) msg = line.decode().rstrip() # type: str logger.debug(re.sub(r'\u001b\[.*?[@-~]', '', msg)) async def _run_async(container): await asyncio.gather(*[ _check_readiness(container), _stream_starting_logs(container) ]) asyncio.run(_run_async(container)) finally: client.close() if not is_started.is_set(): logger.error( f' Process terminated, the container fails to start, check the arguments or entrypoint' ) is_shutdown.set() logger.debug(f'process terminated')
class TestExecutor(Executor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from jina.logging.logger import JinaLogger

        self.logger = JinaLogger(self.__class__.__name__)
        self._name = self.runtime_args.name

    @requests(on='/debug')
    def debug(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        key = 'traversed-executors'
        for doc in docs:
            if key not in doc.tags:
                doc.tags[key] = []
            traversed = list(doc.tags.get(key))
            traversed.append(self._name)
            doc.tags[key] = traversed
            doc.tags['parallel'] = self.runtime_args.replicas
            doc.tags['shards'] = self.runtime_args.shards
            doc.tags['shard_id'] = self.runtime_args.shard_id
            doc.tags['hostname'] = socket.gethostname()

    @requests(on='/env')
    def env(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        for doc in docs:
            doc.tags['k1'] = os.environ.get('k1')
            doc.tags['k2'] = os.environ.get('k2')
            doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
            doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}

    @requests(on='/cuda')
    def cuda(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        import kubernetes
        from kubernetes import client

        api_client = client.ApiClient()
        core_client = client.CoreV1Api(api_client=api_client)

        try:
            # try loading kube config from disk first
            kubernetes.config.load_kube_config()
        except kubernetes.config.config_exception.ConfigException:
            # if the config could not be read from disk, try loading in cluster config
            # this works if we are running inside k8s
            kubernetes.config.load_incluster_config()

        # `list_namespaced_pod` returns a V1PodList; the pods live in its `.items` attribute
        pods = core_client.list_namespaced_pod('test-gpu').items  # List[V1Pod]
        pod_spec = pods[0].spec  # V1PodSpec
        pod_container = pod_spec.containers[0]  # V1Container
        pod_resources = pod_container.resources  # V1ResourceRequirements

        for doc in docs:
            doc.tags['resources']['limits'] = pod_resources.limits

    @requests(on='/workspace')
    def foo_workspace(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        self.logger.debug(f'Workspace {self.workspace}.')
        for doc in docs:
            doc.tags['workspace'] = self.workspace
class JinaDProcessTarget: """Target to be executed on JinaD Process""" def __call__( self, args: 'argparse.Namespace', is_started: Union['multiprocessing.Event', 'threading.Event'], is_shutdown: Union['multiprocessing.Event', 'threading.Event'], is_ready: Union['multiprocessing.Event', 'threading.Event'], is_cancelled: Union['multiprocessing.Event', 'threading.Event'], envs: Optional[Dict] = None, ): """Method responsible to manage a remote Pod This method is the target for the Pod's `thread` or `process` .. note:: Please note that env variables are process-specific. Subprocess inherits envs from the main process. But Subprocess's envs do NOT affect the main process. It does NOT mess up user local system envs. :param args: namespace args from the Pod :param is_started: concurrency event to communicate runtime is properly started. Used for better logging :param is_shutdown: concurrency event to communicate runtime is terminated :param is_ready: concurrency event to communicate runtime is ready to receive messages :param is_cancelled: concurrency event to receive cancelling signal from the Pod. Needed by some runtimes :param envs: a dictionary of environment variables to be passed to remote Pod """ self.args = args self.envs = envs self.is_started = is_started self.is_shutdown = is_shutdown self.is_ready = is_ready self.is_cancelled = is_cancelled self.pod_id = None self._logger = JinaLogger('RemotePod', **vars(args)) run_async(self._run) async def _run(self): """Manage a remote Pod""" try: await self._create_remote_pod() except Exception as ex: self._logger.error( f'{ex!r} while starting a remote Pod' + f'\n add "--quiet-error" to suppress the exception details' if not self.args.quiet_error else '', exc_info=not self.args.quiet_error, ) else: self.is_started.set() self.is_ready.set() await self._wait_until_cancelled() finally: await self._terminate_remote_pod() self.is_shutdown.set() self._logger.debug('JinaDProcessTarget terminated') async def _create_remote_pod(self): """Create Workspace, Pod on remote JinaD server""" with ImportExtensions(required=True): # rich & aiohttp are used in `AsyncJinaDClient` import rich import aiohttp from daemon.clients import AsyncJinaDClient assert rich assert aiohttp # NOTE: args.timeout_ready is always set to -1 for JinadRuntime so that wait_for_success doesn't fail in Pod, # so it can't be used for Client timeout. 
self.client = AsyncJinaDClient(host=self.args.host, port=self.args.port_jinad, logger=self._logger) if not await self.client.alive: raise DaemonConnectivityError # Create a remote workspace with upload_files workspace_id = await self.client.workspaces.create( paths=self.filepaths, id=self.args.workspace_id, complete=True, ) if not workspace_id: self._logger.critical(f'remote workspace creation failed') raise DaemonWorkspaceCreationFailed payload = replace_enum_to_str(vars(self._mask_args())) # Create a remote Pod in the above workspace success, response = await self.client.pods.create( workspace_id=workspace_id, payload=payload, envs=self.envs) if not success: self._logger.critical(f'remote pod creation failed') raise DaemonPodCreationFailed(response) else: self.pod_id = response async def _sleep_forever(self): """Sleep forever, no prince will come.""" await asyncio.sleep(1e10) async def _wait_until_cancelled(self): while not self.is_cancelled.is_set(): await asyncio.sleep(0.1) async def _terminate_remote_pod(self): """Removes the remote Pod""" if self.pod_id is not None: if await self.client.pods.delete(id=self.pod_id): self._logger.success( f'Successfully terminated remote Pod {self.pod_id}') # Don't delete workspace here, as other Executors might use them. # TODO(Deepankar): probably enable an arg here? @property def filepaths(self) -> List[Path]: """Get file/directories to be uploaded to remote workspace :return: filepaths to be uploaded to remote """ paths = set() if not self.args.upload_files: self._logger.warning(f'no files passed to upload to remote') else: for path in self.args.upload_files: try: fullpath = Path(complete_path(path)) paths.add(fullpath) except FileNotFoundError: self._logger.error(f'invalid path {path} passed') return list(paths) def _mask_args(self): cargs = copy.deepcopy(self.args) # TODO:/NOTE this prevents jumping from remote to another remote (Han: 2021.1.17) # _args.host = __default_host__ # host resetting disables dynamic routing. Use `disable_remote` instead cargs.disable_remote = True cargs.log_config = '' # do not use local log_config cargs.upload_files = [] # reset upload files cargs.noblock_on_start = False # wait until start success changes = [] for k, v in vars(cargs).items(): if v != getattr(self.args, k): changes.append( f'{k:>30s}: {str(getattr(self.args, k)):30s} -> {str(v):30s}' ) if changes: changes = [ 'note the following arguments have been masked or altered for remote purpose:' ] + changes self._logger.debug('\n'.join(changes)) return cargs
def send_requests(
    client_kwargs,
    rolling_event,
    client_ready_to_send_event,
    exception_to_raise_event,
):
    from jina.logging.logger import JinaLogger
    from jina.clients import Client

    _logger = JinaLogger('test_send_requests')
    _logger.debug('send request start')
    try:
        client = Client(**client_kwargs)
        client.show_progress = True
        _logger.debug(f'Client instantiated with {client_kwargs}')
        _logger.debug('Set client_ready_to_send_event event')
        client_ready_to_send_event.set()
        while not rolling_event.is_set():
            _logger.debug('event is not set')
            r = client.post(
                '/exec',
                [Document() for _ in range(10)],
                return_results=True,
                port_expose=9090,
            )
            assert len(r) > 0
            assert len(r[0].docs) > 0
            for doc in r[0].docs:
                assert doc.tags['argument'] in ['value1', 'value2']
            time.sleep(0.1)
        _logger.debug('event is unset')
    except Exception:
        _logger.error('Some error happened while sending requests')
        exception_to_raise_event.set()
    _logger.debug('send requests finished')
class MongoDBHandler: """Mongodb Handler to connect to the database and can apply add, update, delete and query. MongoDB has no access control by default, hence it can be used without username:password. """ def __init__(self, hostname: str = '127.0.0.1', port: int = 27017, username: Optional[str] = None, password: Optional[str] = None, database: str = 'defaultdb', collection: str = 'defaultcol'): self.logger = JinaLogger(self.__class__.__name__) self.hostname = hostname self.port = port self.username = username self.password = password self.database_name = database self.collection_name = collection if self.username and self.password: self.connection_string = \ f'mongodb://{self.username}:{self.password}@{self.hostname}:{self.port}' else: self.connection_string = \ f'mongodb://{self.hostname}:{self.port}' def __enter__(self): return self.connect() def connect(self) -> 'MongoDBHandler': """Connect to the database. """ import pymongo try: self.client = pymongo.MongoClient(self.connection_string) self.client.admin.command('ismaster') self.logger.info('Successfully connected to the database') except pymongo.errors.ConnectionFailure: raise MongoDBException('Database server is not available') except pymongo.errors.ConfigurationError: raise MongoDBException('Credentials passed are not correct!') except pymongo.errors.PyMongoError as exp: raise MongoDBException(exp) except Exception as exp: raise MongoDBException(exp) return self @property def database(self) -> 'Database': """ Get database. """ return self.client[self.database_name] @property def collection(self) -> 'Collection': """ Get collection. """ return self.database[self.collection_name] def query(self, key: str) -> Optional[bytes]: """ Queries the related document for the provided ``key``. :param key: id of the document """ import pymongo try: cursor = self.collection.find({'_id': key}) cursor_contents = list(cursor) if cursor_contents: return cursor_contents[0] return None except pymongo.errors.PyMongoError as exp: raise Exception( f'Got an error while finding a document in the db {exp}') def add(self, documents: Iterable[Dict]) -> Optional[str]: """ Insert the documents into the database. :param documents: documents to be inserted """ import pymongo try: result = self.collection.insert_many(documents) self.logger.debug( f'inserted {len(result.inserted_ids)} documents in the database' ) return result.inserted_ids except pymongo.errors.PyMongoError as exp: raise Exception( f'got an error while inserting a document in the db {exp}') def __exit__(self, *args): """ Make sure the connection to the database is closed. """ import pymongo try: self.client.close() except pymongo.errors.PyMongoError as exp: raise MongoDBException(exp) def delete(self, keys: Iterable[str], *args, **kwargs): """Delete documents from the indexer. :param keys: document ids to delete related documents """ import pymongo try: count = self.collection.delete_many({ '_id': { '$in': list(keys) } }).deleted_count self.logger.debug(f'deleted {count} documents in the database') except pymongo.errors.PyMongoError as exp: raise Exception( f'got an error while deleting a document in the db {exp}') def update(self, keys: Iterable[str], values: Iterable[bytes]) -> None: """ Update the documents on the database. :param keys: document ids :param values: serialized documents """ import pymongo try: # update_many updates several keys with the same op. / data. 
# we need this instead count = 0 for k, new_doc in zip(keys, values): new_doc = {'_id': k, 'values': new_doc} inserted_doc = self.collection.find_one_and_replace({'_id': k}, new_doc) if inserted_doc == new_doc: count += 1 self.logger.debug(f'updated {count} documents in the database') return except pymongo.errors.PyMongoError as exp: raise Exception( f'got an error while updating documents in the db {exp}')
class MongoDBHandler:
    """
    MongoDB handler to connect to the database & insert documents in the collection.

    MongoDB has no access control by default, hence it can be used without username:password.
    If username & password are passed, we need to create it (can be changed to an existing username:password).
    """

    def __init__(self,
                 hostname: str = '127.0.0.1',
                 port: int = 27017,
                 username: Optional[str] = None,
                 password: Optional[str] = None,
                 database: str = 'defaultdb',
                 collection: str = 'defaultcol'):
        self.logger = JinaLogger(self.__class__.__name__)
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.database_name = database
        self.collection_name = collection
        if self.username and self.password:
            self.connection_string = \
                f'mongodb://{self.username}:{self.password}@{self.hostname}:{self.port}'
        else:
            self.connection_string = \
                f'mongodb://{self.hostname}:{self.port}'

    def __enter__(self):
        return self.connect()

    def connect(self) -> 'MongoDBHandler':
        import pymongo
        try:
            self.client = pymongo.MongoClient(self.connection_string)
            self.client.admin.command('ismaster')
            self.logger.info('Successfully connected to the database')
        except pymongo.errors.ConnectionFailure:
            raise MongoDBException('Database server is not available')
        except pymongo.errors.ConfigurationError:
            raise MongoDBException('Credentials passed are not correct!')
        except pymongo.errors.PyMongoError as exp:
            raise MongoDBException(exp)
        except Exception as exp:
            raise MongoDBException(exp)
        return self

    @property
    def database(self):
        return self.client[self.database_name]

    @property
    def collection(self):
        return self.database[self.collection_name]

    def find(self, query: Dict[str, Union[Dict, List]]):
        import pymongo
        try:
            return self.collection.find(query)
        except pymongo.errors.PyMongoError as exp:
            self.logger.error(
                f'Got an error while finding a document in the db {exp}')

    def insert(self, documents: Iterator[Dict]) -> Optional[str]:
        import pymongo
        try:
            result = self.collection.insert_many(documents)
            self.logger.debug('inserted documents in the database')
            return result.inserted_ids
        except pymongo.errors.PyMongoError as exp:
            self.logger.error(
                f'got an error while inserting a document in the db {exp}')

    def __exit__(self, exc_type, exc_val, exc_tb):
        import pymongo
        try:
            self.client.close()
        except pymongo.errors.PyMongoError as exp:
            raise MongoDBException(exp)
def deploy_service( name: str, namespace: str, image_name: str, container_cmd: str, container_args: str, logger: JinaLogger, replicas: int, pull_policy: str, init_container: Optional[Dict] = None, custom_resource_dir: Optional[str] = None, port_expose: Optional[int] = None, env: Optional[Dict] = None, gpus: Optional[Union[int, str]] = None, ) -> str: """Deploy service on Kubernetes. :param name: name of the service and deployment :param namespace: k8s namespace of the service and deployment :param image_name: image for the k8s deployment :param container_cmd: command executed on the k8s pods :param container_args: arguments used for the k8s pod :param logger: used logger :param replicas: number of replicas :param pull_policy: pull policy used for fetching the Docker images from the registry. :param init_container: additional arguments used for the init container :param custom_resource_dir: Path to a folder containing the kubernetes yml template files. Defaults to the standard location jina.resources if not specified. :param port_expose: port which will be exposed by the deployed containers :param env: environment variables to be passed into configmap. :param gpus: number of gpus to use, for k8s requires you pass an int number, refers to the number of requested gpus. :return: dns name of the created service """ # we can always assume the ports are the same for all executors since they run on different k8s pods # port expose can be defined by the user if not port_expose: port_expose = 8080 port_in = 8081 port_out = 8082 port_ctrl = 8083 logger.debug( f'🔋\tCreate Service for "{name}" with exposed port "{port_expose}"') kubernetes_tools.create( 'service', { 'name': name, 'target': name, 'namespace': namespace, 'port_expose': port_expose, 'port_in': port_in, 'port_out': port_out, 'port_ctrl': port_ctrl, 'type': 'ClusterIP', }, logger=logger, custom_resource_dir=custom_resource_dir, ) logger.debug(f'📝\tCreate ConfigMap for deployment.') kubernetes_tools.create( 'configmap', { 'name': name, 'namespace': namespace, 'data': env, }, logger=logger, custom_resource_dir=None, ) logger.debug( f'🐳\tCreate Deployment for "{name}" with image "{image_name}", replicas {replicas} and init_container {init_container is not None}' ) deployment_params = { 'name': name, 'namespace': namespace, 'image': image_name, 'replicas': replicas, 'command': container_cmd, 'args': container_args, 'port_expose': port_expose, 'port_in': port_in, 'port_out': port_out, 'port_ctrl': port_ctrl, 'pull_policy': pull_policy, } if init_container: template_name = 'deployment-init' deployment_params = {**deployment_params, **init_container} else: template_name = 'deployment' if gpus: deployment_params['device_plugins'] = {'nvidia.com/gpu': gpus} kubernetes_tools.create( template_name, deployment_params, logger=logger, custom_resource_dir=custom_resource_dir, ) logger.debug(f'🔑\tCreate necessary permissions"') kubernetes_tools.create( 'connection-pool-role', { 'namespace': namespace, }, ) kubernetes_tools.create( 'connection-pool-role-binding', { 'namespace': namespace, }, ) return f'{name}.{namespace}.svc'
class HubIO: """:class:`HubIO` provides the way to interact with Jina Hub registry. You can use it with CLI to package a directory into a Jina Hub and publish it to the world. Examples: - :command:`jina hub push my_executor/` to push the executor package to Jina Hub - :command:`jina hub pull UUID8` to download the executor identified by UUID8 To create a :class:`HubIO` object, simply: .. highlight:: python .. code-block:: python hubio = HubIO(args) :param args: arguments """ def __init__(self, args: Optional[argparse.Namespace] = None, **kwargs): if args and isinstance(args, argparse.Namespace): self.args = args else: self.args = ArgNamespace.kwargs2namespace(kwargs, set_hub_parser()) self.logger = JinaLogger(self.__class__.__name__, **vars(args)) with ImportExtensions(required=True): import rich import cryptography import filelock assert rich #: prevent pycharm auto remove the above line assert cryptography assert filelock def new(self) -> None: """Create a new executor folder interactively.""" from rich import print, box from rich.prompt import Prompt, Confirm from rich.panel import Panel from rich.table import Table from rich.console import Console from rich.progress import track from rich.syntax import Syntax console = Console() print( Panel.fit( ''' [bold green]Executor[/bold green] is how Jina processes [bold]Document[/bold]. This guide helps you to create your own Executor in 30 seconds.''', title='Create New Executor', )) exec_name = (self.args.name if self.args.name else Prompt.ask( ':grey_question: What is the [bold]name[/bold] of your executor?\n' '[dim]CamelCase is required[/dim]', default=f'MyExecutor{random.randint(0, 100)}', )) exec_path = (self.args.path if self.args.path else Prompt.ask( ':grey_question: [bold]Which folder[/bold] to store your executor?', default=os.path.join(os.getcwd(), exec_name), )) exec_description = '{{}}' exec_keywords = '{{}}' exec_url = '{{}}' is_dockerfile = False if self.args.advance_configuration or Confirm.ask( '[green]That\'s all we need to create an Executor![/green]\n' ':grey_question: Or do you want to proceed to advanced configuration', default=False, ): exec_description = ( self.args.description if self.args.description else (Prompt.ask( ':grey_question: Please give a [bold]short description[/bold] of your executor?\n' f'[dim]Example: {exec_name} embeds images into 128-dim vectors using ResNet.[/dim]' ))) exec_keywords = (self.args.keywords if self.args.keywords else ( Prompt.ask( ':grey_question: Please give some [bold]keywords[/bold] to help people search your executor [dim](separated by comma)[/dim]\n' f'[dim]Example: image cv embedding encoding resnet[/dim]')) ) exec_url = (self.args.url if self.args.url else (Prompt.ask( ':grey_question: What is the [bold]URL[/bold] for GitHub repo?\n' f'[dim]Example: https://github.com/yourname/my-executor[/dim]') )) print( Panel.fit( ''' [bold]Dockerfile[/bold] describes how this executor will be built. It is useful when your executor has non-trivial dependencies or must be run under certain environment. - If the [bold]Dockerfile[/bold] is missing, Jina automatically generates one for you. 
- If you provide one, then Jina will respect the given [bold]Dockerfile[/bold].''', title='[Optional] [bold]Dockerfile[/bold]', width=80, )) is_dockerfile = self.args.add_dockerfile or Confirm.ask( ':grey_question: Do you need to write your own [bold]Dockerfile[/bold] instead of the auto-generated one?', default=False, ) print('[green]That\'s all we need to create an Executor![/green]') def mustache_repl(srcs): for src in track(srcs, description=f'Creating {exec_name}...', total=len(srcs)): with open( os.path.join(__resources_path__, 'executor-template', src)) as fp, open( os.path.join(exec_path, src), 'w') as fpw: f = (fp.read().replace('{{exec_name}}', exec_name).replace( '{{exec_description}}', exec_description).replace( '{{exec_keywords}}', str(exec_keywords.split(','))).replace( '{{exec_url}}', exec_url)) f = [ v + '\n' for v in f.split('\n') if not ('{{' in v or '}}' in v) ] fpw.writelines(f) Path(exec_path).mkdir(parents=True, exist_ok=True) pkg_files = [ 'executor.py', 'manifest.yml', 'README.md', 'requirements.txt', 'config.yml', ] if is_dockerfile: pkg_files.append('Dockerfile') mustache_repl(pkg_files) table = Table(box=box.SIMPLE) table.add_column('Filename', style='cyan', no_wrap=True) table.add_column('Description', no_wrap=True) # adding the columns in order of `ls` output table.add_row( 'config.yml', 'The YAML config file of the Executor. You can define [bold]__init__[/bold] arguments using [bold]with[/bold] keyword.', ) table.add_row( '', Panel( Syntax( f''' jtype: {exec_name} with: foo: 1 bar: hello metas: py_modules: - executor.py ''', 'yaml', theme='monokai', line_numbers=True, word_wrap=True, ), title='config.yml', width=50, expand=False, ), ) if is_dockerfile: table.add_row( 'Dockerfile', 'The Dockerfile describes how this executor will be built.', ) table.add_row('executor.py', 'The main logic file of the Executor.') table.add_row( 'manifest.yml', 'Metadata for the Executor, for better appeal on Jina Hub.', ) manifest_fields_table = Table(box=box.SIMPLE) manifest_fields_table.add_column('Field', style='cyan', no_wrap=True) manifest_fields_table.add_column('Description', no_wrap=True) manifest_fields_table.add_row('name', 'Human-readable title of the Executor') manifest_fields_table.add_row( 'description', 'Human-readable description of the Executor') manifest_fields_table.add_row( 'url', 'URL to find more information on the Executor (e.g. GitHub repo URL)', ) manifest_fields_table.add_row( 'keywords', 'Keywords that help user find the Executor') table.add_row('', manifest_fields_table) table.add_row('README.md', 'A usage guide of the Executor.') table.add_row('requirements.txt', 'The Python dependencies of the Executor.') final_table = Table(box=None) final_table.add_row( 'Congrats! You have successfully created an Executor! Here are the next steps:' ) p0 = Panel( Syntax( f'cd {exec_path}\nls', 'console', theme='monokai', line_numbers=True, word_wrap=True, ), title='1. Check out the generated Executor', width=120, expand=False, ) p1 = Panel( table, title='2. Understand folder structure', width=120, expand=False, ) p2 = Panel( Syntax( f'jina hub push {exec_path}', 'console', theme='monokai', line_numbers=True, word_wrap=True, ), title='3. 
Share it to Jina Hub', width=120, expand=False, ) final_table.add_row(p0) final_table.add_row(p1) final_table.add_row(p2) p = Panel( final_table, title=':tada: Next steps', width=130, expand=False, ) console.print(p) def push(self) -> None: """Push the executor pacakge to Jina Hub.""" from rich.console import Console work_path = Path(self.args.path) exec_tags = None if self.args.tag: exec_tags = ','.join(self.args.tag) dockerfile = None if self.args.dockerfile: dockerfile = Path(self.args.dockerfile) if not dockerfile.exists(): raise Exception( f'The given Dockerfile `{dockerfile}` does not exist!') if dockerfile.parent != work_path: raise Exception( f'The Dockerfile must be placed at the given folder `{work_path}`' ) dockerfile = dockerfile.relative_to(work_path) console = Console() with console.status(f'Pushing `{self.args.path}` ...') as st: req_header = get_request_header() try: st.update(f'Packaging {self.args.path} ...') md5_hash = hashlib.md5() bytesio = archive_package(work_path) content = bytesio.getvalue() md5_hash.update(content) md5_digest = md5_hash.hexdigest() # upload the archived package form_data = { 'public': 'True' if getattr(self.args, 'public', None) else 'False', 'private': 'True' if getattr(self.args, 'private', None) else 'False', 'md5sum': md5_digest, } if exec_tags: form_data['tags'] = exec_tags if dockerfile: form_data['dockerfile'] = str(dockerfile) uuid8, secret = load_secret(work_path) if self.args.force_update or uuid8: form_data['force'] = self.args.force_update or uuid8 if self.args.secret or secret: form_data['secret'] = self.args.secret or secret method = 'put' if ('force' in form_data) else 'post' st.update(f'Connecting to Jina Hub ...') hubble_url = get_hubble_url_v1() + '/executors' # upload the archived executor to Jina Hub st.update(f'Uploading...') resp = upload_file( hubble_url, 'filename', content, dict_data=form_data, headers=req_header, stream=True, method=method, ) result = None for stream_line in resp.iter_lines(): stream_msg = json.loads(stream_line) if 'stream' in stream_msg: st.update( f'Cloud building ... [dim]{stream_msg["stream"]}[/dim]' ) elif 'status' in stream_msg: st.update( f'Cloud building ... [dim]{stream_msg["status"]}[/dim]' ) elif 'result' in stream_msg: result = stream_msg['result'] break if result is None: raise Exception('Unknown Error') elif not result.get('data', None): msg = result.get('message', 'Unknown Error') if 'Process(docker) exited on non-zero code' in msg: self.logger.error(''' Failed on building Docker image. Potential solutions: - If you haven't provide a Dockerfile in the executor bundle, you may want to provide one, as the auto-generated one on the cloud did not work. - If you have provided a Dockerfile, you may want to check the validity of this Dockerfile. 
''') raise Exception(msg) elif 200 <= result['statusCode'] < 300: new_uuid8, new_secret = self._prettyprint_result( console, result) if new_uuid8 != uuid8 or new_secret != secret: dump_secret(work_path, new_uuid8, new_secret) elif result['message']: raise Exception(result['message']) elif resp.text: # NOTE: sometimes resp.text returns empty raise Exception(resp.text) else: resp.raise_for_status() except KeyboardInterrupt: pass except Exception as e: # IO related errors self.logger.error( f'''Please report this session_id: {colored(req_header["jinameta-session-id"], color="yellow", attrs="bold")} to https://github.com/jina-ai/jina/issues' {e!r}''') raise e def _prettyprint_result(self, console, result): # TODO: only support single executor now from rich.table import Table from rich.panel import Panel data = result.get('data', None) image = data['executors'][0] uuid8 = image['id'] secret = image['secret'] visibility = image['visibility'] tag = self.args.tag[0] if self.args.tag else None table = Table.grid() table.add_column(width=20, no_wrap=True) table.add_column(style='cyan', no_wrap=True) table.add_row( ':link: Hub URL', f'[link=https://hub.jina.ai/executor/{uuid8}/]https://hub.jina.ai/executor/{uuid8}/[/link]', ) if 'name' in image: table.add_row(':name_badge: Name', image['name']) table.add_row(':lock: Secret', secret) table.add_row( '', ':point_up:️ [bold red]Please keep this token in a safe place!', ) table.add_row(':eyes: Visibility', visibility) p1 = Panel( table, title='Published', width=80, expand=False, ) console.print(p1) presented_id = image.get('name', uuid8) usage = (f'{presented_id}' if visibility == 'public' else f'{presented_id}:{secret}') + (f'/{tag}' if tag else '') if not self.args.no_usage: self._get_prettyprint_usage(console, usage) return uuid8, secret def _get_prettyprint_usage(self, console, executor_name, usage_kind=None): from rich.panel import Panel from rich.syntax import Syntax flow_plain = f'''from jina import Flow f = Flow().add(uses='jinahub://{executor_name}') with f: ...''' flow_docker = f'''from jina import Flow f = Flow().add(uses='jinahub+docker://{executor_name}') with f: ...''' p1 = Panel( Syntax(flow_plain, 'python', theme='monokai', line_numbers=True, word_wrap=True), title='Usage', width=80, expand=False, ) p2 = Panel( Syntax( flow_docker, 'python', theme='monokai', line_numbers=True, word_wrap=True, ), title='Docker usage', width=80, expand=False, ) if usage_kind == 'docker': console.print(p2) elif usage_kind == 'source': console.print(p1) else: console.print(p1, p2) @staticmethod @disk_cache_offline(cache_file=str(_cache_file)) def fetch_meta( name: str, tag: str, secret: Optional[str] = None, force: bool = False, ) -> HubExecutor: """Fetch the executor meta info from Jina Hub. :param name: the UUID/Name of the executor :param tag: the tag of the executor if available, otherwise, use `None` as the value :param secret: the access secret of the executor :param force: if set to True, access to fetch_meta will always pull latest Executor metas, otherwise, default to local cache :return: meta of executor .. note:: The `name` and `tag` should be passed via ``args`` and `force` and `secret` as ``kwargs``, otherwise, cache does not work. """ with ImportExtensions(required=True): import requests pull_url = get_hubble_url_v1() + f'/executors/{name}/?' 
path_params = {} if secret: path_params['secret'] = secret if tag: path_params['tag'] = tag if path_params: pull_url += urlencode(path_params) resp = requests.get(pull_url, headers=get_request_header()) if resp.status_code != 200: if resp.text: raise Exception(resp.text) resp.raise_for_status() resp = resp.json() return HubExecutor( uuid=resp['id'], name=resp.get('name', None), sn=resp.get('sn', None), tag=tag or resp['tag'], visibility=resp['visibility'], image_name=resp['image'], archive_url=resp['package']['download'], md5sum=resp['package']['md5'], ) @staticmethod def deploy_public_sandbox(uses: str): """ Deploy a public sandbox to Jina Hub. :param uses: the executor uses string :return: the host and port of the sandbox """ scheme, name, tag, secret = parse_hub_uri(uses) payload = { 'name': name, 'tag': tag if tag else 'latest', 'jina': __version__, } from rich.progress import Console import requests console = Console() host = None try: res = requests.get( url=get_hubble_url_v2() + '/rpc/sandbox.get', params=payload, headers=get_request_header(), ).json() if res.get('code') == 200: host = res.get('data', {}).get('host', None) except Exception: raise if host: return host, 443 with console.status( f"[bold green]Deploying sandbox for ({name}) since no existing one..." ): try: json = requests.post( url=get_hubble_url_v2() + '/rpc/sandbox.create', json=payload, headers=get_request_header(), ).json() host = json.get('data', {}).get('host', None) livetime = json.get('data', {}).get('livetime', '15 mins') if not host: raise Exception(f'Failed to deploy sandbox: {json}') console.log(f"Deployment completed: {host}") console.log( f"[bold green]This sandbox will be removed when no traffic during {livetime}" ) except: console.log("Deployment failed") raise return host, 443 def _pull_with_progress(self, log_streams, console): from rich.progress import Progress, DownloadColumn, BarColumn with Progress( "[progress.description]{task.description}", BarColumn(), DownloadColumn(), "[progress.percentage]{task.percentage:>3.0f}%", console=console, transient=True, ) as progress: tasks = {} for log in log_streams: if 'status' not in log: continue status = log['status'] status_id = log.get('id', None) pg_detail = log.get('progressDetail', None) if (pg_detail is None) or (status_id is None): self.logger.debug(status) continue if status_id not in tasks: tasks[status_id] = progress.add_task(status, total=0) task_id = tasks[status_id] if ('current' in pg_detail) and ('total' in pg_detail): progress.update( task_id, completed=pg_detail['current'], total=pg_detail['total'], description=status, ) elif not pg_detail: progress.update(task_id, advance=0, description=status) def _load_docker_client(self): with ImportExtensions(required=True): import docker.errors import docker from docker import APIClient from jina import __windows__ try: self._client = docker.from_env() # low-level client self._raw_client = APIClient( base_url=docker.constants.DEFAULT_NPIPE if __windows__ else docker.constants.DEFAULT_UNIX_SOCKET) except docker.errors.DockerException: self.logger.critical( f'Docker daemon seems not running. Please run Docker daemon and try again.' ) exit(1) def pull(self) -> str: """Pull the executor package from Jina Hub. 
:return: the `uses` string """ from rich.console import Console console = Console() cached_zip_file = None executor_name = None usage_kind = None try: need_pull = self.args.force_update with console.status(f'Pulling {self.args.uri}...') as st: scheme, name, tag, secret = parse_hub_uri(self.args.uri) st.update(f'Fetching [bold]{name}[/bold] from Jina Hub ...') executor, from_cache = HubIO.fetch_meta(name, tag, secret=secret, force=need_pull) presented_id = getattr(executor, 'name', executor.uuid) executor_name = ( f'{presented_id}' if executor.visibility == 'public' else f'{presented_id}:{secret}') + (f'/{tag}' if tag else '') if scheme == 'jinahub+docker': self._load_docker_client() import docker try: self._client.images.get(executor.image_name) except docker.errors.ImageNotFound: need_pull = True if need_pull: st.update(f'Pulling image ...') log_stream = self._raw_client.pull(executor.image_name, stream=True, decode=True) st.stop() self._pull_with_progress( log_stream, console, ) usage_kind = 'docker' return f'docker://{executor.image_name}' elif scheme == 'jinahub': import filelock with filelock.FileLock(get_lockfile(), timeout=-1): try: pkg_path, pkg_dist_path = get_dist_path_of_executor( executor) # check serial number to upgrade sn_file_path = pkg_dist_path / f'PKG-SN-{executor.sn or 0}' if (not sn_file_path.exists()) and any( pkg_dist_path.glob('PKG-SN-*')): raise FileNotFoundError( f'{pkg_path} need to be upgraded') st.update( 'Installing [bold]requirements.txt[/bold]...') install_package_dependencies( install_deps=self.args.install_requirements, pkg_dist_path=pkg_dist_path, pkg_path=pkg_dist_path, ) except FileNotFoundError: need_pull = True if need_pull: # pull the latest executor meta, as the cached data would expire if from_cache: executor, _ = HubIO.fetch_meta(name, tag, secret=secret, force=True) cache_dir = Path( os.environ.get( 'JINA_HUB_CACHE_DIR', Path.home().joinpath('.cache', 'jina'), )) cache_dir.mkdir(parents=True, exist_ok=True) st.update(f'Downloading {name} ...') cached_zip_file = download_with_resume( executor.archive_url, cache_dir, f'{executor.uuid}-{executor.md5sum}.zip', md5sum=executor.md5sum, ) st.update(f'Unpacking {name} ...') install_local( cached_zip_file, executor, install_deps=self.args.install_requirements, ) pkg_path, _ = get_dist_path_of_executor(executor) usage_kind = 'source' return f'{pkg_path / "config.yml"}' else: raise ValueError(f'{self.args.uri} is not a valid scheme') except KeyboardInterrupt: executor_name = None except Exception: executor_name = None raise finally: # delete downloaded zip package if existed if cached_zip_file is not None: cached_zip_file.unlink() if not self.args.no_usage and executor_name: self._get_prettyprint_usage(console, executor_name, usage_kind)
class BaseStore(MutableMapping): """The Base class for Jinad stores""" def __init__(self): self._items = {} # type: Dict['uuid.UUID', Dict[str, Any]] self._logger = JinaLogger(self.__class__.__name__, **vars(jinad_args)) self._init_stats() def _init_stats(self): """Initialize the stats """ self._time_created = datetime.now() self._time_updated = self._time_created self._num_add = 0 self._num_del = 0 def add(self, *args, **kwargs) -> 'uuid.UUID': """Add a new element to the store. This method needs to be overridden by the subclass .. #noqa: DAR101""" raise NotImplementedError def update(self, *args, **kwargs) -> 'uuid.UUID': """Updates the element to the store. This method needs to be overridden by the subclass .. #noqa: DAR101""" raise NotImplementedError def delete( self, id: Union[str, uuid.UUID], workspace: bool = False, everything: bool = False, **kwargs, ): """delete an object from the store :param id: the id of the object :param workspace: whether to delete the workdir of the object :param everything: whether to delete everything :param kwargs: not used """ if isinstance(id, str): id = uuid.UUID(id) if id in self._items: v = self._items[id] if 'object' in v and hasattr(v['object'], 'close'): v['object'].close() if workspace and v.get('workdir', None): for path in Path(v['workdir']).rglob('[!logging.log]*'): if path.is_file(): self._logger.debug(f'file to be deleted: {path}') path.unlink() if everything and v.get('workdir', None): self._logger.debug(f'directory to be deleted: {v["workdir"]}') shutil.rmtree(v['workdir']) del self[id] self._logger.success( f'{colored(str(id), "cyan")} is released from the store.') else: raise KeyError(f'{colored(str(id), "cyan")} not found in store.') def __iter__(self): return iter(self._items) def __len__(self): return len(self._items) def __getitem__(self, key: Union['uuid.UUID', str]): if isinstance(key, str): key = uuid.UUID(key) return self._items[key] def __delitem__(self, key: uuid.UUID): """Release a Pea/Pod/Flow object from the store :param key: the key of the object .. #noqa: DAR201""" self._items.pop(key) self._time_updated = datetime.now() self._num_del += 1 def clear(self) -> None: """delete all the objects in the store""" keys = list(self._items.keys()) for k in keys: self.delete(id=k, workspace=True) def reset(self) -> None: """Calling :meth:`clear` and reset all stats """ self.clear() self._init_stats() def __setitem__(self, key: 'uuid.UUID', value: Dict) -> None: self._items[key] = value t = datetime.now() value.update({'time_created': t}) self._time_updated = t self._num_add += 1 @property def status(self) -> Dict: """Return the status of this store as a dict .. #noqa: DAR201""" return { 'size': len(self._items), 'time_created': self._time_created, 'time_updated': self._time_updated, 'num_add': self._num_add, 'num_del': self._num_del, 'items': self._items, }
class BasePod(ABC): """ :class:`BasePod` is an interface from which all the classes managing the lifetime of a Runtime inside a local process, container or in a remote JinaD instance (to come) must inherit. It exposes the required APIs so that the `BasePod` can be handled by the `cli` api as a context manager or by a `Deployment`. What makes a BasePod a BasePod is that it manages the lifecycle of a Runtime (gateway or not gateway) """ def __init__(self, args: 'argparse.Namespace'): self.args = args if hasattr(self.args, 'port_expose'): self.args.port_in = self.args.port_expose self.args.parallel = self.args.shards self.name = self.args.name or self.__class__.__name__ self.is_forked = False self.logger = JinaLogger(self.name, **vars(self.args)) if self.args.runtime_backend == RuntimeBackendType.THREAD: self.logger.warning( f' Using Thread as runtime backend is not recommended for production purposes. It is ' f'just supposed to be used for easier debugging. Besides the performance considerations, it is' f'specially dangerous to mix `Executors` running in different types of `RuntimeBackends`.' ) self._envs = {'JINA_DEPLOYMENT_NAME': self.name} if self.args.quiet: self._envs['JINA_LOG_CONFIG'] = 'QUIET' if self.args.env: self._envs.update(self.args.env) # arguments needed to create `runtime` and communicate with it in the `run` in the stack of the new process # or thread.f test_worker = { RuntimeBackendType.THREAD: threading.Thread, RuntimeBackendType.PROCESS: multiprocessing.Process, }.get(getattr(args, 'runtime_backend', RuntimeBackendType.THREAD))() self.is_ready = _get_event(test_worker) self.is_shutdown = _get_event(test_worker) self.cancel_event = _get_event(test_worker) self.is_started = _get_event(test_worker) self.ready_or_shutdown = ConditionalEvent( getattr(args, 'runtime_backend', RuntimeBackendType.THREAD), events_list=[self.is_ready, self.is_shutdown], ) self.daemon = self.args.daemon self.runtime_ctrl_address = self._get_control_address() self._timeout_ctrl = self.args.timeout_ctrl def _get_control_address(self): return f'{self.args.host}:{self.args.port_in}' def close(self) -> None: """Close the Pod This method makes sure that the `Process/thread` is properly finished and its resources properly released """ self.logger.debug('waiting for ready or shutdown signal from runtime') if not self.is_shutdown.is_set() and self.is_started.is_set(): try: self.logger.debug(f'terminate') self._terminate() if not self.is_shutdown.wait(timeout=self._timeout_ctrl if not __windows__ else 1.0): if not __windows__: raise Exception( f'Shutdown signal was not received for {self._timeout_ctrl} seconds' ) else: self.logger.warning( 'Pod was forced to close after 1 second. Graceful closing is not available on Windows.' ) except Exception as ex: self.logger.error( f'{ex!r} during {self.close!r}' + f'\n add "--quiet-error" to suppress the exception details' if not self.args.quiet_error else '', exc_info=not self.args.quiet_error, ) else: # here shutdown has been set already, therefore `run` will gracefully finish self.logger.debug( f'{"shutdown is is already set" if self.is_shutdown.is_set() else "Runtime was never started"}. Runtime will end gracefully on its own' ) pass self.is_shutdown.set() self.logger.debug(__stop_msg__) self.logger.close() def __enter__(self): return self.start() def __exit__(self, exc_type, exc_val, exc_tb): self.close() def _wait_for_ready_or_shutdown(self, timeout: Optional[float]): """ Waits for the process to be ready or to know it has failed. 
:param timeout: The time to wait before readiness or failure is determined .. # noqa: DAR201 """ return AsyncNewLoopRuntime.wait_for_ready_or_shutdown( timeout=timeout, ready_or_shutdown_event=self.ready_or_shutdown.event, ctrl_address=self.runtime_ctrl_address, timeout_ctrl=self._timeout_ctrl, ) def _fail_start_timeout(self, timeout): """ Closes the Pod and raises a TimeoutError with the corresponding warning messages :param timeout: The time to wait before readiness or failure is determined .. # noqa: DAR201 """ _timeout = timeout or -1 self.logger.warning( f'{self} timeout after waiting for {self.args.timeout_ready}ms, ' f'if your executor takes time to load, you may increase --timeout-ready' ) self.close() raise TimeoutError( f'{typename(self)}:{self.name} can not be initialized after {_timeout * 1e3}ms' ) def _check_failed_to_start(self): """ Raises a corresponding exception if failed to start """ if self.is_shutdown.is_set(): # return too early and the shutdown is set, means something fails!! if not self.is_started.is_set(): raise RuntimeFailToStart else: raise RuntimeRunForeverEarlyError def wait_start_success(self): """Block until all pods starts successfully. If not success, it will raise an error hoping the outer function to catch it """ _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 if self._wait_for_ready_or_shutdown(_timeout): self._check_failed_to_start() self.logger.debug(__ready_msg__) else: self._fail_start_timeout(_timeout) async def async_wait_start_success(self): """ Wait for the `Pod` to start successfully in a non-blocking manner """ import asyncio _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 timeout_ns = 1e9 * _timeout if _timeout else None now = time.time_ns() while timeout_ns is None or time.time_ns() - now < timeout_ns: if self.ready_or_shutdown.event.is_set(): self._check_failed_to_start() self.logger.debug(__ready_msg__) return else: await asyncio.sleep(0.1) self._fail_start_timeout(_timeout) @property def role(self) -> 'PodRoleType': """Get the role of this pod in a deployment .. #noqa: DAR201""" return self.args.pod_role @abstractmethod def start(self): """Start the BasePod. This method calls :meth:`start` in :class:`threading.Thread` or :class:`multiprocesssing.Process`. .. #noqa: DAR201 """ ... @abstractmethod def _terminate(self): ... @abstractmethod def join(self, *args, **kwargs): """Joins the BasePod. Wait for the BasePod to properly terminate :param args: extra positional arguments :param kwargs: extra keyword arguments """ ...
class CrudIndexer(Executor):
    """Simple indexer class"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.logger = JinaLogger('CrudIndexer')
        self._docs = DocumentArray()
        self._dump_location = os.path.join(self.metas.workspace, 'docs.json')
        if os.path.exists(self._dump_location):
            self._docs = DocumentArray.load_json(self._dump_location)
            self.logger.debug(
                f'Loaded {len(self._docs)} documents from {self._dump_location}'
            )
        else:
            self.logger.warning(f'No data found at {self._dump_location}')

    @requests(on='/index')
    def index(self, docs: 'DocumentArray', **kwargs):
        self._docs.extend(docs)

    @requests(on='/update')
    def update(self, docs: 'DocumentArray', **kwargs):
        self.delete(docs)
        self.index(docs)

    def close(self) -> None:
        self.logger.debug(
            f'Dumping {len(self._docs)} documents to {self._dump_location}'
        )
        self._docs.save_json(self._dump_location)

    @requests(on='/delete')
    def delete(self, docs: 'DocumentArray', **kwargs):
        # TODO: use `del self._docs[d.id]` once
        #  tests.unit.types.arrays.test_documentarray.test_delete_by_id is fixed
        ids_to_delete = [d.id for d in docs]
        idx_to_delete = []
        for i, doc in enumerate(self._docs):
            if doc.id in ids_to_delete:
                idx_to_delete.append(i)
        for i in sorted(idx_to_delete, reverse=True):
            del self._docs[i]

    @requests(on='/search')
    def search(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
        top_k = int(parameters.get('top_k', 1))
        a = np.stack(docs[:, 'embedding'])
        b = np.stack(self._docs[:, 'embedding'])
        q_emb = _ext_A(_norm(a))
        d_emb = _ext_B(_norm(b))
        dists = _cosine(q_emb, d_emb)
        idx, dist = self._get_sorted_top_k(dists, top_k)
        for _q, _ids, _dists in zip(docs, idx, dist):
            for _id, _dist in zip(_ids, _dists):
                d = Document(self._docs[int(_id)], copy=True)
                d.scores['cosine'].value = 1 - _dist
                _q.matches.append(d)

    @staticmethod
    def _get_sorted_top_k(
        dist: 'np.ndarray', top_k: int
    ) -> Tuple['np.ndarray', 'np.ndarray']:
        if top_k >= dist.shape[1]:
            idx = dist.argsort(axis=1)[:, :top_k]
            dist = np.take_along_axis(dist, idx, axis=1)
        else:
            idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
            dist = np.take_along_axis(dist, idx_ps, axis=1)
            idx_fs = dist.argsort(axis=1)
            idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
            dist = np.take_along_axis(dist, idx_fs, axis=1)
        return idx, dist
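# A minimal usage sketch of CrudIndexer inside a Flow, assuming a local Jina 2.x
# setup. The `uses_metas` workspace handling and the `return_results` response
# access are assumptions that may differ between Jina versions.
import numpy as np
from jina import Flow, Document, DocumentArray

index_docs = DocumentArray(
    [
        Document(id='a', embedding=np.array([1.0, 0.0])),
        Document(id='b', embedding=np.array([0.0, 1.0])),
    ]
)
query = DocumentArray([Document(embedding=np.array([1.0, 0.1]))])

f = Flow().add(uses=CrudIndexer, uses_metas={'workspace': './workspace'})
with f:
    f.post(on='/index', inputs=index_docs)
    f.post(on='/delete', inputs=DocumentArray([Document(id='b')]))
    resp = f.post(
        on='/search', inputs=query, parameters={'top_k': 1}, return_results=True
    )
    print(resp[0].docs[0].matches[0].id)  # expected: 'a'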
def run(
    args: 'argparse.Namespace',
    name: str,
    runtime_cls: Type[AsyncNewLoopRuntime],
    envs: Dict[str, str],
    is_started: Union['multiprocessing.Event', 'threading.Event'],
    is_shutdown: Union['multiprocessing.Event', 'threading.Event'],
    is_ready: Union['multiprocessing.Event', 'threading.Event'],
    cancel_event: Union['multiprocessing.Event', 'threading.Event'],
    jaml_classes: Optional[Dict] = None,
):
    """Method representing the :class:`BaseRuntime` activity.

    This method is the target for the Pod's `thread` or `process`.

    .. note::
        :meth:`run` is running in a subprocess/thread, so exceptions cannot be propagated to the main process.
        Hence, please do not raise any exception here.

    .. note::
        Please note that env variables are process-specific. A subprocess inherits envs from the main process,
        but the subprocess's envs do NOT affect the main process, so they never mess up the user's local system envs.

    .. warning::
        If you are using ``thread`` as the backend, env settings will likely be overridden by other threads.

    .. note::
        `jaml_classes` contains all the :class:`JAMLCompatible` classes registered in the main process.
        When using `spawn` as the multiprocessing start method, passing this argument to `run` re-imports
        and re-registers all `JAMLCompatible` classes.

    :param args: namespace args from the Pod
    :param name: name of the Pod to have proper logging
    :param runtime_cls: the runtime class to instantiate
    :param envs: a dictionary of environment variables to be set in the new Process
    :param is_started: concurrency event to communicate that the runtime is properly started. Used for better logging
    :param is_shutdown: concurrency event to communicate that the runtime is terminated
    :param is_ready: concurrency event to communicate that the runtime is ready to receive messages
    :param cancel_event: concurrency event to receive a cancelling signal from the Pod. Needed by some runtimes
    :param jaml_classes: all the `JAMLCompatible` classes imported in the main process
    """
    logger = JinaLogger(name, **vars(args))

    def _unset_envs():
        if envs and args.runtime_backend != RuntimeBackendType.THREAD:
            for k in envs.keys():
                os.environ.pop(k, None)

    def _set_envs():
        if args.env:
            if args.runtime_backend == RuntimeBackendType.THREAD:
                logger.warning(
                    'environment variables should not be set when runtime="thread".'
                )
            else:
                os.environ.update({k: str(v) for k, v in envs.items()})

    try:
        _set_envs()
        runtime = runtime_cls(
            args=args,
            cancel_event=cancel_event,
        )
    except Exception as ex:
        logger.error(
            f'{ex!r} during {runtime_cls!r} initialization'
            + (
                '\n add "--quiet-error" to suppress the exception details'
                if not args.quiet_error
                else ''
            ),
            exc_info=not args.quiet_error,
        )
    else:
        if not is_shutdown.is_set():
            is_started.set()
            with runtime:
                is_ready.set()
                runtime.run_forever()
    finally:
        _unset_envs()
        is_shutdown.set()
        logger.debug('process terminated')
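# Hypothetical wiring sketch: how a Pod might spawn `run` as a process target and
# wait for the ready/shutdown handshake. `pod` is assumed to be a BasePod
# instance and `runtime_cls` a concrete AsyncNewLoopRuntime subclass resolved
# elsewhere; the attribute names mirror `BasePod.__init__` above.
import multiprocessing

worker = multiprocessing.Process(
    target=run,
    kwargs=dict(
        args=pod.args,
        name=pod.name,
        runtime_cls=runtime_cls,
        envs=pod._envs,
        is_started=pod.is_started,
        is_shutdown=pod.is_shutdown,
        is_ready=pod.is_ready,
        cancel_event=pod.cancel_event,
    ),
    daemon=pod.daemon,
)
worker.start()
# set by the child through either `is_ready` or `is_shutdown`
pod.ready_or_shutdown.event.wait(timeout=10.0)
if pod.is_shutdown.is_set() and not pod.is_started.is_set():
    # the child set `is_shutdown` before `is_started`: initialization failed
    raise RuntimeError('runtime failed to start')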
class BaseRuntime:
    """A Jina Runtime is a procedure that blocks the main process once running (i.e. :meth:`run_forever`),
    therefore it should be put into a separate thread/process, or run inside the main process of a docker container.

    Any program/library/package/module that blocks the main process can be formulated into a
    :class:`BaseRuntime` class and then started from a :class:`Pod`.

    In what follows, we call the main process/thread ``M`` and the process/thread blocked by the
    :class:`Runtime` ``S``.

    In Jina, a :class:`Pod` object is used to manage a :class:`Runtime` object's lifecycle. A :class:`Pod`
    acts as a :class:`multiprocessing.Process` or :class:`threading.Thread`; it starts from ``M`` and, once
    ``S`` is spawned, it uses the :class:`Runtime` as a context manager:

        0. :meth:`__init__`

        1. :meth:`__enter__`

        2. :meth:`run_forever`. Note that this blocks ``S``; step 3 is not reached until it is unblocked by :meth:`cancel`.

        3. When an error occurs during `run_forever`, or when the `runtime` receives a `cancel` signal,
           `run_forever` is cancelled and the managed context is closed. The `__exit__` of the `Runtime`
           guarantees that the `Runtime` is properly shut down by calling `teardown`.

    :meth:`__init__` and :meth:`teardown` form a pair: they define the instructions executed before and after
    the blocking procedure. In subclasses, `teardown` is optional.

    To cancel the `run_forever` method of a `Runtime`, use its static `cancel` method, which makes sure the
    runtime is properly cancelled:

        - Use a :class:`threading.Event` or :class:`multiprocessing.Event`, while :meth:`run_forever` polls for this event
        - Use a GrpcConnectionPool to send a TERMINATE message, while :meth:`run_forever` polls for this message

    Note, another way to jump out of :meth:`run_forever` is to raise an exception from it. This immediately
    moves execution to :meth:`teardown`.

    .. note::
        Rule of thumb on exception handling: if you are not sure whether you should handle an exception inside
        :meth:`run_forever`, :meth:`cancel` or :meth:`teardown`, then DO NOT catch the exception there.
        Exceptions are MUCH better handled by the :class:`Pod`.

    .. seealso::

        :class:`Pod` for managing a :class:`Runtime` object's lifecycle.
    """

    def __init__(
        self,
        args: 'argparse.Namespace',
        **kwargs,
    ):
        super().__init__()
        self.args = args
        if args.name:
            self.name = f'{args.name}/{self.__class__.__name__}'
        else:
            self.name = self.__class__.__name__
        self.logger = JinaLogger(self.name, **vars(self.args))

    def run_forever(self):
        """Running the blocking procedure inside ``S``. Note, once this method is called, ``S`` is blocked.

        .. note::

            If this method raises any exception, :meth:`teardown` will be called.

        .. seealso::

            :meth:`cancel` for cancelling the forever loop.
        """
        raise NotImplementedError

    def teardown(self):
        """Method called immediately after :meth:`run_forever` is unblocked.
        You can tidy up things here. Optional in subclasses; the default implementation only closes the logger.
""" self.logger.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type == RuntimeTerminated: self.logger.debug(f'{self!r} is ended') elif exc_type == KeyboardInterrupt: self.logger.debug(f'{self!r} is interrupted by user') elif exc_type and issubclass(exc_type, Exception): self.logger.error( f'{exc_val!r} during {self.run_forever!r}' + f'\n add "--quiet-error" to suppress the exception details' if not self.args.quiet_error else '', exc_info=not self.args.quiet_error, ) try: self.teardown() except OSError: # OSError(Stream is closed) already pass except Exception as ex: self.logger.error( f'{ex!r} during {self.teardown!r}' + f'\n add "--quiet-error" to suppress the exception details' if not self.args.quiet_error else '', exc_info=not self.args.quiet_error, ) # https://stackoverflow.com/a/28158006 # return True will silent all exception stack trace here, silence is desired here as otherwise it is too # noisy # # doc: If an exception is supplied, and the method wishes to suppress the exception (i.e., prevent it # from being propagated), it should return a true value. Otherwise, the exception will be processed normally # upon exit from this method. return True
class TestExecutor(Executor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from jina.logging.logger import JinaLogger

        self.logger = JinaLogger(self.__class__.__name__)
        self._name = self.runtime_args.name

    @requests(on='/index')
    def debug(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        key = 'traversed-executors'

        for doc in docs:
            if key not in doc.tags:
                doc.tags[key] = []
            traversed = list(doc.tags.get(key))
            traversed.append(self._name)
            doc.tags[key] = traversed
            doc.tags['parallel'] = self.runtime_args.parallel
            doc.tags['shards'] = self.runtime_args.shards
            doc.tags['shard_id'] = self.runtime_args.shard_id
            doc.tags['pea_id'] = self.runtime_args.pea_id

    @requests(on='/env')
    def env(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        for doc in docs:
            doc.tags['k1'] = os.environ.get('k1')
            doc.tags['k2'] = os.environ.get('k2')
            doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
            doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}

    @requests(on='/cuda')
    def cuda(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        from jina.peapods.pods.k8slib.kubernetes_client import K8sClients

        client = K8sClients().core_v1
        pods = client.list_namespaced_pod('test-gpu').items  # List[V1Pod]
        pod_spec = pods[0].spec  # V1PodSpec
        pod_container = pod_spec.containers[0]  # V1Container
        pod_resources = pod_container.resources  # V1ResourceRequirements

        for doc in docs:
            doc.tags['resources']['limits'] = pod_resources.limits

    @requests(on='/search')
    def read_file(self, docs: DocumentArray, **kwargs):
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        key = 'file'
        file_path = '/shared/test_file.txt'
        with open(file_path, 'r') as text_file:
            lines = text_file.readlines()
        for doc in docs:
            doc.tags[key] = lines
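# Hypothetical usage sketch: run TestExecutor in a local Flow and inspect the
# tags written on '/index'. Flow parameters and response access are assumptions
# that may vary between Jina versions.
from jina import Flow, Document

f = Flow().add(uses=TestExecutor, name='test_executor')
with f:
    resp = f.post(on='/index', inputs=[Document()], return_results=True)
    doc = resp[0].docs[0]
    print(doc.tags['traversed-executors'])  # e.g. ['test_executor']
    print(doc.tags['shard_id'], doc.tags['shards'])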