def batch(state: State, name: str, model_location: str, local_model_location: str, data: str, output: str, model_name: str, tf_record: bool, pack_param: List[Tuple[str, str]], requirements: str):
    """
    Start a new batch inference instance that performs prediction on the
    provided data.

    Exits with code 1 when no model location is given or when instance
    creation fails; on success prints a one-row summary table.
    """
    # A model must come from somewhere: either a remote or a local location.
    if not model_location and not local_model_location:
        handle_error(user_msg=Texts.MISSING_MODEL_LOCATION_ERROR_MSG.format(
            local_model_location=local_model_location))
        exit(1)

    if local_model_location:
        validate_local_model_location(local_model_location)

    # noinspection PyBroadException
    try:
        # Fill in defaults: model name from the remote location's basename,
        # instance name generated from the model name.
        model_name = model_name or os.path.basename(model_location)
        name = name or generate_name(name=model_name,
                                     prefix=INFERENCE_INSTANCE_PREFIX)
        instance = start_inference_instance(name=name,
                                            model_location=model_location,
                                            local_model_location=local_model_location,
                                            model_name=model_name,
                                            template=BATCH_INFERENCE_TEMPLATE,
                                            data_location=data,
                                            output_location=output,
                                            tf_record=tf_record,
                                            pack_params=pack_param,
                                            requirements=requirements)
    except Exception:
        handle_error(logger, Texts.OTHER_INSTANCE_CREATION_ERROR_MSG,
                     Texts.OTHER_INSTANCE_CREATION_ERROR_MSG,
                     add_verbosity_msg=state.verbosity == 0)
        exit(1)

    # Single-row summary of the created instance.
    summary = {
        Texts.TABLE_NAME_HEADER: [instance.cli_representation.name],
        Texts.TABLE_MODEL_LOCATION_HEADER: [model_location],
        Texts.TABLE_STATUS_HEADER: [instance.cli_representation.status]
    }
    click.echo(tabulate(summary, headers=Texts.TABLE_HEADERS,
                        tablefmt="orgtbl"))
def interact(ctx: click.Context, name: str, filename: str, pack_param: List[Tuple[str, str]], no_launch: bool, port_number: int, env: List[str], template: str):
    """
    Starts an interactive session with Jupyter Notebook.

    Either reuses an existing Jupyter experiment (when `name` matches one)
    or submits a new one, then waits for its pods and launches/connects the
    notebook app. Exits non-zero on any validation or submission failure.
    """
    current_namespace = get_kubectl_current_context_namespace()
    # Warn (and allow abort) when the user already runs many notebooks.
    jupyters_number = calculate_number_of_running_jupyters(current_namespace)
    if jupyters_number > ACCEPTED_NUMBER_OF_NOTEBOOKS:
        if not click.confirm(
                Texts.TOO_MANY_JUPYTERS.format(
                    jupyter_number=str(jupyters_number))):
            click.echo(Texts.INTERACT_ABORT_MSG)
            sys.exit(0)

    create_new_notebook = True
    jupyter_experiment = None

    if name:
        try:
            jupyter_experiment = Experiment.get(name=name,
                                                namespace=current_namespace)

            # Reusing an existing session while also passing a filename is
            # ambiguous - refuse it.
            if jupyter_experiment and filename:
                handle_error(user_msg=Texts.FILENAME_BUT_SESSION_EXISTS)
                sys.exit(1)

            # Recover the script name stored on the experiment's labels so
            # the right notebook can be opened on reconnect.
            if jupyter_experiment:
                metadata = jupyter_experiment.metadata
                if metadata and metadata.get("labels") and metadata.get(
                        "labels").get("script_name"):
                    filename = metadata.get("labels").get("script_name")

        except Exception:
            handle_error(logger, Texts.EXPERIMENT_GET_ERROR_MSG,
                         Texts.EXPERIMENT_GET_ERROR_MSG)
            sys.exit(1)

        # if experiment exists and is not based on jupyter image - we need to ask a user to choose another name
        if jupyter_experiment and jupyter_experiment.template_name not in JUPYTER_NOTEBOOK_TEMPLATES_NAMES:
            handle_error(user_msg=Texts.NAME_ALREADY_USED.format(name=name))
            sys.exit(1)

        # if experiment exists but its state is different than RUNNING - display info about a need of purging of
        # this experiment
        if jupyter_experiment and jupyter_experiment.state not in \
                [ExperimentStatus.SUBMITTED, ExperimentStatus.CREATING]:
            handle_error(
                user_msg=Texts.EXP_WITH_THE_SAME_NAME_MUST_BE_PURGED.format(
                    name=name))
            sys.exit(1)

        # Creating a brand-new experiment needs user confirmation unless
        # --force was given.
        if not jupyter_experiment and (
                not click.get_current_context().obj.force
                and not click.confirm(Texts.CONFIRM_EXPERIMENT_CREATION)):
            sys.exit(0)

    if jupyter_experiment:
        create_new_notebook = False
    else:
        try:
            check_experiment_name(value=name)
        except click.BadParameter as exe:
            handle_error(user_msg=str(exe))
            sys.exit(1)

    # launch_app retries only make sense for a freshly submitted notebook,
    # whose proxy endpoint may not be up yet.
    number_of_retries = 0
    if create_new_notebook:
        number_of_retries = 5
        try:
            exp_name = name
            if not name and not filename:
                exp_name = generate_name("jup")

            click.echo(Texts.SUBMITTING_EXPERIMENT_USER_MSG)
            # NOTE: submit_experiment may rewrite `filename` (e.g. to the
            # effective script location).
            runs, runs_errors, filename = submit_experiment(
                run_kind=RunKinds.JUPYTER,
                script_location=filename,
                script_folder_location=None,
                template=template,
                name=exp_name,
                parameter_range=[],
                parameter_set=(),
                script_parameters=(),
                pack_params=pack_param,
                env_variables=env)
            click.echo(
                tabulate(
                    {
                        RUN_NAME: [run.cli_representation.name for run in runs],
                        RUN_PARAMETERS:
                        [run.cli_representation.parameters for run in runs],
                        RUN_STATUS:
                        [run.cli_representation.status for run in runs],
                        RUN_MESSAGE:
                        [runs_errors.get(run.name, "") for run in runs]
                    },
                    headers=[
                        RUN_NAME, RUN_PARAMETERS, RUN_STATUS, RUN_MESSAGE
                    ],
                    tablefmt=TBLT_TABLE_FORMAT))
            if runs:
                # The run's actual name is used below for pod checks and app
                # launch.
                name = runs[0].name
            else:
                # run wasn't created - error
                raise RuntimeError("Run wasn't created")

        except K8sProxyCloseError as exe:
            handle_error(user_msg=exe.message)
            sys.exit(1)
        except SubmitExperimentError as exe:
            handle_error(
                logger,
                Texts.SUBMIT_ERROR_MSG.format(exception_message=exe.message),
                Texts.SUBMIT_ERROR_MSG.format(exception_message=exe.message))
            sys.exit(1)
        except Exception:
            handle_error(logger, Texts.SUBMIT_OTHER_ERROR_MSG,
                         Texts.SUBMIT_OTHER_ERROR_MSG)
            sys.exit(1)
    else:
        # if jupyter service exists - the system only connects to it
        click.echo(Texts.SESSION_EXISTS_MSG)

    # Build the URL suffix that opens the right file in Jupyter.
    url_end = ""
    if filename:
        # only Jupyter notebooks are opened directly, other files are opened in edit mode
        url_end = f"/notebooks/output/experiment/"
        # For an existing session a .py script maps to its converted .ipynb
        # notebook.
        if jupyter_experiment and filename.endswith(".py"):
            filename = filename[:filename.index(".py", -3)] + ".ipynb"
        if not filename.endswith(".ipynb"):
            url_end = "/edit/"
        url_end = url_end + Path(filename).name

    # wait until all jupyter pods are ready
    for i in range(JUPYTER_CHECK_POD_READY_TRIES):
        try:
            if check_pods_status(run_name=name,
                                 namespace=current_namespace,
                                 status=PodStatus.RUNNING):
                break
        except Exception:
            handle_error(logger, Texts.NOTEBOOK_STATE_CHECK_ERROR_MSG)
            sys.exit(1)
        time.sleep(1)
    else:
        # for/else: loop exhausted without the pods ever becoming ready.
        handle_error(user_msg=Texts.NOTEBOOK_NOT_READY_ERROR_MSG)
        sys.exit(1)

    try:
        launch_app(k8s_app_name=NAUTAAppNames.JUPYTER,
                   app_name=name,
                   no_launch=no_launch,
                   number_of_retries=number_of_retries,
                   url_end=url_end,
                   port=port_number)
    except LaunchError as exe:
        handle_error(logger, exe.message, exe.message)
        sys.exit(1)
    except ProxyClosingError:
        handle_error(user_msg=Texts.PROXY_CLOSING_ERROR_MSG)
        sys.exit(1)
    except Exception:
        handle_error(logger, Texts.SESSION_LAUNCH_OTHER_ERROR_MSG,
                     Texts.SESSION_LAUNCH_OTHER_ERROR_MSG)
        sys.exit(1)
def launch(state: State, name: str, model_location: str, local_model_location: str, model_name: str, pack_param: List[Tuple[str, str]], requirements: str):
    """
    Start a new prediction instance usable for prediction, classification
    and regression tasks on a trained model.

    Exits with code 1 when no model location is given, when submission
    fails, or when the instance URL cannot be determined; otherwise prints
    a summary table plus the instance URL and authorization header.
    """
    # A model must come from somewhere: either a remote or a local location.
    if not model_location and not local_model_location:
        handle_error(user_msg=Texts.MISSING_MODEL_LOCATION_ERROR_MSG.format(
            local_model_location=local_model_location))
        exit(1)

    if local_model_location:
        validate_local_model_location(local_model_location)

    click.echo('Submitting prediction instance.')

    try:
        # Fill in defaults: model name from the location's basename,
        # instance name generated from the model name.
        chosen_location = model_location if model_location else local_model_location
        model_path = chosen_location.rstrip('/')
        model_name = model_name or os.path.basename(model_path)
        name = name or generate_name(name=model_name,
                                     prefix=INFERENCE_INSTANCE_PREFIX)
        instance = start_inference_instance(name=name,
                                            model_location=model_location,
                                            model_name=model_name,
                                            local_model_location=local_model_location,
                                            requirements=requirements,
                                            pack_params=pack_param)
        if instance.state == RunStatus.FAILED:
            raise RuntimeError('Inference instance submission failed.')
    except Exception:
        handle_error(logger, Texts.INSTANCE_START_ERROR_MSG,
                     Texts.INSTANCE_START_ERROR_MSG,
                     add_verbosity_msg=state.verbosity == 0)
        exit(1)

    # Single-row summary of the created instance.
    summary_row = [
        instance.cli_representation.name, model_location,
        instance.cli_representation.status
    ]
    click.echo(tabulate([summary_row], headers=Texts.TABLE_HEADERS,
                        tablefmt="orgtbl"))

    try:
        namespace = get_kubectl_current_context_namespace()
        authorization_header = get_authorization_header(
            service_account_name=name, namespace=namespace)
        instance_url = get_inference_instance_url(inference_instance=instance,
                                                  model_name=model_name)
        click.echo(
            Texts.INSTANCE_INFO_MSG.format(
                inference_instance_url=instance_url,
                authorization_header=authorization_header))
    except Exception:
        handle_error(logger, Texts.INSTANCE_URL_ERROR_MSG,
                     Texts.INSTANCE_URL_ERROR_MSG,
                     add_verbosity_msg=state.verbosity == 0)
        exit(1)
def launch(ctx: click.Context, name: str, model_location: str, local_model_location: str, model_name: str, pack_param: List[Tuple[str, str]], requirements: str, runtime: InferenceRuntime):
    """
    Starts a new prediction instance that can be used for performing
    prediction, classification and regression tasks on trained model.

    Submits the instance with the OVMS or TFServing template (depending on
    `runtime`), prints a summary and the instance URL/authorization header,
    then waits up to 40 seconds for the instance pods to become ready.
    Exits 1 on any submission/lookup failure or pod failure.
    """
    if not model_location and not local_model_location:
        handle_error(user_msg=Texts.MISSING_MODEL_LOCATION_ERROR_MSG.format(
            local_model_location=local_model_location))
        exit(1)

    if local_model_location:
        validate_local_model_location(local_model_location)

    click.echo('Submitting prediction instance.')

    try:
        # Pick the serving template according to the requested runtime.
        template = INFERENCE_TEMPLATE_OVMS if InferenceRuntime(runtime) == InferenceRuntime.OVMS else \
            INFERENCE_TEMPLATE_TFSERVING
        # Fill in defaults: model name from the location's basename,
        # instance name generated from the model name.
        model_path = model_location.rstrip(
            '/') if model_location else local_model_location.rstrip('/')
        model_name = model_name if model_name else os.path.basename(model_path)
        name = name if name else generate_name(
            name=model_name, prefix=INFERENCE_INSTANCE_PREFIX)
        inference_instance = start_inference_instance(
            name=name,
            model_location=model_location,
            model_name=model_name,
            local_model_location=local_model_location,
            template=template,
            requirements=requirements,
            pack_params=pack_param)
        if inference_instance.state == RunStatus.FAILED:
            raise RuntimeError('Inference instance submission failed.')
    except Exception:
        handle_error(logger, Texts.INSTANCE_START_ERROR_MSG,
                     Texts.INSTANCE_START_ERROR_MSG,
                     add_verbosity_msg=ctx.obj.verbosity == 0)
        exit(1)

    click.echo(
        tabulate([[
            inference_instance.cli_representation.name, model_location,
            inference_instance.cli_representation.status
        ]],
                 headers=Texts.TABLE_HEADERS,
                 tablefmt=TBLT_TABLE_FORMAT))

    try:
        namespace = get_kubectl_current_context_namespace()
        authorization_header = get_authorization_header(
            service_account_name=name, namespace=namespace)
        inference_instance_url = get_inference_instance_url(
            inference_instance=inference_instance, model_name=model_name)
        click.echo(
            Texts.INSTANCE_INFO_MSG.format(
                inference_instance_url=inference_instance_url,
                authorization_header=authorization_header))
    except Exception:
        handle_error(logger, Texts.INSTANCE_URL_ERROR_MSG,
                     Texts.INSTANCE_URL_ERROR_MSG,
                     add_verbosity_msg=ctx.obj.verbosity == 0)
        exit(1)

    # wait till pod is ready - no more than 40 seconds
    for _ in range(40):
        pods = get_namespaced_pods(label_selector=f'runName={name}',
                                   namespace=namespace)
        # BUGFIX: pod.status.container_statuses is None while a pod is
        # Pending/initializing; iterating it directly raised TypeError.
        # A pod without container statuses is treated as not yet ready.
        if pods and all(pod.status.phase == 'Running' for pod in pods) \
                and all(pod.status.container_statuses
                        and all(container.ready
                                for container in pod.status.container_statuses)
                        for pod in pods):
            break
        if pods and any(pod.status.phase == 'Failed' for pod in pods):
            handle_error(logger, Texts.INSTANCE_START_ERROR_MSG,
                         Texts.INSTANCE_START_ERROR_MSG,
                         add_verbosity_msg=ctx.obj.verbosity == 0)
            exit(1)
        time.sleep(1)
    else:
        # for/else: readiness never confirmed within the 40s budget.
        handle_error(logger,
                     Texts.PREDICTION_INSTANCE_NOT_READY.format(name=name),
                     Texts.PREDICTION_INSTANCE_NOT_READY.format(name=name),
                     add_verbosity_msg=ctx.obj.verbosity == 0)
        # NOTE(review): exits 0 here - the instance was submitted, it just
        # isn't ready yet; confirm this success code is intended.
        exit(0)