Example #1
from unittest.mock import MagicMock

# Imports inferred from the snippet: the patch target suggests that
# start_inference_instance lives in commands.predict.common.
from commands.predict.common import start_inference_instance


def test_start_inference_instance(mocker):
    submit_experiment_mock = mocker.patch('commands.predict.common.submit_experiment')
    fake_experiment = MagicMock()
    submit_experiment_mock.return_value = [fake_experiment], {}, 'bla'

    inference_instance = start_inference_instance(name='', model_location='', model_name='', local_model_location='',
                                                  template='')

    assert inference_instance == fake_experiment
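This test pins down a small contract: submit_experiment returns a tuple whose first element is a list of runs, and start_inference_instance surfaces the first of those runs. Below is a minimal sketch of that behaviour with a stubbed submit_experiment; the parameter handling is hypothetical, and the real implementation in commands.predict.common takes more arguments.

from typing import Any, Dict, List, Tuple


def submit_experiment(**kwargs) -> Tuple[List[Any], Dict, str]:
    # Stub standing in for the real experiment submission.
    return [object()], {}, 'script-location'


def start_inference_instance(name: str, model_location: str, model_name: str,
                             local_model_location: str, template: str = '',
                             **kwargs) -> Any:
    # submit_experiment is assumed to return (runs, ..., ...);
    # the command surfaces the first submitted run.
    runs, _, _ = submit_experiment(name=name, model_location=model_location,
                                   model_name=model_name,
                                   local_model_location=local_model_location,
                                   template=template, **kwargs)
    return runs[0]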
Example #2
def batch(state: State, name: str, model_location: str,
          local_model_location: str, data: str, output: str, model_name: str,
          tf_record: bool, pack_param: List[Tuple[str, str]],
          requirements: str):
    """
    Starts a new batch instance that will perform prediction on the provided data.
    """
    if not model_location and not local_model_location:
        handle_error(user_msg=Texts.MISSING_MODEL_LOCATION_ERROR_MSG.format(
            local_model_location=local_model_location))
        exit(1)

    if local_model_location:
        validate_local_model_location(local_model_location)

    # noinspection PyBroadException
    try:
        model_name = model_name if model_name else os.path.basename(
            model_location)
        name = name if name else generate_name(
            name=model_name, prefix=INFERENCE_INSTANCE_PREFIX)
        inference_instance = start_inference_instance(
            name=name,
            model_location=model_location,
            local_model_location=local_model_location,
            model_name=model_name,
            template=BATCH_INFERENCE_TEMPLATE,
            data_location=data,
            output_location=output,
            tf_record=tf_record,
            pack_params=pack_param,
            requirements=requirements)
    except Exception:
        handle_error(logger,
                     Texts.OTHER_INSTANCE_CREATION_ERROR_MSG,
                     Texts.OTHER_INSTANCE_CREATION_ERROR_MSG,
                     add_verbosity_msg=state.verbosity == 0)
        exit(1)

    click.echo(
        tabulate(
            {
                Texts.TABLE_NAME_HEADER:
                [inference_instance.cli_representation.name],
                Texts.TABLE_MODEL_LOCATION_HEADER: [model_location],
                Texts.TABLE_STATUS_HEADER:
                [inference_instance.cli_representation.status]
            },
            headers=Texts.TABLE_HEADERS,
            tablefmt="orgtbl"))
Example #3
def launch(state: State, name: str, model_location: str,
           local_model_location: str, model_name: str,
           pack_param: List[Tuple[str, str]], requirements: str):
    """
    Starts a new prediction instance that can be used for performing prediction, classification and
    regression tasks on a trained model.
    """
    if not model_location and not local_model_location:
        handle_error(user_msg=Texts.MISSING_MODEL_LOCATION_ERROR_MSG.format(
            local_model_location=local_model_location))
        exit(1)

    if local_model_location:
        validate_local_model_location(local_model_location)

    click.echo('Submitting prediction instance.')
    try:
        model_path = model_location.rstrip(
            '/') if model_location else local_model_location.rstrip('/')
        model_name = model_name if model_name else os.path.basename(model_path)
        name = name if name else generate_name(
            name=model_name, prefix=INFERENCE_INSTANCE_PREFIX)
        inference_instance = start_inference_instance(
            name=name,
            model_location=model_location,
            model_name=model_name,
            local_model_location=local_model_location,
            requirements=requirements,
            pack_params=pack_param)
        if inference_instance.state == RunStatus.FAILED:
            raise RuntimeError('Inference instance submission failed.')
    except Exception:
        handle_error(logger,
                     Texts.INSTANCE_START_ERROR_MSG,
                     Texts.INSTANCE_START_ERROR_MSG,
                     add_verbosity_msg=state.verbosity == 0)
        exit(1)

    click.echo(
        tabulate([[
            inference_instance.cli_representation.name, model_location,
            inference_instance.cli_representation.status
        ]],
                 headers=Texts.TABLE_HEADERS,
                 tablefmt="orgtbl"))

    try:
        namespace = get_kubectl_current_context_namespace()
        authorization_header = get_authorization_header(
            service_account_name=name, namespace=namespace)
        inference_instance_url = get_inference_instance_url(
            inference_instance=inference_instance, model_name=model_name)
        click.echo(
            Texts.INSTANCE_INFO_MSG.format(
                inference_instance_url=inference_instance_url,
                authorization_header=authorization_header))
    except Exception:
        handle_error(logger,
                     Texts.INSTANCE_URL_ERROR_MSG,
                     Texts.INSTANCE_URL_ERROR_MSG,
                     add_verbosity_msg=state.verbosity == 0)
        exit(1)
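Note the defaulting chain in the try block: trailing slashes are stripped so that os.path.basename returns the last path component, which then seeds both the model name and the generated instance name. A quick illustration with a hypothetical location:

import os

model_location = 'https://storage.example.com/models/resnet50/'
model_path = model_location.rstrip('/')
print(os.path.basename(model_path))  # resnet50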
Example #4
def launch(ctx: click.Context, name: str, model_location: str,
           local_model_location: str, model_name: str,
           pack_param: List[Tuple[str, str]], requirements: str,
           runtime: InferenceRuntime):
    """
    Starts a new prediction instance that can be used for performing prediction, classification and
    regression tasks on a trained model.
    """
    if not model_location and not local_model_location:
        handle_error(user_msg=Texts.MISSING_MODEL_LOCATION_ERROR_MSG.format(
            local_model_location=local_model_location))
        exit(1)

    if local_model_location:
        validate_local_model_location(local_model_location)

    click.echo('Submitting prediction instance.')
    try:
        template = INFERENCE_TEMPLATE_OVMS if InferenceRuntime(runtime) == InferenceRuntime.OVMS else \
            INFERENCE_TEMPLATE_TFSERVING
        model_path = model_location.rstrip(
            '/') if model_location else local_model_location.rstrip('/')
        model_name = model_name if model_name else os.path.basename(model_path)
        name = name if name else generate_name(
            name=model_name, prefix=INFERENCE_INSTANCE_PREFIX)
        inference_instance = start_inference_instance(
            name=name,
            model_location=model_location,
            model_name=model_name,
            local_model_location=local_model_location,
            template=template,
            requirements=requirements,
            pack_params=pack_param)
        if inference_instance.state == RunStatus.FAILED:
            raise RuntimeError('Inference instance submission failed.')
    except Exception:
        handle_error(logger,
                     Texts.INSTANCE_START_ERROR_MSG,
                     Texts.INSTANCE_START_ERROR_MSG,
                     add_verbosity_msg=ctx.obj.verbosity == 0)
        exit(1)

    click.echo(
        tabulate([[
            inference_instance.cli_representation.name, model_location,
            inference_instance.cli_representation.status
        ]],
                 headers=Texts.TABLE_HEADERS,
                 tablefmt=TBLT_TABLE_FORMAT))

    try:
        namespace = get_kubectl_current_context_namespace()
        authorization_header = get_authorization_header(
            service_account_name=name, namespace=namespace)
        inference_instance_url = get_inference_instance_url(
            inference_instance=inference_instance, model_name=model_name)
        click.echo(
            Texts.INSTANCE_INFO_MSG.format(
                inference_instance_url=inference_instance_url,
                authorization_header=authorization_header))
    except Exception:
        handle_error(logger,
                     Texts.INSTANCE_URL_ERROR_MSG,
                     Texts.INSTANCE_URL_ERROR_MSG,
                     add_verbosity_msg=ctx.obj.verbosity == 0)
        exit(1)

    # Wait until the pods are ready, but no longer than 40 seconds.
    for _ in range(40):
        pods = get_namespaced_pods(label_selector=f'runName={name}',
                                   namespace=namespace)
        if pods and all(pod.status.phase == 'Running' for pod in pods) \
                and all(container.ready for pod in pods for container in pod.status.container_statuses):
            break
        if pods and any(pod.status.phase == 'Failed' for pod in pods):
            handle_error(logger,
                         Texts.INSTANCE_START_ERROR_MSG,
                         Texts.INSTANCE_START_ERROR_MSG,
                         add_verbosity_msg=ctx.obj.verbosity == 0)
            exit(1)

        time.sleep(1)
    else:
        handle_error(logger,
                     Texts.PREDICTION_INSTANCE_NOT_READY.format(name=name),
                     Texts.PREDICTION_INSTANCE_NOT_READY.format(name=name),
                     add_verbosity_msg=ctx.obj.verbosity == 0)
        exit(0)
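The readiness wait relies on Python's for/else: the else branch runs only when the loop exhausts all 40 attempts without hitting break, i.e. on timeout. A minimal standalone version of the pattern, where is_ready is a hypothetical probe:

import time


def wait_until_ready(is_ready, attempts: int = 40, interval: float = 1.0) -> None:
    for _ in range(attempts):
        if is_ready():
            break
        time.sleep(interval)
    else:
        # No break occurred: every attempt was exhausted.
        raise TimeoutError('instance did not become ready in time')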