Example #1
    def run(api_name, run_args, bento=None, with_conda=False):
        track_cli('run')
        bento_service_bundle_path = resolve_bundle_path(
            bento, pip_installed_bundle_path)

        if with_conda:
            run_with_conda_env(
                bento_service_bundle_path,
                'bentoml run {api_name} {bento} {args}'.format(
                    bento=bento_service_bundle_path,
                    api_name=api_name,
                    args=' '.join(map(escape_shell_params, run_args)),
                ),
            )
            return

        api = load_bento_service_api(bento_service_bundle_path, api_name)
        api.handle_cli(run_args)
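
Example #1 shells out with user-supplied run_args, so each argument has to be escaped before it is interpolated into the command string. A minimal sketch of what a helper like escape_shell_params could look like, assuming shlex-style quoting is sufficient (illustrative only, not necessarily the library's actual implementation):

    # Hypothetical sketch of an escape_shell_params helper; not the verbatim
    # BentoML implementation.
    import shlex

    def escape_shell_params(param):
        # Quote a single CLI argument so it survives shell interpolation.
        return shlex.quote(param)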
Example #2
    def get(name, output, namespace):
        track_cli('deploy-get')

        yatai_service = get_yatai_service()
        result = get_deployment(namespace, name, yatai_service)
        if result.status.status_code != status_pb2.Status.OK:
            _echo(
                'Failed to get deployment {name}. code: {error_code}, message: '
                '{error_message}'.format(
                    name=name,
                    error_code=status_pb2.Status.Code.Name(
                        result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            _print_deployment_info(result.deployment, output)
Example #3
    def delete(name, namespace):
        track_cli('deploy-delete')

        result = get_yatai_service().DeleteDeployment(
            DeleteDeploymentRequest(deployment_name=name, namespace=namespace))
        if result.status.status_code != Status.OK:
            _echo(
                'Failed to delete deployment {name}. code: {error_code}, message: '
                '{error_message}'.format(
                    name=name,
                    error_code=Status.Code.Name(result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            _echo('Successfully deleted deployment {}'.format(name),
                  CLI_COLOR_SUCCESS)
Example #4
    def get(name, output, namespace):
        track_cli('deploy-get')

        result = get_yatai_service().GetDeployment(
            GetDeploymentRequest(deployment_name=name, namespace=namespace)
        )
        if result.status.status_code != Status.OK:
            _echo(
                'Failed to get deployment {name}. code: {error_code}, message: '
                '{error_message}'.format(
                    name=name,
                    error_code=Status.Code.Name(result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            display_deployment_info(result.deployment, output)
Example #5
    def serve(port, bento=None, with_conda=False):
        track_cli('serve')
        bento_service_bundle_path = resolve_bundle_path(
            bento, pip_installed_bundle_path)

        if with_conda:
            run_with_conda_env(
                bento_service_bundle_path,
                'bentoml serve {bento} --port {port}'.format(
                    bento=bento_service_bundle_path,
                    port=port,
                ),
            )
            return

        bento_service = load(bento_service_bundle_path)
        server = BentoAPIServer(bento_service, port=port)
        server.start()
Example #6
 def list(output, limit=None, filter=None, labels=None):
     track_cli('deploy-list')
     result = get_yatai_service().ListDeployments(
         ListDeploymentsRequest(limit=limit,
                                filter=filter,
                                labels=parse_key_value_pairs(labels)))
     if result.status.status_code != Status.OK:
         _echo(
             'Failed to list deployments. code: {error_code}, message: {error_message}'
             .format(
                 error_code=Status.Code.Name(result.status.status_code),
                 error_message=result.status.error_message,
             ),
             CLI_COLOR_ERROR,
         )
     else:
         for deployment_pb in result.deployments:
             display_deployment_info(deployment_pb, output)
Example #7
 def delete(name, namespace, force):
     yatai_client = YataiClient()
     get_deployment_result = yatai_client.deployment.get(
         namespace=namespace, name=name)
     if get_deployment_result.status.status_code != status_pb2.Status.OK:
         error_code, error_message = status_pb_to_error_code_and_message(
             get_deployment_result.status)
         _echo(
             f'Failed to get AWS Lambda deployment {name} for deletion '
             f'{error_code}:{error_message}',
             CLI_COLOR_ERROR,
         )
         return
     track_cli('deploy-delete', PLATFORM_NAME)
     try:
         result = yatai_client.deployment.delete(namespace=namespace,
                                                 deployment_name=name,
                                                 force_delete=force)
         if result.status.status_code != status_pb2.Status.OK:
             error_code, error_message = status_pb_to_error_code_and_message(
                 result.status)
             track_cli(
                 'deploy-delete-failure',
                 PLATFORM_NAME,
                 {
                     'error_code': error_code,
                     'error_message': error_message
                 },
             )
             _echo(
                 f'Failed to delete AWS Lambda deployment {name} '
                 f'{error_code}:{error_message}',
                 CLI_COLOR_ERROR,
             )
             return
         extra_properties = {}
         if get_deployment_result.deployment.created_at:
             stopped_time = datetime.utcnow()
             extra_properties['uptime'] = int(
                 (stopped_time -
                  get_deployment_result.deployment.created_at.ToDatetime()
                  ).total_seconds())
         track_cli('deploy-delete-success', PLATFORM_NAME, extra_properties)
         _echo(
             f'Successfully deleted AWS Lambda deployment "{name}"',
             CLI_COLOR_SUCCESS,
         )
     except BentoMLException as e:
         track_cli('deploy-delete-failure', PLATFORM_NAME,
                   {'error_message': str(e)})
         _echo(
             f'Failed to delete AWS Lambda deployment {name} {str(e)}',
             CLI_COLOR_ERROR,
         )
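
Several examples call status_pb_to_error_code_and_message to unpack a status protobuf, while Examples #3 and #4 do the same thing inline with Status.Code.Name and error_message. A minimal sketch of such a helper, reconstructed from that inline pattern (an assumption, not the library's verbatim code):

    # Sketch of a status_pb_to_error_code_and_message helper; the import path is
    # an assumption and may differ between BentoML versions.
    from bentoml.proto import status_pb2

    def status_pb_to_error_code_and_message(status_pb):
        # Translate the numeric status code into its enum name and return it
        # together with the human-readable error message.
        error_code = status_pb2.Status.Code.Name(status_pb.status_code)
        error_message = status_pb.error_message
        return error_code, error_message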
Example #8
    def serve_gunicorn(port,
                       workers,
                       timeout,
                       archive_path=archive_path,
                       with_conda=False):
        if with_conda:
            config = load_bentoml_config(archive_path)
            metadata = config['metadata']
            env_name = metadata['service_name'] + '_' + metadata[
                'service_version']
            pip_req = os.path.join(archive_path, 'requirements.txt')

            subprocess.call(
                'command -v conda >/dev/null 2>&1 || {{ echo >&2 "--with-conda '
                'parameter requires conda but it\'s not installed."; exit 1; }} && '
                'conda env update -n {env_name} -f {env_file} && '
                'conda init bash && '
                'eval "$(conda shell.bash hook)" && '
                'conda activate {env_name} && '
                '{{ [ -f {pip_req} ] && pip install -r {pip_req} || echo "no pip '
                'dependencies."; }} &&'
                'bentoml serve_gunicorn {archive_path} -p {port} -w {workers} '
                '--timeout {timeout}'.format(
                    env_name=env_name,
                    env_file=os.path.join(archive_path, 'environment.yml'),
                    archive_path=archive_path,
                    port=port,
                    workers=workers,
                    timeout=timeout,
                    pip_req=pip_req,
                ),
                shell=True,
            )
            return

        track_cli('serve_gunicorn')

        from bentoml.server.gunicorn_server import GunicornBentoServer

        gunicorn_app = GunicornBentoServer(archive_path, port, workers,
                                           timeout)
        gunicorn_app.run()
Example #9
    def list_bentos(limit, offset, order_by, ascending_order, output):
        yatai_client = YataiClient()
        track_cli('bento-list')
        list_bentos_result = yatai_client.repository.list(
            limit=limit,
            offset=offset,
            order_by=order_by,
            ascending_order=ascending_order,
        )
        if list_bentos_result.status.status_code != status_pb2.Status.OK:
            error_code, error_message = status_pb_to_error_code_and_message(
                list_bentos_result.status)
            _echo(
                f'Failed to list BentoServices '
                f'{error_code}:{error_message}',
                CLI_COLOR_ERROR,
            )
            return

        _print_bentos_info(list_bentos_result.bentos, output)
Example #10
    def apply(deployment_yaml, output, wait):
        track_cli('deploy-apply', deployment_yaml.get('spec', {}).get('operator'))
        try:
            deployment_pb = deployment_yaml_to_pb(deployment_yaml)
            yatai_service = get_yatai_service()
            result = yatai_service.ApplyDeployment(
                ApplyDeploymentRequest(deployment=deployment_pb)
            )
            if result.status.status_code != Status.OK:
                _echo(
                    'Failed to apply deployment {name}. code: {error_code}, message: '
                    '{error_message}'.format(
                        name=deployment_pb.name,
                        error_code=Status.Code.Name(result.status.status_code),
                        error_message=result.status.error_message,
                    ),
                    CLI_COLOR_ERROR,
                )
            else:
                if wait:
                    result_state = get_state_after_await_action_complete(
                        yatai_service=yatai_service,
                        name=deployment_pb.name,
                        namespace=deployment_pb.namespace,
                        message='Applying deployment...',
                    )
                    result.deployment.state.CopyFrom(result_state.state)

                _echo(
                    'Successfully applied spec to deployment {}'.format(
                        deployment_pb.name
                    ),
                    CLI_COLOR_SUCCESS,
                )
                display_deployment_info(result.deployment, output)
        except BentoMLException as e:
            _echo(
                'Failed to apply deployment {name}. Error message: {message}'.format(
                    name=deployment_yaml.get('name'), message=e
                ),
                CLI_COLOR_ERROR,
            )
Example #11
 def update(
     name, namespace, bento, min_instances, max_burst, premium_plan_sku, output, wait
 ):
     yatai_client = YataiClient()
     track_cli('deploy-update', PLATFORM_NAME)
     if bento:
         bento_name, bento_version = bento.split(':')
     else:
         bento_name = None
         bento_version = None
     try:
         with Spinner(f'Updating Azure Functions deployment {name}'):
             result = yatai_client.deployment.update_azure_functions_deployment(
                 namespace=namespace,
                 deployment_name=name,
                 bento_name=bento_name,
                 bento_version=bento_version,
                 min_instances=min_instances,
                 max_burst=max_burst,
                 premium_plan_sku=premium_plan_sku,
                 wait=wait,
             )
             if result.status.status_code != status_pb2.Status.OK:
                 error_code, error_message = status_pb_to_error_code_and_message(
                     result.status
                 )
                 _echo(
                     f'Failed to update Azure Functions deployment {name}. '
                     f'{error_code}:{error_message}',
                     CLI_COLOR_ERROR,
                 )
                 return
             track_cli('deploy-update-success', PLATFORM_NAME)
             _echo(
                 f'Successfully updated Azure Functions deployment {name}',
                 CLI_COLOR_SUCCESS,
             )
             _print_deployment_info(result.deployment, output)
     except BentoMLException as e:
         _echo(
             f'Failed to update Azure Functions deployment {name}: {str(e)}',
             CLI_COLOR_ERROR,
         )
Example #12
 def delete(name, namespace, force):
     yatai_service = get_yatai_service()
     get_deployment_result = get_deployment(namespace, name, yatai_service)
     if get_deployment_result.status.status_code != status_pb2.Status.OK:
         _echo(
             'Failed to get deployment {} for deletion. {}:{}'.format(
                 name,
                 status_pb2.Status.Code.Name(
                     get_deployment_result.status.status_code),
                 get_deployment_result.status.error_message,
             ),
             CLI_COLOR_ERROR,
         )
         return
     platform = DeploymentSpec.DeploymentOperator.Name(
         get_deployment_result.deployment.spec.operator)
     track_cli('deploy-delete', platform)
     result = delete_deployment(name, namespace, force, yatai_service)
     if result.status.status_code == status_pb2.Status.OK:
         extra_properties = {}
         if get_deployment_result.deployment.created_at:
             stopped_time = datetime.utcnow()
             extra_properties['uptime'] = int(
                 (stopped_time -
                  get_deployment_result.deployment.created_at.ToDatetime()
                  ).total_seconds())
         track_cli('deploy-delete-success', platform, extra_properties)
         _echo('Successfully deleted deployment "{}"'.format(name),
               CLI_COLOR_SUCCESS)
     else:
         _echo(
             'Failed to delete deployment {name}. code: {error_code}, message: '
             '{error_message}'.format(
                 name=name,
                 error_code=status_pb2.Status.Code.Name(
                     result.status.status_code),
                 error_message=result.status.error_message,
             ),
             CLI_COLOR_ERROR,
         )
Example #13
    def run(ctx, api_name, archive_path=archive_path, with_conda=False):
        if with_conda:
            config = load_bentoml_config(archive_path)
            metadata = config['metadata']
            env_name = metadata['service_name'] + '_' + metadata[
                'service_version']

            yaml = YAML()
            yaml.default_flow_style = False
            tmpf = tempfile.NamedTemporaryFile(delete=False)
            env_path = tmpf.name
            yaml.dump(config['env']['conda_env'], Path(env_path))

            pip_req = os.path.join(archive_path, 'requirements.txt')

            subprocess.call(
                'command -v conda >/dev/null 2>&1 || {{ echo >&2 "--with-conda '
                'parameter requires conda but it\'s not installed."; exit 1; }} && '
                'conda env update -n {env_name} -f {env_file} && '
                'conda init bash && '
                'eval "$(conda shell.bash hook)" && '
                'conda activate {env_name} && '
                '{{ [ -f {pip_req} ] && pip install -r {pip_req} || echo "no pip '
                'dependencies."; }} &&'
                'bentoml {api_name} {archive_path} {args}'.format(
                    env_name=env_name,
                    env_file=env_path,
                    archive_path=archive_path,
                    api_name=api_name,
                    args=' '.join(map(escape_shell_params, ctx.args)),
                    pip_req=pip_req,
                ),
                shell=True,
            )
            return

        track_cli('run')

        api = load_service_api(archive_path, api_name)
        api.handle_cli(ctx.args)
Example #14
 def create(deployment_yaml, output, wait):
     yatai_client = YataiClient()
     platform_name = deployment_yaml.get('spec', {}).get('operator')
     deployment_name = deployment_yaml.get('name')
     track_cli('deploy-create', platform_name)
     try:
         with Spinner('Creating deployment '):
             result = yatai_client.deployment.create(deployment_yaml, wait)
         if result.status.status_code != status_pb2.Status.OK:
             error_code, error_message = status_pb_to_error_code_and_message(
                 result.status
             )
             track_cli(
                 'deploy-create-failure',
                 platform_name,
                 {'error_code': error_code, 'error_message': error_message},
             )
             _echo(
                 f'Failed to create deployment {deployment_name} '
                 f'{error_code}:{error_message}',
                 CLI_COLOR_ERROR,
             )
             return
         track_cli('deploy-create-success', platform_name)
         _echo(
             f'Successfully created deployment {deployment_name}', CLI_COLOR_SUCCESS,
         )
         _print_deployment_info(result.deployment, output)
     except BentoMLException as e:
         _echo(
             f'Failed to create deployment {deployment_name} {str(e)}',
             CLI_COLOR_ERROR,
         )
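
The create() and apply() commands above only read two top-level fields from the parsed deployment_yaml before handing the whole document to the Yatai client: name and spec.operator. A minimal illustrative value with just those fields (the real schema carries more options; the concrete values here are assumptions):

    # Minimal illustrative deployment definition; only the keys the snippets
    # above actually read are shown, everything else is omitted.
    deployment_yaml = {
        'name': 'my-deployment',       # used in tracking events and CLI messages
        'spec': {
            'operator': 'aws-lambda',  # platform name passed to track_cli
        },
    }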
Example #15
    def get(bento, limit, ascending_order, output):
        if ':' in bento:
            name, version = bento.split(':')
        else:
            name = bento
            version = None
        yatai_client = YataiClient()

        if name and version:
            track_cli('bento-get')
            output = output or 'json'
            get_bento_result = yatai_client.repository.get(name, version)
            if get_bento_result.status.status_code != status_pb2.Status.OK:
                error_code, error_message = status_pb_to_error_code_and_message(
                    get_bento_result.status)
                _echo(
                    f'Failed to get BentoService {name}:{version} '
                    f'{error_code}:{error_message}',
                    CLI_COLOR_ERROR,
                )
                return
            _print_bento_info(get_bento_result.bento, output)
            return
        elif name:
            track_cli('bento-list')
            output = output or 'table'
            list_bento_versions_result = yatai_client.repository.list(
                bento_name=name, limit=limit, ascending_order=ascending_order)
            if list_bento_versions_result.status.status_code != status_pb2.Status.OK:
                error_code, error_message = status_pb_to_error_code_and_message(
                    list_bento_versions_result.status)
                _echo(
                    f'Failed to list versions for BentoService {name} '
                    f'{error_code}:{error_message}',
                    CLI_COLOR_ERROR,
                )
                return

            _print_bentos_info(list_bento_versions_result.bentos, output)
Example #16
 def delete(name, namespace, force):
     yatai_client = YataiClient()
     get_deployment_result = yatai_client.deployment.get(namespace, name)
     if get_deployment_result.status.status_code != status_pb2.Status.OK:
         error_code, error_message = status_pb_to_error_code_and_message(
             get_deployment_result.status
         )
         _echo(
             f'Failed to get deployment {name} for deletion. '
             f'{error_code}:{error_message}',
             CLI_COLOR_ERROR,
         )
         return
     platform = DeploymentSpec.DeploymentOperator.Name(
         get_deployment_result.deployment.spec.operator
     )
     track_cli('deploy-delete', platform)
     result = yatai_client.deployment.delete(name, namespace, force)
     if result.status.status_code != status_pb2.Status.OK:
         error_code, error_message = status_pb_to_error_code_and_message(
             result.status
         )
         _echo(
             f'Failed to delete deployment {name}. {error_code}:{error_message}',
             CLI_COLOR_ERROR,
         )
         return
     extra_properties = {}
     if get_deployment_result.deployment.created_at:
         stopped_time = datetime.utcnow()
         extra_properties['uptime'] = int(
             (
                 stopped_time
                 - get_deployment_result.deployment.created_at.ToDatetime()
             ).total_seconds()
         )
     track_cli('deploy-delete-success', platform, extra_properties)
     _echo('Successfully deleted deployment "{}"'.format(name), CLI_COLOR_SUCCESS)
Example #17
    def serve_gunicorn(port, workers, timeout, bento=None, with_conda=False):
        track_cli('serve_gunicorn')
        bento_service_bundle_path = resolve_bundle_path(
            bento, pip_installed_bundle_path)

        if with_conda:
            run_with_conda_env(
                pip_installed_bundle_path,
                'bentoml serve_gunicorn {bento} -p {port} -w {workers} '
                '--timeout {timeout}'.format(
                    bento=bento_service_bundle_path,
                    port=port,
                    workers=workers,
                    timeout=timeout,
                ),
            )
            return

        from bentoml.server.gunicorn_server import GunicornBentoServer

        gunicorn_app = GunicornBentoServer(bento_service_bundle_path, port,
                                           workers, timeout)
        gunicorn_app.run()
Example #18
    def unset(updates):
        track_cli('config-unset')
        local_config = ConfigParser()
        with open(LOCAL_CONFIG_FILE, 'rb') as config_file:
            local_config.read_string(config_file.read().decode('utf-8'))
        try:
            for update in updates:
                if '.' in update:
                    sec, opt = update.split('.')
                else:
                    sec = 'core'  # default section
                    opt = update

                if not local_config.has_section(sec):
                    local_config.add_section(sec)
                local_config.remove_option(sec.strip(), opt.strip())

            with open(LOCAL_CONFIG_FILE, 'w') as config_file:
                local_config.write(config_file)
            return
        except ValueError:
            _echo('Wrong config format: %s' % str(updates), CLI_COLOR_ERROR)
            _echo(EXAMPLE_CONFIG_USAGE)
            return
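
The unset command splits each update on a dot to get a section and an option, and falls back to the core section when no dot is present. The splitting rule in isolation, with hypothetical option names:

    # The key-splitting rule used above, shown on illustrative inputs.
    for update in ('yatai.url', 'usage_tracking'):
        if '.' in update:
            sec, opt = update.split('.')
        else:
            sec, opt = 'core', update   # default section when no dot is given
        print(sec, opt)                 # -> "yatai url", then "core usage_tracking"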
Example #19
 def list_deployments(namespace, limit, labels, order_by, asc, output):
     yatai_client = YataiClient()
     track_cli('deploy-list', PLATFORM_NAME)
     try:
         list_result = yatai_client.deployment.list_lambda_deployments(
             limit=limit,
             labels_query=labels,
             namespace=namespace,
             order_by=order_by,
             ascending_order=asc,
         )
         if list_result.status.status_code != status_pb2.Status.OK:
             error_code, error_message = status_pb_to_error_code_and_message(
                 list_result.status)
             _echo(
              f'Failed to list AWS Lambda deployments. '
                 f'{error_code}:{error_message}',
                 CLI_COLOR_ERROR,
             )
         else:
             _print_deployments_info(list_result.deployments, output)
     except BentoMLException as e:
          _echo(f'Failed to list AWS Lambda deployments {str(e)}')
Example #20
    def describe(name, output, namespace):
        track_cli('deploy-describe')
        yatai_service = get_yatai_service()

        result = yatai_service.DescribeDeployment(
            DescribeDeploymentRequest(deployment_name=name,
                                      namespace=namespace))
        if result.status.status_code != Status.OK:
            _echo(
                'Failed to describe deployment {name}. code: {error_code}, message: '
                '{error_message}'.format(
                    name=name,
                    error_code=Status.Code.Name(result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            get_response = yatai_service.GetDeployment(
                GetDeploymentRequest(deployment_name=name,
                                     namespace=namespace))
            deployment_pb = get_response.deployment
            deployment_pb.state.CopyFrom(result.state)
            display_deployment_info(deployment_pb, output)
Example #21
 def list_deployments(namespace, platform, limit, labels, order_by, asc, output):
     yatai_client = YataiClient()
     track_cli('deploy-list')
     try:
         list_result = yatai_client.deployment.list(
             limit=limit,
             labels_query=labels,
             namespace=namespace,
             operator=platform,
             order_by=order_by,
             ascending_order=asc,
         )
         if list_result.status.status_code != status_pb2.Status.OK:
             error_code, error_message = status_pb_to_error_code_and_message(
                 list_result.status
             )
             _echo(
                 f'Failed to list deployments {error_code}:{error_message}',
                 CLI_COLOR_ERROR,
             )
             return
         _print_deployments_info(list_result.deployments, output)
     except BentoMLException as e:
         _echo(f'Failed to list deployments {str(e)}')
Example #22
    def list_deployments_cli(output, limit, filters, labels, namespace,
                             all_namespaces):
        track_cli('deploy-list')
        yatai_client = YataiClient()

        result = yatai_client.deployment.list(
            limit=limit,
            filters=filters,
            labels=parse_key_value_pairs(labels),
            namespace=namespace,
            is_all_namespaces=all_namespaces,
        )
        if result.status.status_code != status_pb2.Status.OK:
            _echo(
                'Failed to list deployments. {error_code}:{error_message}'.
                format(
                    error_code=status_pb2.Status.Code.Name(
                        result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            _print_deployments_info(result.deployments, output)
Example #23
 def apply(deployment_yaml, output, wait):
     track_cli('deploy-apply',
               deployment_yaml.get('spec', {}).get('operator'))
     platform_name = deployment_yaml.get('spec', {}).get('operator')
     deployment_name = deployment_yaml.get('name')
     try:
         yatai_client = YataiClient()
         with Spinner('Applying deployment'):
             result = yatai_client.deployment.apply(deployment_yaml, wait)
         if result.status.status_code != status_pb2.Status.OK:
             error_code, error_message = status_pb_to_error_code_and_message(
                 result.status)
             track_cli(
                 'deploy-apply-failure',
                 platform_name,
                 {
                     'error_code': error_code,
                     'error_message': error_message
                 },
             )
             _echo(
                 f'Failed to apply deployment {deployment_name} '
                 f'{error_code}:{error_message}',
                 CLI_COLOR_ERROR,
             )
             return
          track_cli('deploy-apply-success', platform_name)
         _echo(
             f'Successfully applied deployment {deployment_name}',
             CLI_COLOR_SUCCESS,
         )
         _print_deployment_info(result.deployment, output)
     except BentoMLException as e:
         track_cli(
             'deploy-apply-failure',
             platform_name,
             {'error_message': str(e)},
         )
         _echo(
             'Failed to apply deployment {name}. Error message: {message}'.
             format(name=deployment_yaml.get('name'), message=e))
Example #24
 def update(name, namespace, bento, memory_size, timeout, output, wait):
     yatai_client = YataiClient()
     if bento:
         bento_name, bento_version = bento.split(':')
     else:
         bento_name = None
         bento_version = None
     try:
         with Spinner('Updating Lambda deployment '):
             result = yatai_client.deployment.update_lambda_deployment(
                 bento_name=bento_name,
                 bento_version=bento_version,
                 deployment_name=name,
                 namespace=namespace,
                 memory_size=memory_size,
                 timeout=timeout,
                 wait=wait,
             )
             if result.status.status_code != status_pb2.Status.OK:
                 error_code, error_message = status_pb_to_error_code_and_message(
                     result.status)
                 track_cli(
                     'deploy-update-failure',
                     PLATFORM_NAME,
                     {
                         'error_code': error_code,
                         'error_message': error_message
                     },
                 )
                 _echo(
                     f'Failed to update AWS Lambda deployment {name} '
                     f'{error_code}:{error_message}',
                     CLI_COLOR_ERROR,
                 )
                 return
             track_cli('deploy-update-success', PLATFORM_NAME)
             _echo(
                 f'Successfully updated AWS Lambda deployment {name}',
                 CLI_COLOR_SUCCESS,
             )
             _print_deployment_info(result.deployment, output)
     except BentoMLException as e:
         track_cli('deploy-update-failure', PLATFORM_NAME,
                   {'error_message': str(e)})
         _echo(
              f'Failed to update AWS Lambda deployment {name}: {str(e)}',
             CLI_COLOR_ERROR,
         )
Example #25
 def view_effective():
     track_cli('config-view-effective')
     bentoml_config().write(sys.stdout)
     return
Example #26
 def view():
     track_cli('config-view')
     local_config = ConfigParser()
     local_config.read(get_local_config_file(), encoding='utf-8')
     local_config.write(sys.stdout)
     return
Example #27
    def docs(archive_path=archive_path):
        track_cli('docs')
        bento_service = load(archive_path)

        _echo(json.dumps(get_docs(bento_service), indent=2))
Example #28
    def open_api_spec(archive_path=archive_path):
        track_cli('open-api-spec')
        bento_service = load(archive_path)

        _echo(json.dumps(get_docs(bento_service), indent=2))
Example #29
    def create(
        name,
        bento,
        platform,
        output,
        namespace,
        labels,
        annotations,
        region,
        instance_type,
        instance_count,
        api_name,
        kube_namespace,
        replicas,
        service_name,
        service_type,
        wait,
    ):
        # converting platform parameter to DeploymentOperator name in proto
        # e.g. 'aws-lambda' to 'AWS_LAMBDA'
        track_cli('deploy-create', platform.replace('-', '_').upper())
        bento_name, bento_version = bento.split(':')
        operator_spec = {
            'region': region,
            'instance_type': instance_type,
            'instance_count': instance_count,
            'api_name': api_name,
            'kube_namespace': kube_namespace,
            'replicas': replicas,
            'service_name': service_name,
            'service_type': service_type,
        }
        yatai_service = get_yatai_service()
        result = create_deployment(
            name,
            namespace,
            bento_name,
            bento_version,
            platform,
            operator_spec,
            parse_key_value_pairs(labels),
            parse_key_value_pairs(annotations),
            yatai_service,
        )

        if result.status.status_code != status_pb2.Status.OK:
            _echo(
                'Failed to create deployment {name}. {error_code}:'
                '{error_message}'.format(
                    name=name,
                    error_code=status_pb2.Status.Code.Name(
                        result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            if wait:
                result_state = get_state_after_await_action_complete(
                    yatai_service=yatai_service,
                    name=name,
                    namespace=namespace,
                    message='Creating deployment ',
                )
                if result_state.status.status_code != status_pb2.Status.OK:
                    _echo(
                        'Created deployment {name}, failed to retrieve latest status.'
                        ' {error_code}:{error_message}'.format(
                            name=name,
                            error_code=status_pb2.Status.Code.Name(
                                result_state.status.status_code),
                            error_message=result_state.status.error_message,
                        ))
                    return
                result.deployment.state.CopyFrom(result_state.state)

            track_cli('deploy-create-success',
                      platform.replace('-', '_').upper())
            _echo('Successfully created deployment {}'.format(name),
                  CLI_COLOR_SUCCESS)
            _print_deployment_info(result.deployment, output)
Example #30
    def create(
        name,
        bento,
        platform,
        output,
        namespace,
        labels,
        annotations,
        region,
        instance_type,
        instance_count,
        api_name,
        kube_namespace,
        replicas,
        service_name,
        service_type,
        wait,
    ):
        # converting platform parameter to DeploymentOperator name in proto
        # e.g. 'aws-lambda' to 'AWS_LAMBDA'
        platform = platform.replace('-', '_').upper()
        operator = DeploymentSpec.DeploymentOperator.Value(platform)

        track_cli('deploy-create', platform)

        yatai_service = get_yatai_service()

        # Make sure there is no active deployment with the same deployment name
        get_deployment = yatai_service.GetDeployment(
            GetDeploymentRequest(deployment_name=name, namespace=namespace))
        if get_deployment.status.status_code != Status.NOT_FOUND:
            raise BentoMLDeploymentException(
                'Deployment {name} already exists, please use the update or apply '
                'command instead'.format(name=name))

        if operator == DeploymentSpec.AWS_SAGEMAKER:
            if not api_name:
                raise click.BadParameter(
                    'api-name is required for Sagemaker deployment')

            sagemaker_operator_config = DeploymentSpec.SageMakerOperatorConfig(
                region=region or config().get('aws', 'default_region'),
                instance_count=instance_count
                or config().getint('sagemaker', 'instance_count'),
                instance_type=instance_type
                or config().get('sagemaker', 'instance_type'),
                api_name=api_name,
            )
            spec = DeploymentSpec(
                sagemaker_operator_config=sagemaker_operator_config)
        elif operator == DeploymentSpec.AWS_LAMBDA:
            aws_lambda_operator_config = DeploymentSpec.AwsLambdaOperatorConfig(
                region=region or config().get('aws', 'default_region'))
            if api_name:
                aws_lambda_operator_config.api_name = api_name
            spec = DeploymentSpec(
                aws_lambda_operator_config=aws_lambda_operator_config)
        elif operator == DeploymentSpec.GCP_FUNCTION:
            gcp_function_operator_config = DeploymentSpec.GcpFunctionOperatorConfig(
                region=region
                or config().get('google-cloud', 'default_region'))
            if api_name:
                gcp_function_operator_config.api_name = api_name
            spec = DeploymentSpec(
                gcp_function_operator_config=gcp_function_operator_config)
        elif operator == DeploymentSpec.KUBERNETES:
            kubernetes_operator_config = DeploymentSpec.KubernetesOperatorConfig(
                kube_namespace=kube_namespace,
                replicas=replicas,
                service_name=service_name,
                service_type=service_type,
            )
            spec = DeploymentSpec(
                kubernetes_operator_config=kubernetes_operator_config)
        else:
            raise BentoMLDeploymentException(
                'Custom deployment is not supported in the current version of BentoML'
            )

        bento_name, bento_version = bento.split(':')
        spec.bento_name = bento_name
        spec.bento_version = bento_version
        spec.operator = operator

        result = yatai_service.ApplyDeployment(
            ApplyDeploymentRequest(deployment=Deployment(
                namespace=namespace,
                name=name,
                annotations=parse_key_value_pairs(annotations),
                labels=parse_key_value_pairs(labels),
                spec=spec,
            )))
        if result.status.status_code != Status.OK:
            _echo(
                'Failed to create deployment {name}. {error_code}: '
                '{error_message}'.format(
                    name=name,
                    error_code=Status.Code.Name(result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            if wait:
                result_state = get_state_after_await_action_complete(
                    yatai_service=yatai_service,
                    name=name,
                    namespace=namespace,
                    message='Creating deployment ',
                )
                result.deployment.state.CopyFrom(result_state.state)

            _echo('Successfully created deployment {}'.format(name),
                  CLI_COLOR_SUCCESS)
            display_deployment_info(result.deployment, output)
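
Examples #6, #22, #29 and #30 pass the raw labels and annotations strings through parse_key_value_pairs before putting them on the protobuf. A sketch of such a helper, assuming a comma-separated key:value format (the actual CLI syntax may differ):

    # Sketch of a parse_key_value_pairs helper; the "key:value,key2:value2"
    # input format is an assumption, not a documented contract.
    def parse_key_value_pairs(kv_string):
        if not kv_string:
            return {}
        result = {}
        for pair in kv_string.split(','):
            key, value = pair.split(':', 1)
            result[key.strip()] = value.strip()
        return result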