Exemple #1
0
def get_arn_role_from_current_aws_user():
    """Return an IAM role ARN that SageMaker can assume for the current caller.

    If the caller identity is an IAM user or the account root, scan the
    account's roles (all pages) for one whose trust policy allows
    ``sagemaker.amazonaws.com`` to assume it. If the caller is already a
    role, return that role's ARN directly.

    Raises:
        BentoMLDeploymentException: when no suitable role is found or the
            caller's ARN type is not supported.
    """
    sts_client = boto3.client("sts")
    identity = sts_client.get_caller_identity()
    sts_arn = identity["Arn"]
    # ARN tail looks like "<type>/<name>", e.g. "user/alice" or "role/my-role"
    type_role = sts_arn.split(":")[-1].split("/")
    iam_client = boto3.client("iam")
    if type_role[0] in ("user", "root"):
        # BUG FIXES vs original:
        # - list_roles() returns at most one page; use the paginator so roles
        #   beyond the first page are considered.
        # - check every statement, not only Statement[0].
        # - statements may have no Principal.Service key (e.g. Principal.AWS);
        #   the original raised KeyError on those.
        # - Principal.Service may be a single string or a list of services.
        # - return on first match instead of scanning on and keeping the last.
        paginator = iam_client.get_paginator("list_roles")
        for page in paginator.paginate():
            for role in page["Roles"]:
                for statement in role["AssumeRolePolicyDocument"]["Statement"]:
                    if statement["Effect"] != "Allow":
                        continue
                    service = statement.get("Principal", {}).get("Service", [])
                    if isinstance(service, str):
                        service = [service]
                    if "sagemaker.amazonaws.com" in service:
                        return role["Arn"]
        raise BentoMLDeploymentException(
            "Can't find proper Arn role for Sagemaker, please create one and try "
            "again"
        )
    elif type_role[0] == "role":
        role_response = iam_client.get_role(RoleName=type_role[1])
        return role_response["Role"]["Arn"]

    raise BentoMLDeploymentException(
        "Not supported role type {}; sts arn is {}".format(type_role[0], sts_arn)
    )
Exemple #2
0
def apply_deployment(deployment_info, yatai_service=None):
    """Validate a deployment description and submit it to YataiService.

    ``deployment_info`` may be a dict or a YAML string. Returns an
    ApplyDeploymentResponse; BentoML errors are reported through an INTERNAL
    status rather than raised to the caller.
    """
    if yatai_service is None:
        # Imported lazily to avoid a circular import at module load time.
        from bentoml.yatai import get_yatai_service

        yatai_service = get_yatai_service()

    try:
        if isinstance(deployment_info, str):
            deployment_pb = deployment_yaml_string_to_pb(deployment_info)
        elif isinstance(deployment_info, dict):
            deployment_pb = deployment_dict_to_pb(deployment_info)
        else:
            raise BentoMLDeploymentException(
                'Unexpected argument type, expect deployment info to be str in yaml '
                'format or a dict, instead got: {}'.format(str(type(deployment_info)))
            )

        schema_errors = validate_deployment_pb_schema(deployment_pb)
        if schema_errors:
            message = 'Failed to validate deployment: {errors}'.format(
                errors=schema_errors
            )
            return ApplyDeploymentResponse(status=Status.INVALID_ARGUMENT(message))

        request = ApplyDeploymentRequest(deployment=deployment_pb)
        return yatai_service.ApplyDeployment(request)
    except BentoMLException as error:
        return ApplyDeploymentResponse(status=Status.INTERNAL(str(error)))
Exemple #3
0
    def apply(self, deployment_pb, yatai_service, prev_deployment=None):
        """Fetch the target bento and delegate the actual deploy to _apply.

        Returns an ApplyDeploymentResponse; BentoML errors are converted into
        an error status instead of propagating.
        """
        try:
            ensure_docker_available_or_raise()
            spec = deployment_pb.spec
            if spec.sagemaker_operator_config is None:
                raise BentoMLDeploymentException('Sagemaker configuration is missing.')

            get_bento_request = GetBentoRequest(
                bento_name=spec.bento_name,
                bento_version=spec.bento_version,
            )
            bento_pb = yatai_service.GetBento(get_bento_request)
            uri_type = bento_pb.bento.uri.type
            # Only local and S3 bento repositories are deployable here.
            if uri_type not in (BentoUri.LOCAL, BentoUri.S3):
                raise BentoMLException(
                    'BentoML currently not support {} repository'.format(uri_type)
                )

            return self._apply(
                deployment_pb,
                bento_pb,
                yatai_service,
                bento_pb.bento.uri.uri,
                prev_deployment,
            )
        except BentoMLException as error:
            return ApplyDeploymentResponse(status=exception_to_return_status(error))
Exemple #4
0
    def describe(self, deployment_pb, repo=None):
        """Describe the SageMaker endpoint backing this deployment.

        Returns a DescribeDeploymentResponse whose DeploymentState carries the
        endpoint status and the raw AWS describe response as JSON.

        Raises:
            BentoMLDeploymentException: when the sagemaker operator config is
                missing from the deployment spec.
        """
        deployment_spec = deployment_pb.spec
        sagemaker_config = deployment_spec.sagemaker_operator_config
        if sagemaker_config is None:
            raise BentoMLDeploymentException(
                'Sagemaker configuration is missing.')
        sagemaker_client = boto3.client('sagemaker', sagemaker_config.region)
        endpoint_name = generate_aws_compatible_string(
            deployment_pb.namespace + '-' + deployment_spec.bento_name)
        try:
            endpoint_status_response = sagemaker_client.describe_endpoint(
                EndpointName=endpoint_name)
        except ClientError as e:
            status = _parse_aws_client_exception_or_raise(e)
            # BUG FIX: the original assigned a (template, value) tuple to
            # error_message instead of interpolating the message.
            status.error_message = (
                'Failed to describe SageMaker deployment: {}'.format(
                    status.error_message
                )
            )
            return DescribeDeploymentResponse(status=status)

        logger.debug("AWS describe endpoint response: %s",
                     endpoint_status_response)
        endpoint_status = endpoint_status_response["EndpointStatus"]

        service_state = ENDPOINT_STATUS_TO_STATE[endpoint_status]

        deployment_state = DeploymentState(
            state=service_state,
            # default=str so datetimes in the AWS response serialize cleanly
            info_json=json.dumps(endpoint_status_response, default=str),
        )

        return DescribeDeploymentResponse(state=deployment_state,
                                          status=Status.OK())
Exemple #5
0
 def delete(self, name, namespace):
     """Delete the deployment record matching (name, namespace).

     Raises:
         BentoMLDeploymentException: when no such deployment exists.
     """
     with create_session(self.sess_maker) as sess:
         try:
             deployment = (sess.query(Deployment).filter_by(
                 name=name, namespace=namespace).one())
             return sess.delete(deployment)
         except NoResultFound:
             # BUG FIX: the original applied '%' to `name` alone (TypeError:
             # not enough arguments for format string) and passed `namespace`
             # as a second exception argument.
             raise BentoMLDeploymentException(
                 "Deployment '%s' in namespace: '%s' is not found"
                 % (name, namespace))
Exemple #6
0
    def delete(self, deployment_pb, yatai_service=None):
        """Delete the SageMaker endpoint, endpoint config and model.

        The endpoint is removed first; a failure there aborts the delete.
        Config and model cleanup failures are reported through the returned
        status. BentoML errors are converted to a return status.
        """
        try:
            deployment_spec = deployment_pb.spec
            sagemaker_config = deployment_spec.sagemaker_operator_config
            if sagemaker_config is None:
                raise BentoMLDeploymentException('Sagemaker configuration is missing.')
            sagemaker_client = boto3.client('sagemaker', sagemaker_config.region)

            endpoint_name = generate_aws_compatible_string(
                deployment_pb.namespace + '-' + deployment_spec.bento_name
            )
            try:
                delete_endpoint_response = sagemaker_client.delete_endpoint(
                    EndpointName=endpoint_name
                )
                logger.debug(
                    "AWS delete endpoint response: %s", delete_endpoint_response
                )
            except ClientError as e:
                status = _parse_aws_client_exception_or_raise(e)
                status.error_message = 'Failed to delete SageMaker endpoint: {}'.format(
                    status.error_message
                )
                return DeleteDeploymentResponse(status=status)

            delete_config_error = _cleanup_sagemaker_endpoint_config(
                client=sagemaker_client,
                name=deployment_spec.bento_name,
                version=deployment_spec.bento_version,
            )
            if delete_config_error:
                # BUG FIX: the original assigned a (template, value) tuple to
                # error_message instead of the interpolated string.
                delete_config_error.error_message = (
                    'Failed to delete SageMaker endpoint config: {}'.format(
                        delete_config_error.error_message
                    )
                )
                return DeleteDeploymentResponse(status=delete_config_error)

            delete_model_error = _cleanup_sagemaker_model(
                client=sagemaker_client,
                name=deployment_spec.bento_name,
                version=deployment_spec.bento_version,
            )
            if delete_model_error:
                # BUG FIX: same tuple-assignment defect as above.
                delete_model_error.error_message = (
                    'Failed to delete SageMaker model: {}'.format(
                        delete_model_error.error_message
                    )
                )
                return DeleteDeploymentResponse(status=delete_model_error)

            return DeleteDeploymentResponse(status=Status.OK())
        except BentoMLException as error:
            return DeleteDeploymentResponse(status=exception_to_return_status(error))
Exemple #7
0
def get_deployment_operator(deployment_pb):
    """Return the operator implementation for the deployment's platform.

    Raises:
        NotImplementedError: for recognized but unimplemented platforms.
        BentoMLDeploymentException: when the operator field is unset/unknown.
    """
    operator = deployment_pb.spec.operator

    if operator == DeploymentOperator.AWS_SAGEMAKER:
        # Imported lazily so unrelated platforms don't pay the import cost.
        from bentoml.deployment.sagemaker import SageMakerDeploymentOperator

        return SageMakerDeploymentOperator()
    # BUG FIX: the original branches below were `pass`, falling through and
    # silently returning None; raise explicitly instead.
    elif operator == DeploymentOperator.AWS_LAMBDA:
        raise NotImplementedError(
            "AWS Lambda deployment operator is not implemented")
    elif operator == DeploymentOperator.GCP_FUNCTION:
        raise NotImplementedError(
            "GCP function deployment operator is not implemented")
    elif operator == DeploymentOperator.KUBERNETES:
        raise NotImplementedError(
            "Kubernetes deployment operator is not implemented")
    elif operator == DeploymentOperator.CUSTOM:
        raise NotImplementedError(
            "Custom deployment operator is not implemented")
    else:
        raise BentoMLDeploymentException("DeployOperator must be set")
Exemple #8
0
    def delete(self, deployment_pb, repo=None):
        """Delete the SageMaker endpoint and best-effort clean up its
        model and endpoint configuration.

        Raises:
            BentoMLDeploymentException: when the sagemaker operator config is
                missing from the deployment spec.
        """
        spec = deployment_pb.spec
        sagemaker_config = spec.sagemaker_operator_config
        if sagemaker_config is None:
            raise BentoMLDeploymentException('Sagemaker configuration is missing.')
        client = boto3.client('sagemaker', sagemaker_config.region)

        endpoint_name = generate_aws_compatible_string(
            deployment_pb.namespace + '-' + spec.bento_name
        )
        endpoint_response = client.delete_endpoint(EndpointName=endpoint_name)
        logger.debug("AWS delete endpoint response: %s", endpoint_response)
        if endpoint_response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            return DeleteDeploymentResponse(
                status=Status.INTERNAL(str(endpoint_response))
            )

        # We will also try to delete both model and endpoint configuration for user.
        # Since they are not critical, even they failed, we will still count delete
        # deployment a success action
        model_name = create_sagemaker_model_name(
            spec.bento_name, spec.bento_version
        )
        model_response = client.delete_model(ModelName=model_name)
        logger.debug("AWS delete model response: %s", model_response)
        if model_response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            logger.error(
                "Encounter error when deleting model: %s", model_response
            )

        config_name = create_sagemaker_endpoint_config_name(
            spec.bento_name, spec.bento_version
        )
        config_response = client.delete_endpoint_config(
            EndpointConfigName=config_name
        )
        logger.debug(
            "AWS delete endpoint config response: %s",
            config_response,
        )
        return DeleteDeploymentResponse(status=Status.OK())
Exemple #9
0
def get_deployment_operator(deployment_pb):
    """Return the operator implementation for the deployment's platform.

    Raises:
        NotImplementedError: for recognized but unimplemented platforms.
        BentoMLDeploymentException: when the operator field is unset/unknown.
    """
    operator = deployment_pb.spec.operator

    if operator == deployment_pb2.AWS_SAGEMAKER:
        # Imported lazily so unrelated platforms don't pay the import cost.
        from bentoml.deployment.sagemaker import SageMakerDeploymentOperator

        return SageMakerDeploymentOperator()
    elif operator == deployment_pb2.AWS_LAMBDA:
        # BUG FIX: this branch was `pass`, silently returning None; raise
        # like the other unimplemented platforms below.
        raise NotImplementedError(
            "AWS Lambda deployment operator is not implemented")
    elif operator == deployment_pb2.GCP_FUNCTION:
        raise NotImplementedError(
            "GCP function deployment operator is not implemented")
    elif operator == deployment_pb2.KUBERNETES:
        raise NotImplementedError(
            "Kubernetes deployment operator is not implemented")
    elif operator == deployment_pb2.CUSTOM:
        raise NotImplementedError(
            "Custom deployment operator is not implemented")
    else:
        raise BentoMLDeploymentException("DeployOperator must be set")
Exemple #10
0
def get_deployment_operator(deployment_pb):
    """Instantiate the deployment operator matching the spec's platform.

    Raises NotImplementedError for recognized-but-unimplemented platforms and
    BentoMLDeploymentException when the operator field is unset/unknown.
    """
    operator = deployment_pb.spec.operator

    # Operators are imported lazily so unused platforms cost nothing.
    if operator == DeploymentSpec.AWS_SAGEMAKER:
        from bentoml.deployment.sagemaker import SageMakerDeploymentOperator

        return SageMakerDeploymentOperator()
    if operator == DeploymentSpec.AWS_LAMBDA:
        from bentoml.deployment.serverless.aws_lambda import AwsLambdaDeploymentOperator

        return AwsLambdaDeploymentOperator()
    if operator == DeploymentSpec.GCP_FUNCTION:
        from bentoml.deployment.serverless.gcp_function import (
            GcpFunctionDeploymentOperator, )

        return GcpFunctionDeploymentOperator()
    if operator == DeploymentSpec.KUBERNETES:
        raise NotImplementedError(
            "Kubernetes deployment operator is not implemented")
    if operator == DeploymentSpec.CUSTOM:
        raise NotImplementedError(
            "Custom deployment operator is not implemented")
    raise BentoMLDeploymentException("DeployOperator must be set")
Exemple #11
0
    def apply(self, deployment_pb, repo, prev_deployment=None):
        """Deploy a bento archive from `repo` as a SageMaker endpoint.

        Builds and pushes a Docker image to ECR, creates a SageMaker model and
        endpoint configuration, then creates the endpoint (or updates it when
        `prev_deployment` is given). Returns an ApplyDeploymentResponse with
        an OK status and a copy of the input deployment.

        Raises:
            BentoMLDeploymentException: when the sagemaker operator config is
                missing from the deployment spec.

        NOTE(review): AWS ClientError from the create_* calls is not caught
        here, and failures do not roll back earlier resources — confirm this
        is handled by the caller.
        """
        deployment_spec = deployment_pb.spec
        sagemaker_config = deployment_spec.sagemaker_operator_config
        if sagemaker_config is None:
            raise BentoMLDeploymentException('Sagemaker configuration is missing.')

        archive_path = repo.get(
            deployment_spec.bento_name, deployment_spec.bento_version
        )

        # config = load_bentoml_config(bento_path)...

        sagemaker_client = boto3.client('sagemaker', sagemaker_config.region)

        # Stage the archive into a temporary SageMaker build context and push
        # the resulting image to ECR; the image path feeds the model below.
        with TemporarySageMakerContent(
            archive_path, deployment_spec.bento_name, deployment_spec.bento_version
        ) as temp_path:
            ecr_image_path = create_push_image_to_ecr(
                deployment_spec.bento_name, deployment_spec.bento_version, temp_path
            )

        execution_role_arn = get_arn_role_from_current_user()
        model_name = create_sagemaker_model_name(
            deployment_spec.bento_name, deployment_spec.bento_version
        )

        # Model definition: the container serves one configured API, with
        # server timeout/worker-count taken from BentoML's apiserver config.
        sagemaker_model_info = {
            "ModelName": model_name,
            "PrimaryContainer": {
                "ContainerHostname": model_name,
                "Image": ecr_image_path,
                "Environment": {
                    "API_NAME": sagemaker_config.api_name,
                    "BENTO_SERVER_TIMEOUT": config().get(
                        'apiserver', 'default_timeout'
                    ),
                    "BENTO_SERVER_WORKERS": config().get(
                        'apiserver', 'default_gunicorn_workers_count'
                    ),
                },
            },
            "ExecutionRoleArn": execution_role_arn,
        }
        logger.info("Creating sagemaker model %s", model_name)
        create_model_response = sagemaker_client.create_model(**sagemaker_model_info)
        logger.debug("AWS create model response: %s", create_model_response)

        # Single production variant sized from the operator config.
        production_variants = [
            {
                "VariantName": generate_aws_compatible_string(
                    deployment_spec.bento_name
                ),
                "ModelName": model_name,
                "InitialInstanceCount": sagemaker_config.instance_count,
                "InstanceType": sagemaker_config.instance_type,
            }
        ]
        endpoint_config_name = create_sagemaker_endpoint_config_name(
            deployment_spec.bento_name, deployment_spec.bento_version
        )
        logger.info(
            "Creating Sagemaker endpoint %s configuration", endpoint_config_name
        )
        create_endpoint_config_response = sagemaker_client.create_endpoint_config(
            EndpointConfigName=endpoint_config_name,
            ProductionVariants=production_variants,
        )
        logger.debug(
            "AWS create endpoint config response: %s", create_endpoint_config_response
        )

        # Endpoint name is namespace-scoped; update in place when this apply
        # is revising an existing deployment.
        endpoint_name = generate_aws_compatible_string(
            deployment_pb.namespace + '-' + deployment_spec.bento_name
        )
        if prev_deployment:
            logger.info("Updating sagemaker endpoint %s", endpoint_name)
            update_endpoint_response = sagemaker_client.update_endpoint(
                EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name
            )
            logger.debug("AWS update endpoint response: %s", update_endpoint_response)
        else:
            logger.info("Creating sagemaker endpoint %s", endpoint_name)
            create_endpoint_response = sagemaker_client.create_endpoint(
                EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name
            )
            logger.debug("AWS create endpoint response: %s", create_endpoint_response)

        res_deployment_pb = Deployment(state=DeploymentState())
        res_deployment_pb.CopyFrom(deployment_pb)

        return ApplyDeploymentResponse(status=Status.OK(), deployment=res_deployment_pb)
Exemple #12
0
    def apply(self, deployment_pb, yatai_service, prev_deployment=None):
        """Build, push and deploy a bento as a SageMaker endpoint.

        Creates (or updates, when `prev_deployment` is given) the SageMaker
        model, endpoint configuration and endpoint. On a partial failure the
        resources created so far are cleaned up best-effort and the error is
        reported through the response status rather than raised.
        """
        try:
            ensure_docker_available_or_raise()
            deployment_spec = deployment_pb.spec
            sagemaker_config = deployment_spec.sagemaker_operator_config
            if sagemaker_config is None:
                raise BentoMLDeploymentException('Sagemaker configuration is missing.')

            bento_pb = yatai_service.GetBento(
                GetBentoRequest(
                    bento_name=deployment_spec.bento_name,
                    bento_version=deployment_spec.bento_version,
                )
            )
            if bento_pb.bento.uri.type != BentoUri.LOCAL:
                raise BentoMLException(
                    'BentoML currently only support local repository'
                )
            else:
                bento_path = bento_pb.bento.uri.uri

            # Fail early if the configured API is not exposed by the bento.
            ensure_deploy_api_name_exists_in_bento(
                [api.name for api in bento_pb.bento.bento_service_metadata.apis],
                [sagemaker_config.api_name],
            )

            sagemaker_client = boto3.client('sagemaker', sagemaker_config.region)

            # Stage the project into a temp dir, build the image and push to
            # ECR; the image path feeds the model definition below.
            with TempDirectory() as temp_dir:
                # BUG FIX: was os.path.jon (AttributeError at runtime).
                sagemaker_project_dir = os.path.join(
                    temp_dir, deployment_spec.bento_name
                )
                init_sagemaker_project(sagemaker_project_dir, bento_path)
                ecr_image_path = create_push_docker_image_to_ecr(
                    deployment_spec.bento_name,
                    deployment_spec.bento_version,
                    sagemaker_project_dir,
                )

            execution_role_arn = get_arn_role_from_current_aws_user()
            model_name = create_sagemaker_model_name(
                deployment_spec.bento_name, deployment_spec.bento_version
            )

            sagemaker_model_info = {
                "ModelName": model_name,
                "PrimaryContainer": {
                    "ContainerHostname": model_name,
                    "Image": ecr_image_path,
                    "Environment": {
                        "API_NAME": sagemaker_config.api_name,
                        "BENTO_SERVER_TIMEOUT": config().get(
                            'apiserver', 'default_timeout'
                        ),
                        "BENTO_SERVER_WORKERS": config().get(
                            'apiserver', 'default_gunicorn_workers_count'
                        ),
                    },
                },
                "ExecutionRoleArn": execution_role_arn,
            }

            logger.info("Creating sagemaker model %s", model_name)
            try:
                create_model_response = sagemaker_client.create_model(
                    **sagemaker_model_info
                )
                logger.debug("AWS create model response: %s", create_model_response)
            except ClientError as e:
                status = _parse_aws_client_exception_or_raise(e)
                # BUG FIX: the original assigned a (template, value) tuple to
                # error_message instead of the interpolated string — the same
                # defect is fixed at every error_message assignment below.
                status.error_message = (
                    'Failed to create model for SageMaker Deployment: {}'.format(
                        status.error_message
                    )
                )
                return ApplyDeploymentResponse(status=status, deployment=deployment_pb)

            production_variants = [
                {
                    "VariantName": generate_aws_compatible_string(
                        deployment_spec.bento_name
                    ),
                    "ModelName": model_name,
                    "InitialInstanceCount": sagemaker_config.instance_count,
                    "InstanceType": sagemaker_config.instance_type,
                }
            ]
            endpoint_config_name = create_sagemaker_endpoint_config_name(
                deployment_spec.bento_name, deployment_spec.bento_version
            )

            logger.info(
                "Creating Sagemaker endpoint %s configuration", endpoint_config_name
            )
            try:
                create_config_response = sagemaker_client.create_endpoint_config(
                    EndpointConfigName=endpoint_config_name,
                    ProductionVariants=production_variants,
                )
                logger.debug(
                    "AWS create endpoint config response: %s", create_config_response
                )
            except ClientError as e:
                # create endpoint failed, will remove previously created model
                cleanup_model_error = _cleanup_sagemaker_model(
                    sagemaker_client,
                    deployment_spec.bento_name,
                    deployment_spec.bento_version,
                )
                if cleanup_model_error:
                    cleanup_model_error.error_message = (
                        'Failed to clean up model after unsuccessfully '
                        'create endpoint config: {}'.format(
                            cleanup_model_error.error_message
                        )
                    )
                    return ApplyDeploymentResponse(
                        status=cleanup_model_error, deployment=deployment_pb
                    )

                status = _parse_aws_client_exception_or_raise(e)
                status.error_message = (
                    'Failed to create endpoint config for SageMaker deployment: '
                    '{}'.format(status.error_message)
                )
                return ApplyDeploymentResponse(status=status, deployment=deployment_pb)

            endpoint_name = generate_aws_compatible_string(
                deployment_pb.namespace + '-' + deployment_spec.bento_name
            )
            try:
                if prev_deployment:
                    logger.debug("Updating sagemaker endpoint %s", endpoint_name)
                    update_endpoint_response = sagemaker_client.update_endpoint(
                        EndpointName=endpoint_name,
                        EndpointConfigName=endpoint_config_name,
                    )
                    logger.debug(
                        "AWS update endpoint response: %s", update_endpoint_response
                    )
                else:
                    logger.debug("Creating sagemaker endpoint %s", endpoint_name)
                    create_endpoint_response = sagemaker_client.create_endpoint(
                        EndpointName=endpoint_name,
                        EndpointConfigName=endpoint_config_name,
                    )
                    logger.debug(
                        "AWS create endpoint response: %s", create_endpoint_response
                    )
            except ClientError as e:
                # create/update endpoint failed, will remove previously created config
                # and then remove the model
                cleanup_endpoint_config_error = _cleanup_sagemaker_endpoint_config(
                    client=sagemaker_client,
                    name=deployment_spec.bento_name,
                    version=deployment_spec.bento_version,
                )
                if cleanup_endpoint_config_error:
                    cleanup_endpoint_config_error.error_message = (
                        'Failed to clean up endpoint config after unsuccessfully '
                        'apply SageMaker deployment: {}'.format(
                            cleanup_endpoint_config_error.error_message
                        )
                    )
                    return ApplyDeploymentResponse(
                        status=cleanup_endpoint_config_error, deployment=deployment_pb
                    )

                cleanup_model_error = _cleanup_sagemaker_model(
                    client=sagemaker_client,
                    name=deployment_spec.bento_name,
                    version=deployment_spec.bento_version,
                )
                if cleanup_model_error:
                    cleanup_model_error.error_message = (
                        'Failed to clean up model after unsuccessfully '
                        'apply SageMaker deployment: {}'.format(
                            cleanup_model_error.error_message
                        )
                    )
                    return ApplyDeploymentResponse(
                        status=cleanup_model_error, deployment=deployment_pb
                    )

                status = _parse_aws_client_exception_or_raise(e)
                status.error_message = (
                    'Failed to apply SageMaker deployment: {}'.format(
                        status.error_message
                    )
                )
                return ApplyDeploymentResponse(status=status, deployment=deployment_pb)

            res_deployment_pb = Deployment(state=DeploymentState())
            res_deployment_pb.CopyFrom(deployment_pb)

            return ApplyDeploymentResponse(
                status=Status.OK(), deployment=res_deployment_pb
            )
        except BentoMLException as error:
            return ApplyDeploymentResponse(status=exception_to_return_status(error))
Exemple #13
0
def create_deployment(
    deployment_name,
    namespace,
    bento_name,
    bento_version,
    platform,
    operator_spec,
    labels=None,
    annotations=None,
    yatai_service=None,
):
    """Create a new deployment for the given platform and apply it via Yatai.

    Builds a deployment dict from the platform-specific `operator_spec`,
    refusing to proceed when a deployment with the same name already exists.
    Returns an ApplyDeploymentResponse; BentoML errors are reported through
    an INTERNAL status rather than raised.
    """
    if yatai_service is None:
        # Imported lazily to avoid a circular import at module load time.
        from bentoml.yatai import get_yatai_service

        yatai_service = get_yatai_service()

    try:
        # Make sure there is no active deployment with the same deployment name
        get_deployment_pb = yatai_service.GetDeployment(
            GetDeploymentRequest(deployment_name=deployment_name, namespace=namespace)
        )
        if get_deployment_pb.status.status_code == status_pb2.Status.OK:
            # BUG FIX: added the missing spaces between concatenated string
            # fragments ("updatingexisting", "orunder" in the original).
            raise BentoMLDeploymentException(
                'Deployment "{name}" already existed, use Update or Apply for '
                'updating existing deployment, or create the deployment with a '
                'different name or under a different deployment '
                'namespace'.format(name=deployment_name)
            )
        if get_deployment_pb.status.status_code != status_pb2.Status.NOT_FOUND:
            # BUG FIX: typo "accesing" -> "accessing".
            raise BentoMLDeploymentException(
                'Failed accessing YataiService deployment store. {error_code}:'
                '{error_message}'.format(
                    error_code=Status.Name(get_deployment_pb.status.status_code),
                    error_message=get_deployment_pb.status.error_message,
                )
            )

        deployment_dict = {
            "name": deployment_name,
            "namespace": namespace or config().get('deployment', 'default_namespace'),
            "labels": labels,
            "annotations": annotations,
            "spec": {
                "bento_name": bento_name,
                "bento_version": bento_version,
                "operator": platform,
            },
        }

        # Map e.g. 'aws-sagemaker' to the proto enum name 'AWS_SAGEMAKER'.
        operator = platform.replace('-', '_').upper()
        try:
            operator_value = DeploymentSpec.DeploymentOperator.Value(operator)
        except ValueError:
            return ApplyDeploymentResponse(
                status=Status.INVALID_ARGUMENT('Invalid platform "{}"'.format(platform))
            )
        if operator_value == DeploymentSpec.AWS_SAGEMAKER:
            deployment_dict['spec']['sagemaker_operator_config'] = {
                'region': operator_spec.get('region')
                or config().get('aws', 'default_region'),
                'instance_count': operator_spec.get('instance_count')
                or config().getint('sagemaker', 'default_instance_count'),
                'instance_type': operator_spec.get('instance_type')
                or config().get('sagemaker', 'default_instance_type'),
                'api_name': operator_spec.get('api_name', ''),
            }
        elif operator_value == DeploymentSpec.AWS_LAMBDA:
            deployment_dict['spec']['aws_lambda_operator_config'] = {
                'region': operator_spec.get('region')
                or config().get('aws', 'default_region')
            }
            if operator_spec.get('api_name'):
                deployment_dict['spec']['aws_lambda_operator_config'][
                    'api_name'
                ] = operator_spec['api_name']
        elif operator_value == DeploymentSpec.GCP_FUNCTION:
            # BUG FIX: enum typo GCP_FCUNTION (AttributeError) and the key
            # 'gcp_function_operatorConfig', which mismatched the
            # 'gcp_function_operator_config' key read just below (KeyError).
            deployment_dict['spec']['gcp_function_operator_config'] = {
                'region': operator_spec.get('region')
                or config().get('google-cloud', 'default_region')
            }
            if operator_spec.get('api_name'):
                deployment_dict['spec']['gcp_function_operator_config'][
                    'api_name'
                ] = operator_spec['api_name']
        elif operator_value == DeploymentSpec.KUBERNETES:
            deployment_dict['spec']['kubernetes_operator_config'] = {
                'kube_namespace': operator_spec.get('kube_namespace', ''),
                'replicas': operator_spec.get('replicas', 0),
                'service_name': operator_spec.get('service_name', ''),
                'service_type': operator_spec.get('service_type', ''),
            }
        else:
            raise BentoMLDeploymentException(
                'Platform "{}" is not supported in the current version of '
                'BentoML'.format(platform)
            )

        return apply_deployment(deployment_dict, yatai_service)
    except BentoMLException as error:
        return ApplyDeploymentResponse(status=Status.INTERNAL(str(error)))
Exemple #14
0
    def create(
        name,
        bento,
        platform,
        output,
        namespace,
        labels,
        annotations,
        region,
        instance_type,
        instance_count,
        api_name,
        kube_namespace,
        replicas,
        service_name,
        service_type,
        wait,
    ):
        """Create a new model-serving deployment via the Yatai service.

        Builds a platform-specific DeploymentSpec from the CLI options and
        submits an ApplyDeployment request.  Fails if a deployment with the
        same name already exists in the namespace (use update/apply instead).

        Raises:
            BentoMLDeploymentException: malformed bento tag, duplicate
                deployment name, or unsupported platform.
            click.BadParameter: missing api-name for a SageMaker deployment.
        """
        # Fail fast on a malformed bento tag, before any remote calls are
        # made; the bare split would otherwise raise an opaque ValueError
        # only after the spec had been partially built.
        if ':' not in bento:
            raise BentoMLDeploymentException(
                'Expecting bento in the format of "name:version", instead '
                'got "{}"'.format(bento)
            )
        bento_name, bento_version = bento.split(':')

        # converting platform parameter to DeploymentOperator name in proto
        # e.g. 'aws-lambda' to 'AWS_LAMBDA'
        platform = platform.replace('-', '_').upper()
        operator = DeploymentSpec.DeploymentOperator.Value(platform)

        track_cli('deploy-create', platform)

        yatai_service = get_yatai_service()

        # Make sure there is no active deployment with the same deployment name
        get_deployment = yatai_service.GetDeployment(
            GetDeploymentRequest(deployment_name=name, namespace=namespace))
        if get_deployment.status.status_code != Status.NOT_FOUND:
            raise BentoMLDeploymentException(
                'Deployment {name} already existed, please use update or apply command'
                ' instead'.format(name=name))

        if operator == DeploymentSpec.AWS_SAGEMAKER:
            if not api_name:
                raise click.BadParameter(
                    'api-name is required for Sagemaker deployment')

            # CLI options take precedence; fall back to BentoML config defaults.
            sagemaker_operator_config = DeploymentSpec.SageMakerOperatorConfig(
                region=region or config().get('aws', 'default_region'),
                instance_count=instance_count
                or config().getint('sagemaker', 'instance_count'),
                instance_type=instance_type
                or config().get('sagemaker', 'instance_type'),
                api_name=api_name,
            )
            spec = DeploymentSpec(
                sagemaker_operator_config=sagemaker_operator_config)
        elif operator == DeploymentSpec.AWS_LAMBDA:
            aws_lambda_operator_config = DeploymentSpec.AwsLambdaOperatorConfig(
                region=region or config().get('aws', 'default_region'))
            if api_name:
                aws_lambda_operator_config.api_name = api_name
            spec = DeploymentSpec(
                aws_lambda_operator_config=aws_lambda_operator_config)
        elif operator == DeploymentSpec.GCP_FUNCTION:
            gcp_function_operator_config = DeploymentSpec.GcpFunctionOperatorConfig(
                region=region
                or config().get('google-cloud', 'default_region'))
            if api_name:
                gcp_function_operator_config.api_name = api_name
            spec = DeploymentSpec(
                gcp_function_operator_config=gcp_function_operator_config)
        elif operator == DeploymentSpec.KUBERNETES:
            kubernetes_operator_config = DeploymentSpec.KubernetesOperatorConfig(
                kube_namespace=kube_namespace,
                replicas=replicas,
                service_name=service_name,
                service_type=service_type,
            )
            spec = DeploymentSpec(
                kubernetes_operator_config=kubernetes_operator_config)
        else:
            raise BentoMLDeploymentException(
                'Custom deployment is not supported in the current version of BentoML'
            )

        spec.bento_name = bento_name
        spec.bento_version = bento_version
        spec.operator = operator

        result = yatai_service.ApplyDeployment(
            ApplyDeploymentRequest(deployment=Deployment(
                namespace=namespace,
                name=name,
                annotations=parse_key_value_pairs(annotations),
                labels=parse_key_value_pairs(labels),
                spec=spec,
            )))
        if result.status.status_code != Status.OK:
            _echo(
                'Failed to create deployment {name}. {error_code}: '
                '{error_message}'.format(
                    name=name,
                    error_code=Status.Code.Name(result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            if wait:
                # Block until the deployment reaches a terminal state, then
                # reflect that state in the displayed result.
                result_state = get_state_after_await_action_complete(
                    yatai_service=yatai_service,
                    name=name,
                    namespace=namespace,
                    message='Creating deployment ',
                )
                result.deployment.state.CopyFrom(result_state.state)

            _echo('Successfully created deployment {}'.format(name),
                  CLI_COLOR_SUCCESS)
            display_deployment_info(result.deployment, output)
Exemple #15
0
    def apply(
        bento,
        deployment_name,
        platform,
        output,
        namespace,
        labels,
        annotations,
        region,
        stage,
        instance_type,
        instance_count,
        api_name,
        kube_namespace,
        replicas,
        service_name,
        service_type,
    ):
        """Create or update a deployment from CLI options via ApplyDeployment.

        Raises:
            BentoMLDeploymentException: unsupported platform value.
        """
        track_cli('deploy-apply', platform)

        # BUGFIX: the proto field is `bento_version`; the previous
        # `bento_verison` misspelling made DeploymentSpec(...) reject the
        # keyword as an unknown field.
        bento_name, bento_version = bento.split(':')
        spec = DeploymentSpec(
            bento_name=bento_name,
            bento_version=bento_version,
            operator=get_deployment_operator_type(platform),
        )
        # Fill in the operator config matching the requested platform.
        if platform == 'aws_sagemaker':
            spec.sagemaker_operator_config = DeploymentSpec.SageMakerOperatorConfig(
                region=region,
                instance_count=instance_count,
                instance_type=instance_type,
                api_name=api_name,
            )
        elif platform == 'aws_lambda':
            spec.aws_lambda_operator_config = DeploymentSpec.AwsLambdaOperatorConfig(
                region=region, stage=stage)
        elif platform == 'gcp_function':
            spec.gcp_function_operator_config = \
                DeploymentSpec.GcpFunctionOperatorConfig(
                region=region, stage=stage
            )
        elif platform == 'kubernetes':
            spec.kubernetes_operator_config = DeploymentSpec.KubernetesOperatorConfig(
                kube_namespace=kube_namespace,
                replicas=replicas,
                service_name=service_name,
                service_type=service_type,
            )
        else:
            raise BentoMLDeploymentException(
                'Custom deployment is not supported in current version of BentoML'
            )

        result = get_yatai_service().ApplyDeployment(
            ApplyDeploymentRequest(deployment=Deployment(
                namespace=namespace,
                name=deployment_name,
                annotations=parse_key_value_pairs(annotations),
                labels=parse_key_value_pairs(labels),
                spec=spec,
            )))
        if result.status.status_code != Status.OK:
            _echo(
                'Failed to apply deployment {name}. code: {error_code}, message: '
                '{error_message}'.format(
                    name=deployment_name,
                    error_code=Status.Code.Name(result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            _echo(
                'Successfully apply deployment {}'.format(deployment_name),
                CLI_COLOR_SUCCESS,
            )
            display_deployment_info(result.deployment, output)
Exemple #16
0
def deployment_dict_to_pb(deployment_dict):
    """Convert a deployment description dict into a Deployment protobuf.

    Only keys present (and truthy, for spec fields) in the dict are copied
    onto the message; everything else is left at proto defaults.

    Args:
        deployment_dict: dict with optional 'name', 'namespace', 'labels',
            'annotations' and a required 'spec' sub-dict.

    Returns:
        A populated Deployment protobuf message.

    Raises:
        BentoMLDeploymentException: if 'spec' is missing.
        BentoMLException: if the spec's operator/platform is unsupported.
    """
    deployment_pb = Deployment()
    if deployment_dict.get('name') is not None:
        deployment_pb.name = deployment_dict.get('name')
    if deployment_dict.get('namespace') is not None:
        deployment_pb.namespace = deployment_dict.get('namespace')
    if deployment_dict.get('labels') is not None:
        deployment_pb.labels.update(deployment_dict.get('labels'))
    if deployment_dict.get('annotations') is not None:
        deployment_pb.annotations.update(deployment_dict.get('annotations'))

    if deployment_dict.get('spec'):
        spec_dict = deployment_dict.get('spec')
    else:
        raise BentoMLDeploymentException(
            '"spec" is required field for deployment')
    platform = spec_dict.get('operator')
    if platform is not None:
        # converting platform parameter to DeploymentOperator name in proto
        # e.g. 'aws-lambda' to 'AWS_LAMBDA'
        deployment_pb.spec.operator = DeploymentSpec.DeploymentOperator.Value(
            platform.replace('-', '_').upper())

    if spec_dict.get('bento_name'):
        deployment_pb.spec.bento_name = spec_dict.get('bento_name')
    if spec_dict.get('bento_version'):
        deployment_pb.spec.bento_version = spec_dict.get('bento_version')

    if deployment_pb.spec.operator == DeploymentSpec.AWS_SAGEMAKER:
        sagemaker_config = spec_dict.get('sagemaker_operator_config', {})
        sagemaker_operator_config_pb = deployment_pb.spec.sagemaker_operator_config
        if sagemaker_config.get('api_name'):
            sagemaker_operator_config_pb.api_name = sagemaker_config.get(
                'api_name')
        if sagemaker_config.get('region'):
            sagemaker_operator_config_pb.region = sagemaker_config.get(
                'region')
        if sagemaker_config.get('instance_count'):
            # Coerce to int in case the value came from YAML/CLI as a string.
            sagemaker_operator_config_pb.instance_count = int(
                sagemaker_config.get('instance_count'))
        if sagemaker_config.get('instance_type'):
            sagemaker_operator_config_pb.instance_type = sagemaker_config.get(
                'instance_type')
    elif deployment_pb.spec.operator == DeploymentSpec.AWS_LAMBDA:
        lambda_config = spec_dict.get('aws_lambda_operator_config', {})
        if lambda_config.get('region'):
            deployment_pb.spec.aws_lambda_operator_config.region = lambda_config.get(
                'region')
        if lambda_config.get('api_name'):
            deployment_pb.spec.aws_lambda_operator_config.api_name = lambda_config.get(
                'api_name')
    elif deployment_pb.spec.operator == DeploymentSpec.GCP_FUNCTION:
        gcp_config = spec_dict.get('gcp_function_operator_config', {})
        if gcp_config.get('region'):
            deployment_pb.spec.gcp_function_operator_config.region = gcp_config.get(
                'region')
        if gcp_config.get('api_name'):
            # BUGFIX: previously wrote api_name into aws_lambda_operator_config
            # (copy-paste from the lambda branch), silently dropping it from
            # the GCP config and switching the spec's config oneof.
            deployment_pb.spec.gcp_function_operator_config.api_name = gcp_config.get(
                'api_name')
    elif deployment_pb.spec.operator == DeploymentSpec.KUBERNETES:
        k8s_config = spec_dict.get('kubernetes_operator_config', {})
        k8s_operator_config_pb = deployment_pb.spec.kubernetes_operator_config

        if k8s_config.get('kube_namespace'):
            k8s_operator_config_pb.kube_namespace = k8s_config.get(
                'kube_namespace')
        if k8s_config.get('replicas'):
            k8s_operator_config_pb.replicas = k8s_config.get('replicas')
        if k8s_config.get('service_name'):
            k8s_operator_config_pb.service_name = k8s_config.get(
                'service_name')
        if k8s_config.get('service_type'):
            k8s_operator_config_pb.service_type = k8s_config.get(
                'service_type')
    else:
        raise BentoMLException(
            'Platform "{}" is not supported in the current version of '
            'BentoML'.format(platform))

    return deployment_pb