def apply(self, deployment_info, wait):
    """Create or update a deployment from a dict, YAML string or protobuf.

    Validates the deployment spec, submits it to the Yatai service and,
    when ``wait`` is true, blocks until the deployment action completes.
    Returns the freshly fetched deployment.
    """
    # Normalise the three accepted input forms into a Deployment protobuf.
    if isinstance(deployment_info, Deployment):
        deployment_pb = deployment_info
    elif isinstance(deployment_info, dict):
        deployment_pb = deployment_dict_to_pb(deployment_info)
    elif isinstance(deployment_info, str):
        deployment_pb = deployment_yaml_string_to_pb(deployment_info)
    else:
        raise YataiDeploymentException(
            'Unexpected argument type, expect deployment info to be str in yaml '
            'format or a dict or a deployment protobuf obj, instead got: {}'
            .format(str(type(deployment_info)))
        )

    errors = validate_deployment_pb_schema(deployment_pb)
    if errors:
        raise YataiDeploymentException(
            f'Failed to validate deployment {deployment_pb.name}: '
            f'{errors}'
        )

    response = self.yatai_service.ApplyDeployment(
        ApplyDeploymentRequest(deployment=deployment_pb)
    )
    if response.status.status_code != status_pb2.Status.OK:
        error_code, error_message = status_pb_to_error_code_and_message(
            response.status
        )
        raise YataiDeploymentException(f'{error_code}:{error_message}')

    if wait:
        self._wait_deployment_action_complete(
            deployment_pb.name, deployment_pb.namespace
        )
    return self.get(namespace=deployment_pb.namespace, name=deployment_pb.name)
def get_arn_role_from_current_aws_user():
    """Resolve an IAM role ARN that SageMaker can assume for the caller.

    For user/root identities, scans the account's IAM roles for one whose
    assume-role policy allows ``sagemaker.amazonaws.com``; for role or
    assumed-role identities, returns the current role's ARN directly.

    Raises:
        YataiDeploymentException: if no suitable role exists or the STS
            identity type is not supported.
    """
    sts_client = boto3.client("sts")
    identity = sts_client.get_caller_identity()
    sts_arn = identity["Arn"]
    # ARN resource segment looks like "user/alice" or "assumed-role/name/session".
    type_role = sts_arn.split(":")[-1].split("/")
    iam_client = boto3.client("iam")
    if type_role[0] in ("user", "root"):
        arn = None
        # BUG FIX: list_roles() is paginated (at most 100 roles per call);
        # iterate every page so a matching role beyond the first page is found.
        for page in iam_client.get_paginator("list_roles").paginate():
            for role in page["Roles"]:
                policy_document = role["AssumeRolePolicyDocument"]
                statement = policy_document["Statement"][0]
                if ("Service" in statement["Principal"]
                        and statement["Effect"] == "Allow"
                        and "sagemaker.amazonaws.com"
                        in statement["Principal"]["Service"]):
                    arn = role["Arn"]
        if arn is None:
            raise YataiDeploymentException(
                "Can't find proper Arn role for Sagemaker, please create one and try "
                "again")
        return arn
    elif type_role[0] in ["role", "assumed-role"]:
        role_response = iam_client.get_role(RoleName=type_role[1])
        return role_response["Role"]["Arn"]
    raise YataiDeploymentException(
        "Not supported role type {}; sts arn is {}".format(
            type_role[0], sts_arn))
def _build_and_push_docker_image_to_azure_container_registry(
    azure_functions_project_dir,
    container_registry_name,
    resource_group_name,
    bento_name,
    bento_version,
    bento_python_version,
):
    """Build the Azure Functions docker image for a bento and push it to ACR.

    Returns the fully qualified (lowercased) image tag that was pushed.
    Raises YataiDeploymentException when the docker daemon is unreachable
    or the build/push fails.
    """
    _login_acr_registry(container_registry_name, resource_group_name)
    client = docker.from_env()
    major, minor, _ = bento_python_version.split('.')

    # Fail fast if the local docker daemon cannot be reached.
    try:
        client.ping()
    except docker.errors.APIError as err:
        raise YataiDeploymentException(
            f'Failed to get response from docker server: {str(err)}'
        )

    tag = f'{container_registry_name}.azurecr.io/{bento_name}:{bento_version}'.lower()
    logger.debug(f'Building docker image {tag}')
    build_args = {
        'BENTOML_VERSION': LAST_PYPI_RELEASE_VERSION,
        'PYTHON_VERSION': major + minor,
    }
    try:
        client.images.build(
            path=azure_functions_project_dir,
            dockerfile=os.path.join(azure_functions_project_dir, 'Dockerfile-azure'),
            tag=tag,
            buildargs=build_args,
        )
        logger.debug('Finished building docker image')
    except docker.errors.BuildError as e:
        raise YataiDeploymentException(
            f'Failed to build docker image. BuildError: {str(e)}'
        )
    except docker.errors.APIError as e:
        raise YataiDeploymentException(
            f'Failed to build docker image. APIError: {str(e)}'
        )

    logger.debug(f'Pushing docker image {tag}')
    try:
        client.images.push(tag)
        logger.debug('Finished pushing docker image')
    except docker.errors.APIError as e:
        raise YataiDeploymentException(
            f'Failed to push docker image. APIError: {str(e)}'
        )
    return tag
def create(self, deployment_info, wait):
    """Create a brand-new deployment, failing if one with the same name exists.

    Accepts a dict, a YAML string or a Deployment protobuf; optionally waits
    for the deployment action to complete before returning the result.
    """
    from bentoml.yatai.validator import validate_deployment_pb

    if isinstance(deployment_info, Deployment):
        deployment_pb = deployment_info
    elif isinstance(deployment_info, dict):
        deployment_pb = deployment_dict_to_pb(deployment_info)
    elif isinstance(deployment_info, str):
        deployment_pb = deployment_yaml_string_to_pb(deployment_info)
    else:
        raise YataiDeploymentException(
            'Unexpected argument type, expect deployment info to be str in yaml '
            'format or a dict or a deployment protobuf obj, instead got: {}'.format(
                str(type(deployment_info))
            )
        )

    validation_errors = validate_deployment_pb(deployment_pb)
    if validation_errors:
        raise YataiDeploymentException(
            f'Failed to validate deployment {deployment_pb.name}: '
            f'{validation_errors}'
        )

    # Make sure there is no active deployment with the same deployment name
    existing = self.yatai_service.GetDeployment(
        GetDeploymentRequest(
            deployment_name=deployment_pb.name, namespace=deployment_pb.namespace
        )
    )
    if existing.status.status_code != status_pb2.Status.NOT_FOUND:
        raise BentoMLException(
            f'Deployment "{deployment_pb.name}" already existed, use Update or '
            f'Apply for updating existing deployment, delete the deployment, '
            f'or use a different deployment name'
        )

    apply_result = self.yatai_service.ApplyDeployment(
        ApplyDeploymentRequest(deployment=deployment_pb)
    )
    if apply_result.status.status_code != status_pb2.Status.OK:
        error_code, error_message = status_pb_to_error_code_and_message(
            apply_result.status
        )
        raise YataiDeploymentException(f'{error_code}:{error_message}')

    if wait:
        self._wait_deployment_action_complete(
            deployment_pb.name, deployment_pb.namespace
        )
    return self.get(namespace=deployment_pb.namespace, name=deployment_pb.name)
def _update(self, deployment_pb, current_deployment, bento_pb, bento_path):
    """Redeploy an existing AWS Lambda deployment with an updated bento bundle."""
    # Remote bundles are materialised locally first, then we recurse with
    # the local path so the rest of the method only handles local bundles.
    if loader._is_remote_path(bento_path):
        with loader._resolve_remote_bundle_path(bento_path) as local_path:
            return self._update(
                deployment_pb, current_deployment, bento_pb, local_path
            )

    spec = deployment_pb.spec
    lambda_config = spec.aws_lambda_operator_config
    bento_service_metadata = bento_pb.bento.bento_service_metadata

    describe_result = self.describe(deployment_pb)
    if describe_result.status.status_code != status_pb2.Status.OK:
        error_code, error_message = status_pb_to_error_code_and_message(
            describe_result.status
        )
        raise YataiDeploymentException(
            f'Failed fetching Lambda deployment current status - '
            f'{error_code}:{error_message}'
        )

    latest_deployment_state = json.loads(describe_result.state.info_json)
    # The existing deployment must expose its S3 bucket; we reuse it below.
    if 's3_bucket' not in latest_deployment_state:
        raise BentoMLException(
            'S3 Bucket is missing in the AWS Lambda deployment, please make sure '
            'it exists and try again'
        )
    lambda_s3_bucket = latest_deployment_state['s3_bucket']

    _deploy_lambda_function(
        deployment_pb=deployment_pb,
        bento_service_metadata=bento_service_metadata,
        deployment_spec=spec,
        lambda_s3_bucket=lambda_s3_bucket,
        lambda_deployment_config=lambda_config,
        bento_path=bento_path,
    )
    return ApplyDeploymentResponse(deployment=deployment_pb, status=Status.OK())
def apply_deployment(deployment_info, yatai_service=None):
    """Validate and apply a deployment described as a dict or a YAML string.

    Returns an ApplyDeploymentResponse; BentoML errors are converted into an
    INTERNAL status instead of being propagated to the caller.
    """
    if yatai_service is None:
        from bentoml.yatai import get_yatai_service

        yatai_service = get_yatai_service()
    try:
        if isinstance(deployment_info, dict):
            deployment_pb = deployment_dict_to_pb(deployment_info)
        elif isinstance(deployment_info, str):
            deployment_pb = deployment_yaml_string_to_pb(deployment_info)
        else:
            raise YataiDeploymentException(
                'Unexpected argument type, expect deployment info to be str in yaml '
                'format or a dict, instead got: {}'.format(
                    str(type(deployment_info))
                )
            )

        validation_errors = validate_deployment_pb_schema(deployment_pb)
        if validation_errors:
            return ApplyDeploymentResponse(
                status=Status.INVALID_ARGUMENT(
                    'Failed to validate deployment: {errors}'.format(
                        errors=validation_errors
                    )
                )
            )
        return yatai_service.ApplyDeployment(
            ApplyDeploymentRequest(deployment=deployment_pb)
        )
    except BentoMLException as error:
        return ApplyDeploymentResponse(status=Status.INTERNAL(str(error)))
def get_deployment_operator(yatai_service, deployment_pb):
    """Instantiate the deployment operator matching the spec's platform.

    Operator modules are imported lazily inside each branch so that heavy,
    platform-specific dependencies load only when actually needed.
    """
    platform = deployment_pb.spec.operator
    if platform == DeploymentSpec.AWS_SAGEMAKER:
        from bentoml.yatai.deployment.sagemaker.operator import (
            SageMakerDeploymentOperator,
        )

        return SageMakerDeploymentOperator(yatai_service)
    if platform == DeploymentSpec.AWS_LAMBDA:
        from bentoml.yatai.deployment.aws_lambda.operator import (
            AwsLambdaDeploymentOperator,
        )

        return AwsLambdaDeploymentOperator(yatai_service)
    if platform == DeploymentSpec.AZURE_FUNCTIONS:
        from bentoml.yatai.deployment.azure_functions.operator import (
            AzureFunctionsDeploymentOperator,
        )

        return AzureFunctionsDeploymentOperator(yatai_service)
    if platform == DeploymentSpec.AWS_EC2:
        from bentoml.yatai.deployment.aws_ec2.operator import AwsEc2DeploymentOperator

        return AwsEc2DeploymentOperator(yatai_service)
    if platform == DeploymentSpec.CUSTOM:
        raise NotImplementedError(
            "Custom deployment operator is not supported in current version of BentoML"
        )
    raise YataiDeploymentException("DeployOperator must be set")
def add(self, deployment_pb):
    """Create a new SageMaker deployment for the given deployment protobuf.

    Defaults the SageMaker region when unset, fetches the target bento and
    delegates to ``_add``. Any BentoMLException is captured into the
    deployment's error state and returned as an ApplyDeploymentResponse.
    """
    try:
        deployment_spec = deployment_pb.spec
        sagemaker_config = deployment_spec.sagemaker_operator_config
        # BUG FIX: this guard previously ran *after* sagemaker_config.region
        # was dereferenced and assigned, making it unreachable; it must run
        # before the config is first used.
        if sagemaker_config is None:
            raise YataiDeploymentException(
                "Sagemaker configuration is missing.")
        sagemaker_config.region = (sagemaker_config.region
                                   or get_default_aws_region())
        bento_pb = self.yatai_service.GetBento(
            GetBentoRequest(
                bento_name=deployment_spec.bento_name,
                bento_version=deployment_spec.bento_version,
            ))
        # Only local and S3 bento repositories can be deployed to SageMaker.
        if bento_pb.bento.uri.type not in (BentoUri.LOCAL, BentoUri.S3):
            raise BentoMLException(
                "BentoML currently not support {} repository".format(
                    BentoUri.StorageType.Name(bento_pb.bento.uri.type)))
        return self._add(deployment_pb, bento_pb, bento_pb.bento.uri.uri)
    except BentoMLException as error:
        deployment_pb.state.state = DeploymentState.ERROR
        deployment_pb.state.error_message = (
            f"Error creating SageMaker deployment: {str(error)}")
        return ApplyDeploymentResponse(status=error.status_proto,
                                       deployment=deployment_pb)
def get_deployment_operator(yatai_service, deployment_pb):
    """Return the deployment operator instance for the spec's platform.

    Supported platforms import their operator lazily; unsupported ones raise
    NotImplementedError, and an unset operator raises YataiDeploymentException.
    """
    platform = deployment_pb.spec.operator
    if platform == DeploymentSpec.AWS_SAGEMAKER:
        from bentoml.yatai.deployment.sagemaker.operator import (
            SageMakerDeploymentOperator,
        )

        return SageMakerDeploymentOperator(yatai_service)
    if platform == DeploymentSpec.AWS_LAMBDA:
        from bentoml.yatai.deployment.aws_lambda.operator import (
            AwsLambdaDeploymentOperator,
        )

        return AwsLambdaDeploymentOperator(yatai_service)
    if platform == DeploymentSpec.GCP_FUNCTION:
        raise NotImplementedError(
            "GCP Function deployment operator is not supported in current version of "
            "BentoML")
    if platform == DeploymentSpec.KUBERNETES:
        raise NotImplementedError(
            "Kubernetes deployment operator is not supported in current version of "
            "BentoML")
    if platform == DeploymentSpec.CUSTOM:
        raise NotImplementedError(
            "Custom deployment operator is not supported in current version of BentoML"
        )
    raise YataiDeploymentException("DeployOperator must be set")
def _assert_az_cli_logged_in():
    """Raise YataiDeploymentException if the Azure CLI has no signed-in account."""
    account_info = _call_az_cli(
        command=['az', 'account', 'show'], message='show Azure account'
    )
    # Both a user and a home tenant must be present for a usable session.
    logged_in = account_info['user'] and account_info['homeTenantId']
    if not logged_in:
        raise YataiDeploymentException(
            'A signed in Azure CLI is required for Azure Function deployment'
        )
def delete(self, name, namespace):
    """Delete the deployment identified by (name, namespace).

    Raises:
        YataiDeploymentException: when no such deployment exists.
    """
    with create_session(self.sess_maker) as sess:
        try:
            deployment = (sess.query(Deployment).filter_by(
                name=name, namespace=namespace).one())
            return sess.delete(deployment)
        except NoResultFound:
            # BUG FIX: the %-format arguments must be a tuple; previously only
            # `name` was applied to the two %s placeholders (raising TypeError
            # at raise time) and `namespace` became a stray second argument.
            raise YataiDeploymentException(
                "Deployment '%s' in namespace: '%s' is not found"
                % (name, namespace))
def validate_tag(ctx, param, tag):  # pylint: disable=unused-argument
    """Click callback validating a docker image tag of the form ``name[:version]``.

    Returns the tag unchanged when valid (or None); raises
    YataiDeploymentException when the name or version part is invalid.
    """
    if tag is None:
        return tag
    if ":" in tag:
        name, version = tag.split(":")[:2]
    else:
        name, version = tag, None
    valid_name_pattern = re.compile(
        r"""
        ^(
        [a-z0-9]+       # alphanumeric
        (\.|_{1,2}|-+)? # separators: period, 1-2 underscores, or dashes
        )*$
        """,
        # BUG FIX: the period was previously an unescaped `.`, which matches
        # ANY character and let invalid name components (e.g. uppercase
        # letters) pass validation.
        re.VERBOSE,
    )
    valid_version_pattern = re.compile(
        r"""
        ^
        [a-zA-Z0-9] # cant start with .-
        [ -~]{,127} # ascii match rest, cap at 128
        $
        """,
        re.VERBOSE,
    )
    if not valid_name_pattern.match(name):
        raise YataiDeploymentException(
            f"Provided Docker Image tag {tag} is invalid. "
            "Name components may contain lowercase letters, digits "
            "and separators. A separator is defined as a period, "
            "one or two underscores, or one or more dashes.")
    if version and not valid_version_pattern.match(version):
        raise YataiDeploymentException(
            f"Provided Docker Image tag {tag} is invalid. "
            "A tag name must be valid ASCII and may contain "
            "lowercase and uppercase letters, digits, underscores, "
            "periods and dashes. A tag name may not start with a period "
            "or a dash and may contain a maximum of 128 characters.")
    return tag
def deployment_dict_to_pb(deployment_dict):
    """Convert a deployment description dict into a Deployment protobuf.

    Requires a 'spec' entry; maps the 'operator' platform string onto the
    DeploymentOperator enum and copies the operator-specific config fields.
    Raises InvalidArgument for platforms without a supported operator config.
    """
    deployment_pb = Deployment()
    spec_dict = deployment_dict.get('spec')
    if not spec_dict:
        raise YataiDeploymentException(
            '"spec" is required field for deployment')

    platform = spec_dict.get('operator')
    if platform is not None:
        # converting platform parameter to DeploymentOperator name in proto
        # e.g. 'aws-lambda' to 'AWS_LAMBDA'
        deployment_pb.spec.operator = DeploymentSpec.DeploymentOperator.Value(
            platform.replace('-', '_').upper())

    for field in ('name', 'namespace'):
        value = deployment_dict.get(field)
        if value:
            setattr(deployment_pb, field, value)
    if deployment_dict.get('labels') is not None:
        deployment_pb.labels.update(deployment_dict.get('labels'))
    if deployment_dict.get('annotations') is not None:
        deployment_pb.annotations.update(deployment_dict.get('annotations'))

    if spec_dict.get('bento_name'):
        deployment_pb.spec.bento_name = spec_dict.get('bento_name')
    if spec_dict.get('bento_version'):
        deployment_pb.spec.bento_version = spec_dict.get('bento_version')

    if deployment_pb.spec.operator == DeploymentSpec.AWS_SAGEMAKER:
        sagemaker_config = spec_dict.get('sagemaker_operator_config', {})
        sagemaker_config_pb = deployment_pb.spec.sagemaker_operator_config
        for field in (
            'region',
            'api_name',
            'instance_type',
            'num_of_gunicorn_workers_per_instance',
            'instance_count',
        ):
            if sagemaker_config.get(field):
                setattr(sagemaker_config_pb, field, sagemaker_config.get(field))
    elif deployment_pb.spec.operator == DeploymentSpec.AWS_LAMBDA:
        lambda_conf = spec_dict.get('aws_lambda_operator_config', {})
        lambda_config_pb = deployment_pb.spec.aws_lambda_operator_config
        for field in ('region', 'api_name', 'memory_size', 'timeout'):
            if lambda_conf.get(field):
                setattr(lambda_config_pb, field, lambda_conf.get(field))
    else:
        raise InvalidArgument(
            'Platform "{}" is not supported in the current version of '
            'BentoML'.format(platform))
    return deployment_pb
def delete(sess, name, namespace):
    """Delete the deployment (and its labels) identified by (name, namespace).

    Raises:
        YataiDeploymentException: when no such deployment exists.
    """
    try:
        deployment = (
            sess.query(Deployment).filter_by(name=name, namespace=namespace).one()
        )
        # Remove the deployment's label rows before deleting the record itself.
        LabelStore.delete(
            sess, resource_type=RESOURCE_TYPE.deployment, resource_id=deployment.id,
        )
        return sess.delete(deployment)
    except NoResultFound:
        # BUG FIX: the %-format arguments must be a tuple; previously only
        # `name` was applied to the two %s placeholders (raising TypeError
        # at raise time) and `namespace` became a stray second argument.
        raise YataiDeploymentException(
            "Deployment '%s' in namespace: '%s' is not found" % (name, namespace)
        )
def delete(self, name, namespace):
    """Delete the deployment (and its labels) identified by (name, namespace).

    Raises:
        YataiDeploymentException: when no such deployment exists.
    """
    with create_session(self.sess_maker) as sess:
        try:
            deployment = (sess.query(Deployment).filter_by(
                name=name, namespace=namespace).one())
            # Remove the deployment's label rows before deleting the record.
            delete_labels(
                sess,
                resource_type=RESOURCE_TYPE.deployment,
                resource_id=deployment.id,
            )
            return sess.delete(deployment)
        except NoResultFound:
            # BUG FIX: the %-format arguments must be a tuple; previously only
            # `name` was applied to the two %s placeholders (raising TypeError
            # at raise time) and `namespace` became a stray second argument.
            raise YataiDeploymentException(
                "Deployment '%s' in namespace: '%s' is not found"
                % (name, namespace))
def add(self, deployment_pb):
    """Create a new Azure Functions deployment for the given protobuf.

    Any BentoMLException is captured into the deployment's error state and
    returned as an ApplyDeploymentResponse instead of propagating.
    """
    try:
        deployment_spec = deployment_pb.spec
        # Location is mandatory for provisioning Azure resources.
        if not deployment_spec.azure_functions_operator_config.location:
            raise YataiDeploymentException(
                'Azure Functions parameter "location" is missing')
        bento_repo_pb = self.yatai_service.GetBento(
            GetBentoRequest(
                bento_name=deployment_spec.bento_name,
                bento_version=deployment_spec.bento_version,
            ))
        return self._add(
            deployment_pb, bento_repo_pb.bento, bento_repo_pb.bento.uri.uri
        )
    except BentoMLException as error:
        deployment_pb.state.state = DeploymentState.ERROR
        deployment_pb.state.error_message = f'Error: {str(error)}'
        return ApplyDeploymentResponse(
            status=error.status_proto, deployment=deployment_pb
        )
def apply_deployment(self, deployment_info):
    """Validate and apply a deployment given as dict, YAML string or protobuf.

    Schema validation failures are returned as an INVALID_ARGUMENT response
    rather than raised.
    """
    if isinstance(deployment_info, Deployment):
        deployment_pb = deployment_info
    elif isinstance(deployment_info, dict):
        deployment_pb = deployment_dict_to_pb(deployment_info)
    elif isinstance(deployment_info, str):
        deployment_pb = deployment_yaml_string_to_pb(deployment_info)
    else:
        raise YataiDeploymentException(
            'Unexpected argument type, expect deployment info to be str in yaml '
            'format or a dict or a deployment protobuf obj, instead got: {}'
            .format(str(type(deployment_info)))
        )

    validation_errors = validate_deployment_pb_schema(deployment_pb)
    if validation_errors:
        return ApplyDeploymentResponse(
            status=Status.INVALID_ARGUMENT(
                'Failed to validate deployment: {errors}'.format(
                    errors=validation_errors
                )
            )
        )
    return self.yatai_service.ApplyDeployment(
        ApplyDeploymentRequest(deployment=deployment_pb)
    )
def _update(self, deployment_pb, previous_deployment_pb, bento_path, region):
    """Redeploy an existing AWS EC2 deployment with an updated bento bundle."""
    # Remote bundles are downloaded to a local path first, then we recurse.
    if loader._is_remote_path(bento_path):
        with loader._resolve_remote_bundle_path(bento_path) as local_path:
            return self._update(
                deployment_pb, previous_deployment_pb, local_path, region
            )

    spec = deployment_pb.spec
    ec2_config = spec.aws_ec2_operator_config

    describe_result = self.describe(deployment_pb)
    if describe_result.status.status_code != status_pb2.Status.OK:
        error_code, error_message = status_pb_to_error_code_and_message(
            describe_result.status
        )
        raise YataiDeploymentException(
            f"Failed fetching ec2 deployment current status - "
            f"{error_code}:{error_message}"
        )

    previous_state = json.loads(describe_result.state.info_json)
    # Reuse the S3 bucket recorded on the existing deployment.
    if "S3Bucket" not in previous_state:
        raise BentoMLException(
            "S3 Bucket is missing in the AWS EC2 deployment, please make sure "
            "it exists and try again"
        )
    s3_bucket_name = previous_state.get("S3Bucket")

    self.deploy_service(
        deployment_pb,
        spec,
        bento_path,
        ec2_config,
        s3_bucket_name,
        region,
    )
    return ApplyDeploymentResponse(status=Status.OK(), deployment=deployment_pb)
def create_deployment(
    deployment_name,
    namespace,
    bento_name,
    bento_version,
    platform,
    operator_spec,
    labels=None,
    annotations=None,
    yatai_service=None,
):
    """Create a new deployment through the Yatai service.

    Builds a deployment dict from the arguments, applies it via
    ``apply_deployment`` and, on success, copies the described runtime state
    into the returned ApplyDeploymentResponse.
    """
    if yatai_service is None:
        from bentoml.yatai import get_yatai_service

        yatai_service = get_yatai_service()

    # Make sure there is no active deployment with the same deployment name
    get_deployment_pb = yatai_service.GetDeployment(
        GetDeploymentRequest(deployment_name=deployment_name, namespace=namespace)
    )
    if get_deployment_pb.status.status_code == status_pb2.Status.OK:
        raise YataiDeploymentException(
            'Deployment "{name}" already existed, use Update or Apply for updating '
            'existing deployment, delete the deployment, or use a different deployment '
            'name'.format(name=deployment_name)
        )
    if get_deployment_pb.status.status_code != status_pb2.Status.NOT_FOUND:
        raise YataiDeploymentException(
            'Failed accesing YataiService deployment store. {error_code}:'
            '{error_message}'.format(
                error_code=Status.Name(get_deployment_pb.status.status_code),
                error_message=get_deployment_pb.status.error_message,
            )
        )

    deployment_dict = {
        "name": deployment_name,
        "namespace": namespace or config().get('deployment', 'default_namespace'),
        "labels": labels,
        "annotations": annotations,
        "spec": {
            "bento_name": bento_name,
            "bento_version": bento_version,
            "operator": platform,
        },
    }

    operator = platform.replace('-', '_').upper()
    try:
        operator_value = DeploymentSpec.DeploymentOperator.Value(operator)
    except ValueError:
        return ApplyDeploymentResponse(
            status=Status.INVALID_ARGUMENT('Invalid platform "{}"'.format(platform))
        )

    if operator_value == DeploymentSpec.AWS_SAGEMAKER:
        sagemaker_config = {
            'region': operator_spec.get('region')
            or config().get('aws', 'default_region'),
            'instance_count': operator_spec.get('instance_count'),
            'instance_type': operator_spec.get('instance_type'),
            'api_name': operator_spec.get('api_name', ''),
        }
        if operator_spec.get('num_of_gunicorn_workers_per_instance'):
            sagemaker_config['num_of_gunicorn_workers_per_instance'] = (
                operator_spec.get('num_of_gunicorn_workers_per_instance')
            )
        deployment_dict['spec']['sagemaker_operator_config'] = sagemaker_config
    elif operator_value == DeploymentSpec.AWS_LAMBDA:
        lambda_config = {
            'region': operator_spec.get('region')
            or config().get('aws', 'default_region')
        }
        for field in ('api_name', 'memory_size', 'timeout'):
            if operator_spec.get(field):
                lambda_config[field] = operator_spec[field]
        deployment_dict['spec']['aws_lambda_operator_config'] = lambda_config
    elif operator_value == DeploymentSpec.KUBERNETES:
        deployment_dict['spec']['kubernetes_operator_config'] = {
            'kube_namespace': operator_spec.get('kube_namespace', ''),
            'replicas': operator_spec.get('replicas', 0),
            'service_name': operator_spec.get('service_name', ''),
            'service_type': operator_spec.get('service_type', ''),
        }
    else:
        raise YataiDeploymentException(
            'Platform "{}" is not supported in the current version of '
            'BentoML'.format(platform)
        )

    apply_response = apply_deployment(deployment_dict, yatai_service)
    if apply_response.status.status_code == status_pb2.Status.OK:
        # Enrich the response with the deployment's current runtime state.
        describe_response = describe_deployment(
            deployment_name, namespace, yatai_service
        )
        if describe_response.status.status_code == status_pb2.Status.OK:
            apply_response.deployment.state.CopyFrom(describe_response.state)
    return apply_response
def containerize_bento_service(
    bento_name,
    bento_version,
    saved_bundle_path,
    push=False,
    tag=None,
    build_arg=None,
    username=None,
    password=None,
):
    """Containerize specified BentoService.

    BENTO is the target BentoService to be containerized, referenced by its name
    and version in format of name:version. For example: "iris_classifier:v1.2.0"

    `bentoml containerize` command also supports the use of the `latest` tag
    which will automatically use the last built version of your Bento.

    You can provide a tag for the image built by Bento using the
    `--docker-image-tag` flag. Additionally, you can provide a `--push` flag,
    which will push the built image to the Docker repository specified by the
    image tag.

    You can also prefix the tag with a hostname for the repository you wish
    to push to.
    e.g. `bentoml containerize IrisClassifier:latest --push --tag username/iris`
    would build a Docker image called `username/iris:latest` and push that to
    Docker Hub.

    By default, the `containerize` command will use the credentials provided by
    Docker. You may provide your own through `--username` and `--password`.
    """
    name = to_valid_docker_image_name(bento_name)
    version = to_valid_docker_image_version(bento_version)

    # Default the tag to name:version; append the version when the caller's
    # tag has no version component of its own.
    if not tag:
        tag = f"{name}:{version}"
    if ":" not in tag:
        tag = f"{tag}:{version}"

    docker_build_args = {}
    if build_arg:
        for arg in build_arg:
            # BUG FIX: split only on the first '=' so build-arg values that
            # themselves contain '=' are preserved instead of raising
            # "too many values to unpack".
            key, value = arg.split("=", 1)
            docker_build_args[key] = value

    import docker

    docker_api = docker.APIClient()
    try:
        logger.info("Building image")
        for line in docker_api.build(
            path=saved_bundle_path,
            tag=tag,
            decode=True,
            buildargs=docker_build_args,
        ):
            logger.debug(line)
    except docker.errors.APIError as error:
        raise YataiDeploymentException(
            f"Could not build Docker image: {error}")

    if push:
        # Only send an auth payload when explicit credentials were given;
        # otherwise fall back to the docker client's stored credentials.
        auth_config_payload = ({
            "username": username,
            "password": password
        } if username or password else None)
        try:
            logger.info("Pushing image")
            for line in docker_api.push(
                repository=tag,
                stream=True,
                decode=True,
                auth_config=auth_config_payload,
            ):
                logger.debug(line)
        except docker.errors.APIError as error:
            raise YataiDeploymentException(
                f"Could not push Docker image: {error}")
    return tag