def delete(name, namespace, force):
    """Delete the named deployment through the yatai service.

    Emits 'deploy-delete' telemetry before the call and, on success,
    'deploy-delete-success' with the deployment's uptime in seconds.
    Prints a colored success/failure message for the user.
    """
    service = get_yatai_service()
    lookup = get_deployment(namespace, name, service)
    if lookup.status.status_code != status_pb2.Status.OK:
        _echo(
            'Failed to get deployment {} for deletion. {}:{}'.format(
                name,
                status_pb2.Status.Code.Name(lookup.status.status_code),
                lookup.status.error_message,
            ),
            CLI_COLOR_ERROR,
        )
        return
    platform = DeploymentSpec.DeploymentOperator.Name(
        lookup.deployment.spec.operator
    )
    track_cli('deploy-delete', platform)
    result = delete_deployment(name, namespace, force, service)
    if result.status.status_code != status_pb2.Status.OK:
        _echo(
            'Failed to delete deployment {name}. code: {error_code}, message: '
            '{error_message}'.format(
                name=name,
                error_code=status_pb2.Status.Code.Name(
                    result.status.status_code
                ),
                error_message=result.status.error_message,
            ),
            CLI_COLOR_ERROR,
        )
        return
    extra_properties = {}
    if lookup.deployment.created_at:
        # Report how long the deployment lived so telemetry can aggregate it.
        created = lookup.deployment.created_at.ToDatetime()
        extra_properties['uptime'] = int(
            (datetime.utcnow() - created).total_seconds()
        )
    track_cli('deploy-delete-success', platform, extra_properties)
    _echo('Successfully deleted deployment "{}"'.format(name), CLI_COLOR_SUCCESS)
def apply(deployment_yaml, output, wait):
    """Apply a deployment described by a parsed deployment YAML dict.

    Args:
        deployment_yaml: dict parsed from the user's deployment spec file.
        output: output format forwarded to display_deployment_info.
        wait: when truthy, block until the apply action completes and show
            the final deployment state.
    """
    track_cli('deploy-apply', deployment_yaml.get('spec', {}).get('operator'))
    # Resolve the name up front: the except-branch below must not touch
    # deployment_pb, which is unbound if deployment_yaml_to_pb itself raises.
    deployment_name = deployment_yaml.get('name')
    try:
        deployment_pb = deployment_yaml_to_pb(deployment_yaml)
        yatai_service = get_yatai_service()
        result = yatai_service.ApplyDeployment(
            ApplyDeploymentRequest(deployment=deployment_pb))
        if result.status.status_code != Status.OK:
            _echo(
                'Failed to apply deployment {name}. code: {error_code}, message: '
                '{error_message}'.format(
                    name=deployment_pb.name,
                    error_code=Status.Code.Name(result.status.status_code),
                    error_message=result.status.error_message,
                ),
                CLI_COLOR_ERROR,
            )
        else:
            if wait:
                result_state = get_state_after_await_action_complete(
                    yatai_service=yatai_service,
                    name=deployment_pb.name,
                    namespace=deployment_pb.namespace,
                    message='Applying deployment',
                )
                result.deployment.state.CopyFrom(result_state.state)
            _echo(
                'Successfully applied spec to deployment {}'.format(
                    deployment_pb.name),
                CLI_COLOR_SUCCESS,
            )
            display_deployment_info(result.deployment, output)
    except BentoMLException as e:
        # BUG FIX: previously referenced deployment_pb.name here, which raised
        # NameError whenever deployment_yaml_to_pb failed. Also echo the error
        # in the error color, consistent with every other failure path.
        _echo(
            'Failed to apply deployment {name}. Error message: {message}'.format(
                name=deployment_name, message=e),
            CLI_COLOR_ERROR,
        )
def delete(name, namespace, force):
    """Delete the named deployment via YataiClient.

    On success, sends a 'deploy-delete-success' telemetry event that
    includes the deployment's uptime in seconds when a creation timestamp
    is available.
    """
    yatai_client = YataiClient()
    lookup = yatai_client.deployment.get(namespace, name)
    if lookup.status.status_code != status_pb2.Status.OK:
        error_code, error_message = status_pb_to_error_code_and_message(
            lookup.status
        )
        _echo(
            f'Failed to get deployment {name} for deletion. '
            f'{error_code}:{error_message}',
            CLI_COLOR_ERROR,
        )
        return
    platform = DeploymentSpec.DeploymentOperator.Name(
        lookup.deployment.spec.operator
    )
    track_cli('deploy-delete', platform)
    result = yatai_client.deployment.delete(name, namespace, force)
    if result.status.status_code != status_pb2.Status.OK:
        error_code, error_message = status_pb_to_error_code_and_message(
            result.status
        )
        _echo(
            f'Failed to delete deployment {name}. {error_code}:{error_message}',
            CLI_COLOR_ERROR,
        )
        return
    extra_properties = {}
    created_at = lookup.deployment.created_at
    if created_at:
        # Attach the deployment's lifetime to the telemetry event.
        extra_properties['uptime'] = int(
            (datetime.utcnow() - created_at.ToDatetime()).total_seconds()
        )
    track_cli('deploy-delete-success', platform, extra_properties)
    _echo(f'Successfully deleted deployment "{name}"', CLI_COLOR_SUCCESS)
def deploy(archive_path, platform, region, stage, api_name, instance_type,
           instance_count):
    """Deploy a BentoML archive to the selected platform (legacy CLI path).

    Supports the serverless platforms and aws-sagemaker; any other value
    prints an error and returns without deploying.
    """
    if platform in SERVERLESS_PLATFORMS:
        target = ServerlessDeployment(archive_path, platform, region, stage)
    elif platform == "aws-sagemaker":
        target = SagemakerDeployment(
            archive_path, api_name, region, instance_count, instance_type
        )
    else:
        _echo(
            "Deploying with --platform=%s is not supported in current version of "
            "BentoML" % platform,
            CLI_COLOR_ERROR,
        )
        return
    output_path = target.deploy()
    _echo(
        "Successfully deployed to {platform}!".format(platform=platform),
        CLI_COLOR_SUCCESS,
    )
    _echo("Deployment archive is saved at: %s" % output_path)
def apply(deployment_yaml, output, wait):
    """Apply a deployment from a parsed deployment YAML dict via YataiClient.

    Tracks telemetry for the attempt and its outcome, prints a colored
    status message, and displays the resulting deployment on success.
    """
    track_cli('deploy-apply', deployment_yaml.get('spec', {}).get('operator'))
    platform_name = deployment_yaml.get('spec', {}).get('operator')
    deployment_name = deployment_yaml.get('name')
    try:
        yatai_client = YataiClient()
        with Spinner('Applying deployment'):
            result = yatai_client.deployment.apply(deployment_yaml, wait)
        if result.status.status_code != status_pb2.Status.OK:
            error_code, error_message = status_pb_to_error_code_and_message(
                result.status
            )
            track_cli(
                'deploy-apply-failure',
                platform_name,
                {'error_code': error_code, 'error_message': error_message},
            )
            _echo(
                f'Failed to apply deployment {deployment_name} '
                f'{error_code}:{error_message}',
                CLI_COLOR_ERROR,
            )
            return
        # BUG FIX: the success event was previously 'deploy-create-success'
        # (copy-pasted from the create command), conflating apply with create
        # in telemetry. The failure events here already use 'deploy-apply-*'.
        track_cli('deploy-apply-success', platform_name)
        _echo(
            f'Successfully applied deployment {deployment_name}',
            CLI_COLOR_SUCCESS,
        )
        _print_deployment_info(result.deployment, output)
    except BentoMLException as e:
        track_cli(
            'deploy-apply-failure',
            platform_name,
            {'error_message': str(e)},
        )
        _echo(
            'Failed to apply deployment {name}. Error message: {message}'.format(
                name=deployment_yaml.get('name'), message=e
            )
        )
def create(deployment_yaml, output, wait):
    """Create a new deployment from a parsed deployment YAML dict.

    Sends a 'deploy-create' telemetry event up front and a success/failure
    event afterwards; prints the resulting deployment on success.
    """
    yatai_client = YataiClient()
    platform_name = deployment_yaml.get('spec', {}).get('operator')
    deployment_name = deployment_yaml.get('name')
    track_cli('deploy-create', platform_name)
    try:
        with Spinner('Creating deployment '):
            result = yatai_client.deployment.create(deployment_yaml, wait)
        if result.status.status_code == status_pb2.Status.OK:
            track_cli('deploy-create-success', platform_name)
            _echo(
                f'Successfully created deployment {deployment_name}',
                CLI_COLOR_SUCCESS,
            )
            _print_deployment_info(result.deployment, output)
            return
        # Creation failed: surface the server-reported error to the user
        # and to telemetry.
        error_code, error_message = status_pb_to_error_code_and_message(
            result.status
        )
        track_cli(
            'deploy-create-failure',
            platform_name,
            {'error_code': error_code, 'error_message': error_message},
        )
        _echo(
            f'Failed to create deployment {deployment_name} '
            f'{error_code}:{error_message}',
            CLI_COLOR_ERROR,
        )
    except BentoMLException as e:
        _echo(
            f'Failed to create deployment {deployment_name} {str(e)}',
            CLI_COLOR_ERROR,
        )
def delete(bentos, yes):
    """Delete saved BentoService.

    BENTO is the target BentoService to be deleted, referenced by its name and
    version in format of name:version. For example: "iris_classifier:v1.2.0"

    `bentoml delete` command also supports deleting multiple saved BentoService at
    once, by providing name version tag separated by ",", for example:

    `bentoml delete iris_classifier:v1.2.0,my_svc:v1,my_svc2:v3`
    """
    yatai_client = YataiClient()
    for bento in bentos:
        # BUG FIX: str.partition never raises (unlike the previous
        # bento.split(':'), which crashed with ValueError when no ':' was
        # present); an empty name OR version means the tag is malformed —
        # the old check used 'and', which only caught the literal ":" input.
        name, _, version = bento.partition(':')
        if not name or not version:
            _echo(
                'BentoService name or version is missing. Please provide in the '
                'format of name:version',
                CLI_COLOR_ERROR,
            )
            return
        if not yes and not click.confirm(
            f'Are you sure about delete {bento}? This will delete the BentoService '
            f'saved bundle files permanently'
        ):
            return
        result = yatai_client.repository.dangerously_delete_bento(
            name=name, version=version
        )
        if result.status.status_code != status_pb2.Status.OK:
            error_code, error_message = status_pb_to_error_code_and_message(
                result.status
            )
            _echo(
                f'Failed to delete Bento {name}:{version} '
                f'{error_code}:{error_message}',
                CLI_COLOR_ERROR,
            )
            # BUG FIX: previously fell through and printed the success
            # message even when deletion failed.
            continue
        _echo(f'BentoService {name}:{version} deleted')
def apply(
    bento,
    deployment_name,
    platform,
    output,
    namespace,
    labels,
    annotations,
    region,
    stage,
    instance_type,
    instance_count,
    api_name,
    kube_namespace,
    replicas,
    service_name,
    service_type,
):
    """Apply (create or update) a deployment for bento "name:version" on the
    given platform, building the operator-specific spec from the CLI options.

    Raises BentoMLDeploymentException for unrecognized platforms.
    """
    track_cli('deploy-apply', platform)
    # BUG FIX: 'bento_verison' was misspelled. The DeploymentSpec proto field
    # is 'bento_version' (sibling commands assign spec.bento_version), so the
    # typo'd keyword argument would fail at runtime.
    bento_name, bento_version = bento.split(':')
    spec = DeploymentSpec(
        bento_name=bento_name,
        bento_version=bento_version,
        operator=get_deployment_operator_type(platform),
    )
    if platform == 'aws_sagemaker':
        spec.sagemaker_operator_config = DeploymentSpec.SageMakerOperatorConfig(
            region=region,
            instance_count=instance_count,
            instance_type=instance_type,
            api_name=api_name,
        )
    elif platform == 'aws_lambda':
        spec.aws_lambda_operator_config = DeploymentSpec.AwsLambdaOperatorConfig(
            region=region, stage=stage
        )
    elif platform == 'gcp_function':
        spec.gcp_function_operator_config = \
            DeploymentSpec.GcpFunctionOperatorConfig(region=region, stage=stage)
    elif platform == 'kubernetes':
        spec.kubernetes_operator_config = DeploymentSpec.KubernetesOperatorConfig(
            kube_namespace=kube_namespace,
            replicas=replicas,
            service_name=service_name,
            service_type=service_type,
        )
    else:
        raise BentoMLDeploymentException(
            'Custom deployment is not supported in current version of BentoML'
        )
    result = get_yatai_service().ApplyDeployment(
        ApplyDeploymentRequest(
            deployment=Deployment(
                namespace=namespace,
                name=deployment_name,
                annotations=parse_key_value_pairs(annotations),
                labels=parse_key_value_pairs(labels),
                spec=spec,
            )
        )
    )
    if result.status.status_code != Status.OK:
        _echo(
            'Failed to apply deployment {name}. code: {error_code}, message: '
            '{error_message}'.format(
                name=deployment_name,
                error_code=Status.Code.Name(result.status.status_code),
                error_message=result.status.error_message,
            ),
            CLI_COLOR_ERROR,
        )
    else:
        _echo(
            'Successfully apply deployment {}'.format(deployment_name),
            CLI_COLOR_SUCCESS,
        )
        display_deployment_info(result.deployment, output)
def containerize(bento, push, tag, build_arg, username, password):
    """Containerize specified BentoService.

    BENTO is the target BentoService to be containerized, referenced by its name
    and version in format of name:version. For example: "iris_classifier:v1.2.0"

    `bentoml containerize` command also supports the use of the `latest` tag which
    will automatically use the last built version of your Bento.

    You can provide a tag for the image built by Bento using the
    `--docker-image-tag` flag. Additionally, you can provide a `--push` flag,
    which will push the built image to the Docker repository specified by the
    image tag.

    You can also prefixing the tag with a hostname for the repository you wish
    to push to.
    e.g. `bentoml containerize IrisClassifier:latest --push --tag username/iris`
    would build a Docker image called `username/iris:latest` and push that to
    Docker Hub.

    By default, the `containerize` command will use the credentials provided by
    Docker. You may provide your own through `--username` and `--password`.
    """
    saved_bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)
    _echo(f"Found Bento: {saved_bundle_path}")
    bento_metadata = load_bento_service_metadata(saved_bundle_path)
    name = to_valid_docker_image_name(bento_metadata.name)
    version = to_valid_docker_image_version(bento_metadata.version)
    if not tag:
        _echo(
            "Tag not specified, using tag parsed from "
            f"BentoService: '{name}:{version}'"
        )
        tag = f"{name}:{version}"
    if ":" not in tag:
        _echo(
            "Image version not specified, using version parsed "
            f"from BentoService: '{version}'",
            CLI_COLOR_WARNING,
        )
        tag = f"{tag}:{version}"
    docker_build_args = {}
    if build_arg:
        for arg in build_arg:
            # BUG FIX: split on the first '=' only so values that themselves
            # contain '=' (e.g. KEY=a=b) are kept intact instead of raising
            # ValueError from the tuple unpack.
            key, value = arg.split("=", 1)
            docker_build_args[key] = value
    import docker

    docker_api = docker.APIClient()
    try:
        with Spinner(f"Building Docker image {tag} from {bento} \n"):
            for line in echo_docker_api_result(
                docker_api.build(
                    path=saved_bundle_path,
                    tag=tag,
                    decode=True,
                    buildargs=docker_build_args,
                )
            ):
                _echo(line)
    except docker.errors.APIError as error:
        raise CLIException(f'Could not build Docker image: {error}')
    _echo(
        f'Finished building {tag} from {bento}',
        CLI_COLOR_SUCCESS,
    )
    if push:
        # Only send an auth payload when at least one credential was given;
        # otherwise fall back to the local Docker credentials.
        auth_config_payload = (
            {"username": username, "password": password}
            if username or password
            else None
        )
        try:
            with Spinner(f"Pushing docker image to {tag}\n"):
                for line in echo_docker_api_result(
                    docker_api.push(
                        repository=tag,
                        stream=True,
                        decode=True,
                        auth_config=auth_config_payload,
                    )
                ):
                    _echo(line)
            _echo(
                f'Pushed {tag} to {name}',
                CLI_COLOR_SUCCESS,
            )
        except (docker.errors.APIError, BentoMLException) as error:
            raise CLIException(f'Could not push Docker image: {error}')
def docs(archive_path=archive_path):
    """Print the API documentation of the BentoService at archive_path as JSON."""
    track_cli('docs')
    spec = get_docs(load(archive_path))
    _echo(json.dumps(spec, indent=2))
def _print_bento_info(bento, output_type):
    """Render a Bento protobuf to stdout as YAML (output_type == 'yaml')
    or JSON (anything else)."""
    formatter = pb_to_yaml if output_type == 'yaml' else MessageToJson
    _echo(formatter(bento))
def display_deployment_info(deployment, output):
    """Print a Deployment protobuf as YAML when output == 'yaml', else JSON."""
    rendered = pb_to_yaml(deployment) if output == 'yaml' else MessageToJson(
        deployment
    )
    _echo(rendered)
def create(
    name,
    bento,
    platform,
    output,
    namespace,
    labels,
    annotations,
    region,
    instance_type,
    instance_count,
    api_name,
    kube_namespace,
    replicas,
    service_name,
    service_type,
    wait,
):
    """Create a new deployment of a BentoService (bento is "name:version").

    Builds a generic operator_spec dict from the CLI options, submits it via
    create_deployment(), and optionally waits for the deployment to settle
    before printing its info.
    """
    # converting platform parameter to DeploymentOperator name in proto
    # e.g. 'aws-lambda' to 'AWS_LAMBDA'
    track_cli('deploy-create', platform.replace('-', '_').upper())
    bento_name, bento_version = bento.split(':')
    operator_spec = {
        'region': region,
        'instance_type': instance_type,
        'instance_count': instance_count,
        'api_name': api_name,
        'kube_namespace': kube_namespace,
        'replicas': replicas,
        'service_name': service_name,
        'service_type': service_type,
    }
    yatai_service = get_yatai_service()
    result = create_deployment(
        name,
        namespace,
        bento_name,
        bento_version,
        platform,
        operator_spec,
        parse_key_value_pairs(labels),
        parse_key_value_pairs(annotations),
        yatai_service,
    )
    if result.status.status_code != status_pb2.Status.OK:
        _echo(
            'Failed to create deployment {name}. {error_code}:'
            '{error_message}'.format(
                name=name,
                error_code=status_pb2.Status.Code.Name(
                    result.status.status_code),
                error_message=result.status.error_message,
            ),
            CLI_COLOR_ERROR,
        )
    else:
        if wait:
            result_state = get_state_after_await_action_complete(
                yatai_service=yatai_service,
                name=name,
                namespace=namespace,
                message='Creating deployment ',
            )
            if result_state.status.status_code != status_pb2.Status.OK:
                # BUG FIX: this error message previously printed without
                # CLI_COLOR_ERROR, unlike every other error path in the file.
                _echo(
                    'Created deployment {name}, failed to retrieve latest status.'
                    ' {error_code}:{error_message}'.format(
                        name=name,
                        error_code=status_pb2.Status.Code.Name(
                            result_state.status.status_code),
                        error_message=result_state.status.error_message,
                    ),
                    CLI_COLOR_ERROR,
                )
                return
            result.deployment.state.CopyFrom(result_state.state)
        track_cli('deploy-create-success', platform.replace('-', '_').upper())
        _echo('Successfully created deployment {}'.format(name), CLI_COLOR_SUCCESS)
        _print_deployment_info(result.deployment, output)
def open_api_spec(archive_path=archive_path):
    """Print the OpenAPI spec of the BentoService at archive_path as JSON."""
    track_cli('open-api-spec')
    spec = get_docs(load(archive_path))
    _echo(json.dumps(spec, indent=2))
def update(
    name,
    namespace,
    bento,
    instance_type,
    instance_count,
    num_of_gunicorn_workers_per_instance,
    api_name,
    output,
    wait,
):
    """Update an existing SageMaker deployment with a new bento and/or
    scaling settings, optionally waiting for the new state to settle."""
    yatai_client = YataiClient()
    track_cli('deploy-update')
    # An omitted bento tag means "keep the currently deployed bundle".
    bento_name, bento_version = bento.split(':') if bento else (None, None)
    try:
        result = yatai_client.deployment.update_sagemaker_deployment(
            namespace=namespace,
            deployment_name=name,
            bento_name=bento_name,
            bento_version=bento_version,
            instance_count=instance_count,
            instance_type=instance_type,
            num_of_gunicorn_workers_per_instance=num_of_gunicorn_workers_per_instance,  # noqa E501
            api_name=api_name,
        )
    except BentoMLException as e:
        _echo(f'Failed to update deployment {name}: {str(e)}', CLI_COLOR_ERROR)
        return
    if result.status.status_code != status_pb2.Status.OK:
        update_deployment_status = result.status
        _echo(
            f'Failed to update deployment {name}. '
            f'{status_pb2.Status.Code.Name(update_deployment_status.status_code)}:'
            f'{update_deployment_status.error_message}',
            CLI_COLOR_ERROR,
        )
        return
    if wait:
        result_state = get_state_after_await_action_complete(
            yatai_client=yatai_client,
            name=name,
            namespace=namespace,
            message='Updating deployment',
        )
        if result_state.status.status_code != status_pb2.Status.OK:
            error_code = status_pb2.Status.Code.Name(
                result_state.status.status_code
            )
            _echo(
                f'Updated deployment {name}. Failed to retrieve latest status. '
                f'{error_code}:{result_state.status.error_message}'
            )
            return
        result.deployment.state.CopyFrom(result_state.state)
    track_cli(
        'deploy-update-success',
        deploy_platform=DeploymentSpec.DeploymentOperator.Name(
            result.deployment.spec.operator
        ),
    )
    _echo(f'Successfully updated deployment {name}', CLI_COLOR_SUCCESS)
    _print_deployment_info(result.deployment, output)
def config(action, updates):
    """View or modify the local BentoML configuration file.

    Args:
        action: one of "view-effective", "reset", "view", "set", "unset".
        updates: sequence of "section.option=value" strings (for set) or
            "section.option" strings (for unset); the section defaults to
            "core" when omitted.
    """
    create_local_config_file_if_not_found()
    if action == "view-effective":
        # The merged config (defaults + local overrides), not just the file.
        bentoml_config.write(sys.stdout)
        return
    if action == "reset":
        if os.path.isfile(LOCAL_CONFIG_FILE):
            LOG.info("Removing existing BentoML config file: %s", LOCAL_CONFIG_FILE)
            os.remove(LOCAL_CONFIG_FILE)
        create_local_config_file_if_not_found()
        return
    local_config = ConfigParser()
    with open(LOCAL_CONFIG_FILE, 'rb') as config_file:
        local_config.read_string(config_file.read().decode('utf-8'))
    if action == "view":
        local_config.write(sys.stdout)
    elif action == "set":
        try:
            for update in updates:
                item, value = update.split('=')
                if '.' in item:
                    sec, opt = item.split('.')
                else:
                    sec = 'core'  # default section
                    opt = item
                if not local_config.has_section(sec):
                    local_config.add_section(sec)
                local_config.set(sec.strip(), opt.strip(), value.strip())
            # BUG FIX: previously wrote via local_config.write(open(..., 'w')),
            # leaking the file handle; use a context manager instead.
            with open(LOCAL_CONFIG_FILE, 'w') as out_file:
                local_config.write(out_file)
        except ValueError:
            _echo('Wrong config format: %s' % str(updates), CLI_COLOR_ERROR)
            _echo(EXAMPLE_CONFIG_USAGE)
            return
    elif action == "unset":
        try:
            for update in updates:
                if '.' in update:
                    sec, opt = update.split('.')
                else:
                    sec = 'core'  # default section
                    opt = update
                if not local_config.has_section(sec):
                    local_config.add_section(sec)
                local_config.remove_option(sec.strip(), opt.strip())
            # BUG FIX: same file-handle leak as in the "set" branch.
            with open(LOCAL_CONFIG_FILE, 'w') as out_file:
                local_config.write(out_file)
        except ValueError:
            _echo('Wrong config format: %s' % str(updates), CLI_COLOR_ERROR)
            _echo(EXAMPLE_CONFIG_USAGE)
            return
    else:
        LOG.error("Unknown command: bentoml config %s", action)
        return
    return
def create(
    name,
    bento,
    platform,
    output,
    namespace,
    labels,
    annotations,
    region,
    instance_type,
    instance_count,
    api_name,
    kube_namespace,
    replicas,
    service_name,
    service_type,
    wait,
):
    """Create a new deployment of bento "name:version" on the given platform.

    Builds a platform-specific DeploymentSpec (falling back to configured
    defaults for unset options), refuses to overwrite an existing deployment,
    then submits the spec via ApplyDeployment. Raises BentoMLDeploymentException
    for duplicates or unsupported platforms, click.BadParameter when SageMaker
    options are incomplete.
    """
    # converting platform parameter to DeploymentOperator name in proto
    # e.g. 'aws-lambda' to 'AWS_LAMBDA'
    platform = platform.replace('-', '_').upper()
    operator = DeploymentSpec.DeploymentOperator.Value(platform)
    track_cli('deploy-create', platform)
    yatai_service = get_yatai_service()
    # Make sure there is no active deployment with the same deployment name
    # NOTE(review): any status other than NOT_FOUND (including transient
    # errors) is treated as "already exists" — confirm this is intended.
    get_deployment = yatai_service.GetDeployment(
        GetDeploymentRequest(deployment_name=name, namespace=namespace))
    if get_deployment.status.status_code != Status.NOT_FOUND:
        raise BentoMLDeploymentException(
            'Deployment {name} already existed, please use update or apply command'
            ' instead'.format(name=name))
    if operator == DeploymentSpec.AWS_SAGEMAKER:
        if not api_name:
            raise click.BadParameter(
                'api-name is required for Sagemaker deployment')
        # Unset options fall back to the values in the BentoML config file.
        sagemaker_operator_config = DeploymentSpec.SageMakerOperatorConfig(
            region=region or config().get('aws', 'default_region'),
            instance_count=instance_count
            or config().getint('sagemaker', 'instance_count'),
            instance_type=instance_type
            or config().get('sagemaker', 'instance_type'),
            api_name=api_name,
        )
        spec = DeploymentSpec(
            sagemaker_operator_config=sagemaker_operator_config)
    elif operator == DeploymentSpec.AWS_LAMBDA:
        aws_lambda_operator_config = DeploymentSpec.AwsLambdaOperatorConfig(
            region=region or config().get('aws', 'default_region'))
        # api_name is optional for Lambda; only set the proto field when given.
        if api_name:
            aws_lambda_operator_config.api_name = api_name
        spec = DeploymentSpec(
            aws_lambda_operator_config=aws_lambda_operator_config)
    elif operator == DeploymentSpec.GCP_FUNCTION:
        gcp_function_operator_config = DeploymentSpec.GcpFunctionOperatorConfig(
            region=region or config().get('google-cloud', 'default_region'))
        if api_name:
            gcp_function_operator_config.api_name = api_name
        spec = DeploymentSpec(
            gcp_function_operator_config=gcp_function_operator_config)
    elif operator == DeploymentSpec.KUBERNETES:
        kubernetes_operator_config = DeploymentSpec.KubernetesOperatorConfig(
            kube_namespace=kube_namespace,
            replicas=replicas,
            service_name=service_name,
            service_type=service_type,
        )
        spec = DeploymentSpec(
            kubernetes_operator_config=kubernetes_operator_config)
    else:
        raise BentoMLDeploymentException(
            'Custom deployment is not supported in the current version of BentoML'
        )
    # Fill in the bento identity shared by all operator types.
    bento_name, bento_version = bento.split(':')
    spec.bento_name = bento_name
    spec.bento_version = bento_version
    spec.operator = operator
    result = yatai_service.ApplyDeployment(
        ApplyDeploymentRequest(deployment=Deployment(
            namespace=namespace,
            name=name,
            annotations=parse_key_value_pairs(annotations),
            labels=parse_key_value_pairs(labels),
            spec=spec,
        )))
    if result.status.status_code != Status.OK:
        _echo(
            'Failed to create deployment {name}. {error_code}: '
            '{error_message}'.format(
                name=name,
                error_code=Status.Code.Name(result.status.status_code),
                error_message=result.status.error_message,
            ),
            CLI_COLOR_ERROR,
        )
    else:
        if wait:
            # Optionally block until the deployment reaches a terminal state,
            # then show that state instead of the immediate apply result.
            result_state = get_state_after_await_action_complete(
                yatai_service=yatai_service,
                name=name,
                namespace=namespace,
                message='Creating deployment ',
            )
            result.deployment.state.CopyFrom(result_state.state)
        _echo('Successfully created deployment {}'.format(name), CLI_COLOR_SUCCESS)
        display_deployment_info(result.deployment, output)
def containerize(bento, push, tag, username, password):
    """Containerize specified BentoService.

    BENTO is the target BentoService to be containerized, referenced by its name
    and version in format of name:version. For example: "iris_classifier:v1.2.0"

    `bentoml containerize` command also supports the use of the `latest` tag which
    will automatically use the last built version of your Bento.

    You can provide a tag for the image built by Bento using the
    `--docker-image-tag` flag. Additionally, you can provide a `--push` flag,
    which will push the built image to the Docker repository specified by the
    image tag.

    You can also prefixing the tag with a hostname for the repository you wish
    to push to.
    e.g. `bentoml containerize IrisClassifier:latest --push --tag username/iris`
    would build a Docker image called `username/iris:latest` and push that to
    Docker Hub.

    By default, the `containerize` command will use the credentials provided by
    Docker. You may provide your own through `--username` and `--password`.
    """
    bento_service_bundle_path = resolve_bundle_path(
        bento, pip_installed_bundle_path
    )
    _echo(f"Found Bento: {bento_service_bundle_path}")
    # Start from the bento's own name:version tag.
    name, version = get_name_version_from_tag(bento)
    # use tag name and version where applicable
    if tag is not None:
        name, v = get_name_version_from_tag(tag)
        if v is not None:
            version = v
    # Docker image names/tags have a restricted character set, so the
    # bento identifiers may need rewriting.
    name, version = make_bento_name_docker_compatible(name, version)
    full_tag = f"{name}:{version}"
    if full_tag != bento:
        _echo(
            f'Bento tag was changed to be Docker compatible. \n'
            f'"{bento}" -> "{full_tag}"',
            CLI_COLOR_WARNING,
        )
    import docker
    docker_api = docker.APIClient()
    try:
        with Spinner(f"Building Docker image: {name}\n"):
            for line in _echo_docker_api_result(
                docker_api.build(
                    path=bento_service_bundle_path,
                    tag=full_tag,
                    decode=True,
                )
            ):
                _echo(line)
    except docker.errors.APIError as error:
        raise CLIException(f'Could not build Docker image: {error}')
    _echo(
        f'Finished building {full_tag} from {bento}',
        CLI_COLOR_SUCCESS,
    )
    if push:
        # No explicit credentials -> let Docker use its stored ones.
        auth_config_payload = (
            {"username": username, "password": password}
            if username or password
            else None
        )
        try:
            with Spinner(f"Pushing docker image to {full_tag}\n"):
                for line in _echo_docker_api_result(
                    docker_api.push(
                        repository=name,
                        tag=version,
                        stream=True,
                        decode=True,
                        auth_config=auth_config_payload,
                    )
                ):
                    _echo(line)
            _echo(
                f'Pushed {full_tag} to {name}',
                CLI_COLOR_SUCCESS,
            )
        except (docker.errors.APIError, BentoMLException) as error:
            raise CLIException(f'Could not push Docker image: {error}')
def serve_gunicorn(
    port,
    workers,
    timeout,
    bento=None,
    with_conda=False,
    enable_microbatch=False,
    microbatch_workers=1,
):
    """Serve a saved BentoService with gunicorn (POSIX only).

    When enable_microbatch is set, a marshal (micro-batching) server takes
    the public port and forwards batched requests to the API server running
    on a reserved local port.
    """
    if not psutil.POSIX:
        # BUG FIX: the command name in this message was misspelled
        # "server-gunicon".
        _echo(
            "The `bentoml serve-gunicorn` command is only supported on POSIX. "
            "On windows platform, use `bentoml serve` for local API testing and "
            "docker for running production API endpoint: "
            "https://docs.docker.com/docker-for-windows/ "
        )
        return
    bento_service_bundle_path = resolve_bundle_path(
        bento, pip_installed_bundle_path
    )
    if with_conda:
        # Re-invoke this command inside the bundle's conda environment.
        return run_with_conda_env(
            pip_installed_bundle_path,
            'bentoml serve_gunicorn {bento} -p {port} -w {workers} '
            '--timeout {timeout} {flags}'.format(
                bento=bento_service_bundle_path,
                port=port,
                workers=workers,
                timeout=timeout,
                flags="--enable-microbatch" if enable_microbatch else "",
            ),
        )
    if workers is None:
        workers = get_gunicorn_num_of_workers()
    # Gunicorn only supports POSIX platforms
    from bentoml.server.gunicorn_server import GunicornBentoServer
    from bentoml.server.marshal_server import GunicornMarshalServer

    if enable_microbatch:
        prometheus_lock = multiprocessing.Lock()
        # avoid load model before gunicorn fork
        with reserve_free_port() as api_server_port:
            # Construct both servers while the internal port is reserved so
            # nothing else grabs it; release the reservation before starting
            # them so gunicorn can actually bind it.
            marshal_server = GunicornMarshalServer(
                bundle_path=bento_service_bundle_path,
                port=port,
                workers=microbatch_workers,
                prometheus_lock=prometheus_lock,
                outbound_host="localhost",
                outbound_port=api_server_port,
                outbound_workers=workers,
            )
            gunicorn_app = GunicornBentoServer(
                bento_service_bundle_path,
                api_server_port,
                workers,
                timeout,
                prometheus_lock,
            )
        marshal_server.async_run()
        gunicorn_app.run()
    else:
        gunicorn_app = GunicornBentoServer(
            bento_service_bundle_path, port, workers, timeout
        )
        gunicorn_app.run()
def open_api_spec(bento=None):
    """Print the OpenAPI spec JSON for a saved BentoService bundle."""
    bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)
    svc = load(bundle_path)
    _echo(json.dumps(get_open_api_spec_json(svc), indent=2))
def retrieve(bento, yatai_url, target_dir):
    """Download the saved bundle of `bento` from a Yatai server into target_dir."""
    client = get_yatai_client(yatai_url)
    bento_pb = client.repository.get(bento)
    client.repository.download_to_directory(bento_pb, target_dir)
    _echo(f'Save {bento} artifact to directory {target_dir}')
def simple_deploy(cortex_name, cortex_type, region, cortex_url,
                  bento_path: Optional[str] = None,
                  direct_path: Optional[str] = None):
    """
    Zips and deploys your module on AWS.

    The bundle directory (resolved from bento_path, or taken verbatim from
    direct_path) is zipped, uploaded to the cortex_url service to build a
    Docker image, and then exposed as a Cortex API whose endpoint URL is
    echoed and returned.
    """
    CORTEX_URL = ("{cortex_url}cortex?repository_uri={ecr_uri}"
                  "&cortex_type={cortex_type}&cortex_name={cortex_name}")
    # BUG FIX: "&reg" in "&region=" had been mis-encoded as the single
    # character "®", producing a malformed query string.
    DOCKER_URL = ("{cortex_url}docker?repository_name={repository_name}"
                  "&region={region}")

    def create_unique_name(name: str):
        # Suffix with a UUID and clamp to 40 chars so repeated deploys of
        # the same module never collide.
        name = ''.join([name, "-", str(uuid.uuid4())])[:40]
        # BUG FIX: the trailing-dash strip compared name[:-1] (the prefix)
        # instead of the last character.
        if name[-1] == "-":
            name = name[:-1]
        return name

    def create_zip_file(dir_path: str):
        # Zip the bundle directory next to itself under a unique archive name.
        head, tail = os.path.split(dir_path)
        path = shutil.make_archive(
            os.path.join(head, create_unique_name(tail)), 'zip', dir_path)
        head, key_name = os.path.split(path)
        return path, key_name

    def create_docker(key_name, zipped_file_path, deploy_region="us-east-1"):
        # Upload the zip so the remote service can build an ECR image from it.
        repository_name = create_unique_name(key_name.split(".")[0])
        docker_url = DOCKER_URL.format(cortex_url=cortex_url,
                                       repository_name=repository_name,
                                       region=deploy_region)
        # BUG FIX: the zip file handle was previously opened inline and
        # never closed.
        with open(zipped_file_path, 'rb') as zip_file:
            files = [('file', (key_name, zip_file, 'application/zip'))]
            response = requests.request("POST", docker_url, files=files)
        _echo(response.text)
        ecr_uri = json.loads(response.text)['ecr_uri'].split(" ")[-1]
        return ecr_uri

    def create_cortex_api(ecr_uri, cortx_type, cortx_name):
        # Register the built image as a Cortex API and return its endpoint.
        cortex_endpoint = CORTEX_URL.format(cortex_url=cortex_url,
                                            ecr_uri=ecr_uri,
                                            cortex_type=cortx_type,
                                            cortex_name=cortx_name)
        response = requests.request("GET", cortex_endpoint)
        backend_api_url = response.text.replace("\n", "")
        backend_api_url = json.loads(backend_api_url)['api_endpoint']
        return backend_api_url

    if bento_path:
        saved_bundle_path = resolve_bundle_path(bento_path, None, None)
    else:
        saved_bundle_path = direct_path
    _echo("Zipping backend files")
    backend_path, backend_key_name = create_zip_file(saved_bundle_path)
    _echo("Creating docker image for backend")
    backend_docker_uri = create_docker(backend_key_name, backend_path, region)
    _echo("Creating Backend API")
    backend_cortex_uri = create_cortex_api(
        backend_docker_uri, cortex_type, cortex_name)
    _echo(f"Backend API at : {backend_cortex_uri}")
    return backend_cortex_uri