Example #1
0
def generate_sagemaker_snapshot(name, version, archive_path):
    """Create an AWS SageMaker deployment snapshot from a BentoML archive.

    Copies the service archive into a fresh snapshot directory, then writes
    the nginx config, WSGI entry point, and 'serve' entry script that the
    SageMaker serving container expects.

    :param name: str, BentoML service name
    :param version: str, BentoML service version
    :param archive_path: str, path to the BentoML service archive to copy
    :return: str, path to the generated snapshot directory
    """
    snapshot_path = generate_bentoml_deployment_snapshot_path(
        name, version, "aws-sagemaker")
    shutil.copytree(archive_path, snapshot_path)
    with open(os.path.join(snapshot_path, "nginx.conf"), "w") as f:
        f.write(DEFAULT_NGINX_CONFIG)
    with open(os.path.join(snapshot_path, "wsgi.py"), "w") as f:
        f.write(DEFAULT_WSGI_PY)
    with open(os.path.join(snapshot_path, "serve"), "w") as f:
        f.write(DEFAULT_SERVE_SCRIPT)

    # Permission 755 is required for the 'serve' entry script; use an octal
    # literal directly instead of parsing the string "755" at runtime.
    os.chmod(os.path.join(snapshot_path, "serve"), 0o755)
    return snapshot_path
Example #2
0
def generate_serverless_bundle(bento_service, platform, archive_path,
                               additional_options):
    """Generate a Serverless-framework project bundle for a BentoML service.

    Scaffolds a templated serverless project for the target platform,
    installs the AWS Lambda plugins when needed, and copies the service
    archive and its requirements.txt into the bundle.

    :param bento_service: BentoService instance to bundle
    :param platform: str, target platform key (e.g. 'aws-lambda',
        'google-python')
    :param archive_path: str, path to the BentoML service archive
    :param additional_options: platform-specific options forwarded to the
        bundle creators
    :return: str, real path to the generated bundle directory
    :raises BentoMLException: when the platform is not supported
    """
    check_serverless_compatiable_version()

    provider = SERVERLESS_PROVIDER[platform]
    output_path = generate_bentoml_deployment_snapshot_path(
        bento_service.name, platform)
    Path(output_path).mkdir(parents=True, exist_ok=False)

    # Call the serverless CLI to generate the templated project
    subprocess.call([
        'serverless', 'create', '--template', provider, '--name',
        bento_service.name
    ],
                    cwd=output_path)
    if platform == 'google-python':
        create_gcp_function_bundle(bento_service, output_path,
                                   additional_options)
    elif platform == 'aws-lambda' or platform == 'aws-lambda-py2':
        # Install two additional plugins required for AWS Lambda:
        # serverless-python-requirements packages the required python
        # modules and automatically compresses them into a layer.
        subprocess.call([
            'serverless', 'plugin', 'install', '-n',
            'serverless-python-requirements'
        ],
                        cwd=output_path)
        subprocess.call([
            'serverless', 'plugin', 'install', '-n', 'serverless-apigw-binary'
        ],
                        cwd=output_path)
        create_aws_lambda_bundle(bento_service, output_path,
                                 additional_options)
    else:
        # BUG FIX: the original passed a (template, value) tuple without
        # formatting it, so '{provider}' was never substituted into the
        # message. Format the message explicitly instead.
        raise BentoMLException(
            "{} is not supported in current version of BentoML".format(
                provider))

    shutil.copy(os.path.join(archive_path, 'requirements.txt'), output_path)

    model_service_archive_path = os.path.join(output_path, bento_service.name)
    shutil.copytree(archive_path, model_service_archive_path)

    return os.path.realpath(output_path)
Example #3
0
    def _generate_bundle(self):
        """Build the serverless project bundle for this deployment.

        Scaffolds a templated serverless project, applies the
        platform-specific bundle creator, and copies the service archive
        plus requirements.txt into the bundle directory.

        :return: str, real path to the generated bundle directory
        :raises BentoMLException: when the platform is not supported
        """
        output_path = generate_bentoml_deployment_snapshot_path(
            self.bento_service.name, self.bento_service.version, self.platform)
        Path(output_path).mkdir(parents=True, exist_ok=False)

        # Scaffold a templated serverless project in the snapshot directory
        create_cmd = [
            "serverless", "create", "--template", self.provider,
            "--name", self.bento_service.name,
        ]
        call_serverless_command(create_cmd, output_path)

        if self.platform == "google-python":
            create_gcp_function_bundle(self.bento_service, output_path,
                                       self.region, self.stage)
        elif self.platform in ("aws-lambda", "aws-lambda-py2"):
            # Two extra plugins are needed for AWS Lambda:
            # serverless-python-requirements packages the required python
            # modules and automatically compresses them into a layer.
            install_serverless_plugin("serverless-python-requirements",
                                      output_path)
            install_serverless_plugin("serverless-apigw-binary", output_path)
            create_aws_lambda_bundle(self.bento_service, output_path,
                                     self.region, self.stage)
        else:
            raise BentoMLException(
                "%s is not supported in current version of BentoML" %
                self.provider)

        shutil.copy(os.path.join(self.archive_path, "requirements.txt"),
                    output_path)
        service_copy_dest = os.path.join(output_path, self.bento_service.name)
        shutil.copytree(self.archive_path, service_copy_dest)

        return os.path.realpath(output_path)
Example #4
0
    def _run_serverless_command(self, command, cwd):
        """Run one serverless CLI command in *cwd* and log its parsed output.

        NOTE(review): stderr is captured but never read, matching the
        original behavior — failures are not detected here; confirm whether
        the return code should be checked.
        """
        with subprocess.Popen(command, cwd=cwd, stdout=PIPE,
                              stderr=PIPE) as proc:
            response = parse_serverless_response(
                proc.stdout.read().decode('utf-8'))
            logger.debug('Serverless response: %s', '\n'.join(response))

    def _generate_bundle(self):
        """Build the serverless project bundle for this deployment.

        Scaffolds a templated serverless project, installs the AWS Lambda
        plugins when needed, and copies the service archive plus
        requirements.txt into the bundle directory. The triplicated
        Popen/parse/log boilerplate is factored into
        ``_run_serverless_command``.

        :return: str, real path to the generated bundle directory
        :raises BentoMLException: when the platform is not supported
        """
        output_path = generate_bentoml_deployment_snapshot_path(
            self.bento_service.name, self.bento_service.version, self.platform)
        Path(output_path).mkdir(parents=True, exist_ok=False)

        # Call the serverless CLI to generate the templated project
        self._run_serverless_command([
            'serverless', 'create', '--template', self.provider, '--name',
            self.bento_service.name
        ], output_path)

        if self.platform == 'google-python':
            create_gcp_function_bundle(self.bento_service, output_path,
                                       self.region, self.stage)
        elif self.platform == 'aws-lambda' or self.platform == 'aws-lambda-py2':
            # Install two additional plugins required for AWS Lambda:
            # serverless-python-requirements packages the required python
            # modules and automatically compresses them into a layer.
            self._run_serverless_command([
                'serverless', 'plugin', 'install', '-n',
                'serverless-python-requirements'
            ], output_path)
            self._run_serverless_command([
                'serverless', 'plugin', 'install', '-n',
                'serverless-apigw-binary'
            ], output_path)

            create_aws_lambda_bundle(self.bento_service, output_path,
                                     self.region, self.stage)
        else:
            raise BentoMLException(
                "%s is not supported in current version of BentoML" %
                self.provider)

        shutil.copy(os.path.join(self.archive_path, 'requirements.txt'),
                    output_path)

        model_service_archive_path = os.path.join(output_path,
                                                  self.bento_service.name)
        shutil.copytree(self.archive_path, model_service_archive_path)

        return os.path.realpath(output_path)
Example #5
0
    def deploy(self):
        """Deploy BentoML service to AWS Sagemaker.
        Your AWS credential must have the correct permissions for sagemaker and ECR

        1. Create docker image and push to ECR
        2. Create sagemaker model base on the ECR image
        3. Create sagemaker endpoint configuration base on sagemaker model
        4. Create sagemaker endpoint base on sagemaker endpoint configuration

        :return: String, location to the output snapshot's path
        """
        snapshot_path = generate_bentoml_deployment_snapshot_path(
            self.bento_service.name, self.bento_service.version,
            "aws-sagemaker")
        shutil.copytree(self.archive_path, snapshot_path)
        with open(os.path.join(snapshot_path, "nginx.conf"), "w") as f:
            f.write(DEFAULT_NGINX_CONFIG)
        with open(os.path.join(snapshot_path, "wsgi.py"), "w") as f:
            f.write(DEFAULT_WSGI_PY)
        with open(os.path.join(snapshot_path, "serve"), "w") as f:
            f.write(DEFAULT_SERVE_SCRIPT)

        # Permission 755 is required for the 'serve' entry script; use an
        # octal literal directly instead of parsing the string "755".
        os.chmod(os.path.join(snapshot_path, "serve"), 0o755)

        execution_role_arn = get_arn_role_from_current_user()
        ecr_image_path = create_push_image_to_ecr(self.bento_service,
                                                  snapshot_path)
        sagemaker_model_info = {
            "ModelName": self.model_name,
            "PrimaryContainer": {
                "ContainerHostname": self.model_name,
                "Image": ecr_image_path,
                "Environment": {
                    "API_NAME": self.api.name
                },
            },
            "ExecutionRoleArn": execution_role_arn,
        }
        logger.info("Creating sagemaker model %s", self.model_name)
        create_model_response = self.sagemaker_client.create_model(
            **sagemaker_model_info)
        logger.info("AWS create model response: %s", create_model_response)

        production_variants = [{
            "VariantName": self.bento_service.name,
            "ModelName": self.model_name,
            "InitialInstanceCount": self.instance_count,
            # NOTE(review): attribute is spelled 'instant_type' elsewhere in
            # this class — kept as-is; renaming would break the class.
            "InstanceType": self.instant_type,
        }]
        logger.info("Creating sagemaker endpoint %s configuration",
                    self.endpoint_config_name)
        create_endpoint_config_response = self.sagemaker_client.create_endpoint_config(
            EndpointConfigName=self.endpoint_config_name,
            ProductionVariants=production_variants,
        )
        logger.info("AWS create endpoint config response: %s",
                    create_endpoint_config_response)

        logger.info("Creating sagemaker endpoint %s", self.bento_service.name)
        create_endpoint_response = self.sagemaker_client.create_endpoint(
            EndpointName=self.bento_service.name,
            EndpointConfigName=self.endpoint_config_name,
        )
        logger.info("AWS create endpoint response: %s",
                    create_endpoint_response)

        # TODO: maybe wait for this endpoint from creating to running and then return
        return snapshot_path
Example #6
0
def deploy_bentoml(
    clipper_conn,
    archive_path,
    api_name,
    input_type="strings",
    model_name=None,
    labels=None,
):
    """Deploy bentoml bundle to clipper cluster

    Args:
        clipper_conn(clipper_admin.ClipperConnection): Clipper connection instance
        archive_path(str): Path to the bentoml service archive.
        api_name(str): name of the api that will be used as prediction function for
            clipper cluster
        input_type(str): Input type that clipper accept. The default input_type for
            image handler is `bytes`, for other handlers is `strings`. Available
            input_type are `integers`, `floats`, `doubles`, `bytes`, or `strings`
        model_name(str): Model's name for clipper cluster
        labels(:obj:`list(str)`, optional): labels for clipper model; defaults
            to ["bentoml"]

    Returns:
        tuple: Model name and model version that deployed to clipper

    """
    # BUG FIX: the original used a mutable default argument
    # (labels=["bentoml"]), which is shared across calls and can be
    # mutated by callees. Use a None sentinel instead.
    if labels is None:
        labels = ["bentoml"]

    bento_service = load(archive_path)
    apis = bento_service.get_service_apis()

    if api_name:
        api = next(item for item in apis if item.name == api_name)
    elif len(apis) == 1:
        api = apis[0]
    else:
        raise BentoMLException(
            "Please specify api-name, when more than one API is present in the archive"
        )
    model_name = model_name or generate_clipper_compatiable_string(
        bento_service.name + "-" + api.name
    )
    version = generate_clipper_compatiable_string(bento_service.version)

    # Image handlers receive raw bytes rather than strings
    if isinstance(api.handler, ImageHandler):
        input_type = "bytes"

    try:
        clipper_conn.start_clipper()
    except docker.errors.APIError:
        # Cluster already running — just connect to it
        clipper_conn.connect()
    except Exception:
        raise BentoMLException("Can't start or connect with clipper cluster")

    snapshot_path = generate_bentoml_deployment_snapshot_path(
        bento_service.name, bento_service.version, "clipper"
    )

    entry_py_content = DEFAULT_CLIPPER_ENTRY.format(
        api_name=api.name, input_type=input_type
    )
    model_path = os.path.join(snapshot_path, "bento")
    shutil.copytree(archive_path, model_path)

    with open(os.path.join(snapshot_path, "clipper_entry.py"), "w") as f:
        f.write(entry_py_content)

    docker_content = DOCKERFILE_CLIPPER.format(
        model_name=model_name, model_version=version
    )
    with open(os.path.join(snapshot_path, "Dockerfile-clipper"), "w") as f:
        f.write(docker_content)

    docker_api = docker.APIClient()
    image_tag = bento_service.name.lower() + "-clipper:" + bento_service.version
    for line in docker_api.build(
        path=snapshot_path, dockerfile="Dockerfile-clipper", tag=image_tag
    ):
        process_docker_api_line(line)

    clipper_conn.deploy_model(
        name=model_name,
        version=version,
        input_type=input_type,
        image=image_tag,
        labels=labels,
    )

    return model_name, version