Example #1
from azureml.core.model import Model
import joblib
import os

# ws, RegisterModel and WhatModel are assumed to be defined earlier in the notebook
model = Model(ws, '{}_{}'.format(RegisterModel, WhatModel))
model.download(target_dir='.', exist_ok=True)

# verify the download, then load the trained estimator
os.stat('./{}.pkl'.format(WhatModel))
model_path = Model.get_model_path('{}.pkl'.format(WhatModel))
estimator = joblib.load(model_path)

# Create Docker Image

print('# Create Docker Image')

# Specify the conda dependencies to include in the scoring environment
from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies.create(conda_packages=['scikit-learn', 'joblib'])

with open("myenv.yml", "w") as f:
    f.write(myenv.serialize_to_string())
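# For reference, the resulting myenv.yml has roughly the following shape
# (exact header comments and pinned versions vary by SDK release, so treat
# this as an illustration, not the literal output):
#
#   name: project_environment
#   dependencies:
#     - python=3.6.2
#     - pip:
#       - azureml-defaults
#     - scikit-learn
#     - joblib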

from azureml.core.image import ContainerImage
# configure the image
image_config = ContainerImage.image_configuration(execution_script="score.py",
                                                  runtime="python",
                                                  conda_file="myenv.yml")
Example #2
import azureml.core.runconfig
from azureml.core.conda_dependencies import CondaDependencies

# run_config, project and project_folder are assumed to be defined earlier in the sample

# enable Docker
run_config.environment.docker.enabled = True

# set Docker base image to the default CPU-based image
run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_MMLSPARK_CPU_IMAGE
print('base image is', run_config.environment.docker.base_image)
#run_config.environment.docker.base_image = 'microsoft/mmlspark:plus-0.9.9'

# use conda_dependencies.yml to create a conda environment in the Docker image for execution
# please update this file if you need additional packages.
run_config.environment.python.user_managed_dependencies = False

cd = CondaDependencies()
cd.add_conda_package('numpy')
# overwrite the default conda_dependencies.yml file
cd.save_to_file(base_directory=project_folder, conda_file_path='conda_dependencies.yml')
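# To sanity-check what was written, the dependency set can be printed with
# serialize_to_string(), the same method used in Example #3 below:
print(cd.serialize_to_string())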

# auto-prepare the Docker image when used for execution (if it is not already prepared)
run_config.prepare_environment = True

train_script = 'train-spark.py'

print()
print('##################################################')
print('submitting {} for a Spark run on ACI...'.format(train_script))
print('##################################################')
print()

run = Run.submit(project_object = project, 
                 run_config = run_config, 
                 script_to_run = train_script)

print(helpers.get_run_history_url(run))
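
Once submitted, the run can be monitored from the notebook. A minimal sketch, assuming this preview-era Run object exposes the same wait_for_completion and get_status methods as the released azureml-core SDK:

run.wait_for_completion(show_output=True)
print('run status:', run.get_status())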
Example #3
import os

import yaml
from azureml.core.conda_dependencies import CondaDependencies


def generate_yaml(
    directory: str,
    ref_filename: str,
    needed_libraries: list,
    conda_filename: str,
):
    """
    Creates a deployment-specific yaml file as a subset of
    the image classification environment.yml

    Also adds extra libraries, if not present in environment.yml

    Args:
        directory (string): Directory name of reference yaml file
        ref_filename (string): Name of reference yaml file
        needed_libraries (list of strings): List of libraries needed
        in the Docker container
        conda_filename (string): Name of yaml file to be deployed
        in the Docker container

    Returns: Nothing

    """

    with open(os.path.join(directory, ref_filename), "r") as f:
        yaml_content = yaml.load(f, Loader=yaml.FullLoader)

    # Extract libraries to be installed using conda
    extracted_libraries = [
        depend for depend in yaml_content["dependencies"]
        if any(lib in depend for lib in needed_libraries)
    ]

    # Extract libraries to be installed using pip
    if any(isinstance(x, dict) for x in yaml_content["dependencies"]):
        # if the reference yaml file contains a "pip" section,
        # find where it is in the list of dependencies
        ind = [
            yaml_content["dependencies"].index(depend)
            for depend in yaml_content["dependencies"]
            if isinstance(depend, dict)
        ][0]
        extracted_libraries += [
            depend for depend in yaml_content["dependencies"][ind]["pip"]
            if any(lib in depend for lib in needed_libraries)
        ]

    # Check whether additional libraries are needed
    not_found = [
        lib for lib in needed_libraries
        if not any(lib in ext for ext in extracted_libraries)
    ]

    # Create the deployment-specific yaml file
    conda_env = CondaDependencies()
    for ch in yaml_content["channels"]:
        conda_env.add_channel(ch)
    for library in extracted_libraries + not_found:
        conda_env.add_conda_package(library)

    # Display the environment
    print(conda_env.serialize_to_string())

    # Save the file to disk
    conda_env.save_to_file(base_directory=os.getcwd(),
                           conda_file_path=conda_filename)
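
A hypothetical invocation, assuming a reference environment.yml in the current directory (all file and library names here are illustrative):

generate_yaml(
    directory=".",
    ref_filename="environment.yml",
    needed_libraries=["scikit-learn", "numpy"],
    conda_filename="deployment_env.yml",
)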