Example #1
def Azure_ML_experiment():

    # create experiment
    experiment_name = 'aftershock-model-train'
    exp = Experiment(workspace=ws, name=experiment_name)

    # create or get the compute target
    compute_target = create_compute_resource(ws)

    # define the environment and its required packages
    env = Environment('aftershock-env-01')
    cd = CondaDependencies.create(pip_packages=[
        'azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults'
    ],
                                  conda_packages=['scikit-learn==0.24.2'])

    env.python.conda_dependencies = cd

    src = ScriptRunConfig(source_directory=os.getcwd(),
                          script='train.py',
                          arguments=[],
                          compute_target=compute_target,
                          environment=env)

    # submit the train script to the experiment
    run = exp.submit(config=src)
    print(run.get_file_names())
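Example #1 assumes a module-level ws handle and a create_compute_resource helper that are not shown. A minimal sketch of that assumed setup (cluster name and VM size are illustrative, not from the original):

import os
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.conda_dependencies import CondaDependencies

# Assumed workspace handle used by Azure_ML_experiment()
ws = Workspace.from_config()

def create_compute_resource(workspace, name="cpu-cluster"):
    # Reuse the cluster if it already exists, otherwise provision a small one
    if name in workspace.compute_targets:
        return workspace.compute_targets[name]
    config = AmlCompute.provisioning_configuration(vm_size="STANDARD_D2_V2",
                                                   max_nodes=4)
    target = ComputeTarget.create(workspace, name, config)
    target.wait_for_completion(show_output=True)
    return target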
Example #2
def main():
    """
    Deploy the best registered model version as an ACI web service.
    """
    work_space = Workspace.from_config()
    environment = Environment("keras-service-environment")
    environment.python.conda_dependencies = CondaDependencies.create(
        python_version="3.7.7",
        pip_packages=["azureml-defaults", "numpy", "tensorflow==2.3.1"],
    )
    # list all registered versions of the "keras_mnist" model
    model_list = Model.list(work_space, name="keras_mnist")
    validation_accuracy = []
    version = []
    for i in model_list:
        validation_accuracy.append(float(i.properties["val_accuracy"]))
        version.append(i.version)
    model = Model(work_space,
                  "keras_mnist",
                  version=version[np.argmax(validation_accuracy)])
    service_name = "keras-mnist-service"
    inference_config = InferenceConfig(entry_script="score_keras.py",
                                       environment=environment)
    aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
    service = Model.deploy(
        workspace=work_space,
        name=service_name,
        models=[model],
        inference_config=inference_config,
        deployment_config=aci_config,
        overwrite=True,
    )
    service.wait_for_deployment(show_output=True)
    print(service.get_logs())
Example #3
    def __get_run_config(self,
                         compute_target,
                         channels=None,
                         conda_packages=None,
                         pip_packages=None):
        # Load the "cpu-dsvm.runconfig" file (created by the above attach operation) in memory
        run_config = RunConfiguration(framework="python")

        # Set compute target to the Linux DSVM
        run_config.target = compute_target.name

        # Do not use Docker in the remote VM
        run_config.environment.docker.enabled = False

        # Let the system provision the environment from the conda dependencies defined below
        run_config.environment.python.user_managed_dependencies = False

        # Prepare the Docker and conda environment automatically when used the first time.
        run_config.auto_prepare_environment = True

        # specify dependencies obj
        conda_dependencies = CondaDependencies.create(
            conda_packages=conda_packages, pip_packages=pip_packages)
        if channels:
            for channel in channels:
                conda_dependencies.add_channel(channel)

        run_config.environment.python.conda_dependencies = conda_dependencies

        return run_config
Example #4
def main(workspace, inputs):

    print("Loading compute target")
    compute_target = ComputeTarget(
        workspace=workspace,
        name=inputs["compute"]
    )
    # create a new runconfig object
    run_config = RunConfiguration()

    # enable Docker 
    run_config.environment.docker.enabled = True

    # set Docker base image to the default CPU-based image
    run_config.environment.docker.base_image = DEFAULT_CPU_IMAGE

    # use conda_dependencies.yml to create a conda environment in the Docker image for execution
    run_config.environment.python.user_managed_dependencies = False

    # specify CondaDependencies obj
    run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['scikit-learn'])

    # Define the pipeline step, using its own source_directory
    step = PythonScriptStep(name=inputs["step_name"],
                            script_name=inputs["train_script"], 
                            compute_target=compute_target, 
                            source_directory=inputs["source_directory"],
                            runconfig=run_config,
                            allow_reuse=True)
    return step
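The returned step is typically assembled into a Pipeline and submitted; a hedged sketch of that wiring (the inputs values are illustrative, not from the original):

from azureml.core import Workspace, Experiment
from azureml.pipeline.core import Pipeline

workspace = Workspace.from_config()
inputs = {
    "compute": "cpu-cluster",
    "step_name": "train-step",
    "train_script": "train.py",
    "source_directory": "scripts",
}
step = main(workspace, inputs)
pipeline = Pipeline(workspace=workspace, steps=[step])
run = Experiment(workspace, "pipeline-example").submit(pipeline)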
Example #5
def create_resnet_image_config(conda_file="img_env.yml",
                               execution_script="driver.py"):
    """

    :param conda_file:
    :param execution_script:
    :return:
    """
    conda_pack = ["tensorflow-gpu==1.14.0"]
    requirements = [
        "keras==2.2.0", "Pillow==5.2.0", "azureml-defaults",
        "azureml-contrib-services", "toolz==0.9.0"
    ]
    imgenv = CondaDependencies.create(conda_packages=conda_pack,
                                      pip_packages=requirements)
    with open("img_env.yml", "w") as file:
        file.write(imgenv.serialize_to_string())

    description = "Image for AKS Deployment Tutorial"
    dependencies = ["resnet152.py"]
    tags = {"name": "AKS", "project": "AML"}
    return ContainerImage.image_configuration(
        execution_script=execution_script,
        runtime="python",
        conda_file=conda_file,
        description=description,
        tags=tags,
        dependencies=dependencies,
        enable_gpu=True)
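A hedged usage sketch for the configuration above; ws and registered_model are assumed handles, following the same ContainerImage.create pattern shown in Example #8:

image_config = create_resnet_image_config()
image = ContainerImage.create(name="resnet-gpu-image",    # illustrative name
                              models=[registered_model],  # assumed registered Model
                              image_config=image_config,
                              workspace=ws)                # assumed Workspace handle
image.wait_for_creation(show_output=True)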
Example #6
def run():
    print("entered run")
    variables_received = "sub_id: {}, rg: {}, work_name: {}, state: {}, author: {}, model_name: {}" \
                            .format(resolve_sub_id(),
                                    resolve_rg(),
                                    resolve_workspace_name(),
                                    resolve_state(),
                                    resolve_author(),
                                    resolve_model_name())
    print(variables_received)

    az_ws = Workspace(resolve_sub_id(), resolve_rg(), resolve_workspace_name())
    print("initialized workspace")
    #Get & Download model
    model = Model(az_ws,
                  name=resolve_model_name(),
                  tags={
                      "state": resolve_state(),
                      "created_by": resolve_author()
                  })
    print("initialized model")
    model.download(target_dir="./assets/")
    print("downloaded model assets")
    #TODO: remove workaround for ml sdk dropping assets into /assets/dacrook folder when files dropped to consistent location
    for dir_p, _, f_n in walk("./assets"):
        for f in f_n:
            abs_path = os.path.abspath(os.path.join(dir_p, f))
            shutil.move(abs_path, "./assets/" + f)

    #Configure Image
    my_env = CondaDependencies.create(conda_packages=["numpy", "scikit-learn"])
    with open("myenv.yml", "w") as f:
        f.write(my_env.serialize_to_string())
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="myenv.yml",
        dependencies=["assets", "inference_code"],
        tags={
            "state": resolve_state(),
            "created_by": resolve_author()
        })
    print("configured image")
    #TODO: use this once model is dropped to a consistent location
    #    image = Image.create(workspace = az_ws, name=resolve_image_name(), models=[model], image_config = image_config)
    image = Image.create(workspace=az_ws,
                         name=resolve_image_name(),
                         models=[model],
                         image_config=image_config)
    image.wait_for_creation()
    print("created image")
    if image.creation_state != "Succeeded":
        raise Exception("Failed to create image.")
    print("image location: {}".format(image.image_location))
    artifacts = {"image_location": image.image_location}
    if not os.path.exists("/artifacts/"):
        os.makedirs("/artifacts/")
    with open("/artifacts/artifacts.json", "w") as outjson:
        json.dump(artifacts, outjson)
Example #7
def deployModelAsWebService(
        ws,
        model_folder_path="models",
        model_name="component_compliance",
        scoring_script_filename="scoring_service.py",
        conda_packages=['numpy', 'pandas'],
        pip_packages=['azureml-sdk', 'onnxruntime'],
        conda_file="dependencies.yml",
        runtime="python",
        cpu_cores=1,
        memory_gb=1,
        tags={'name': 'scoring'},
        description='Compliance classification web service.',
        service_name="complianceservice"):
    # Note: for model_path we supply the name of the outputs folder without a trailing slash,
    # which ensures both the model and the custom estimators get uploaded.
    print("Registering and uploading model...")
    registered_model = Model.register(model_path=model_folder_path,
                                      model_name=model_name,
                                      workspace=ws)

    # create a Conda dependencies environment file
    print("Creating conda dependencies file locally...")
    from azureml.core.conda_dependencies import CondaDependencies
    mycondaenv = CondaDependencies.create(conda_packages=conda_packages,
                                          pip_packages=pip_packages)
    with open(conda_file, "w") as f:
        f.write(mycondaenv.serialize_to_string())

    # create container image configuration
    print("Creating container image configuration...")
    from azureml.core.image import ContainerImage
    image_config = ContainerImage.image_configuration(
        execution_script=scoring_script_filename,
        runtime=runtime,
        conda_file=conda_file)

    # create ACI configuration
    print("Creating ACI configuration...")
    from azureml.core.webservice import AciWebservice, Webservice
    aci_config = AciWebservice.deploy_configuration(cpu_cores=cpu_cores,
                                                    memory_gb=memory_gb,
                                                    tags=tags,
                                                    description=description)

    # deploy the webservice to ACI
    print("Deploying webservice to ACI...")
    webservice = Webservice.deploy_from_model(workspace=ws,
                                              name=service_name,
                                              deployment_config=aci_config,
                                              models=[registered_model],
                                              image_config=image_config)
    webservice.wait_for_deployment(show_output=True)

    return webservice
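A possible call to the helper above, assuming ws is an existing Workspace and the ONNX model plus scoring_service.py are present locally (the service name is illustrative):

webservice = deployModelAsWebService(ws, service_name="complianceservice-dev")
print(webservice.scoring_uri)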
Example #8
def createImage(workspace, scoring_file, model, image_name):
    '''
        TODO: We should probably allow the conda_pack/requirements to be identified so we can switch
              between CPU/GPU

        NOTE: This function doesn't check for an existing image because new builds
              simply create a new version of the container. If the caller doesn't want duplicates,
              they need to ensure that one does not exist already.


        Creates a new Docker Container image and uploads it to the associated ACR 
        with the workspace. 

        PARAMS: 
            workspace        : azureml.core.Workspace   : Existing AMLS Workspace
            scoring_file     : String                   : Name/path of local .py file that has an init() and run() function defined.
            model            : azureml.core.Model       : Registered AMLS model
            image_name       : String                   : Name of the container to be created.


        RETURNS: 
            azureml.core.image.ContainerImage
    '''

    conda_pack = []
    requirements = ["azureml-defaults==1.0.57", "azureml-contrib-services"]

    print("Creating image...")

    simple_environment = CondaDependencies.create(conda_packages=conda_pack,
                                                  pip_packages=requirements)

    with open("simple.yml", "w") as f:
        f.write(simple_environment.serialize_to_string())

    image_config = ContainerImage.image_configuration(
        execution_script=scoring_file,
        runtime="python",
        conda_file="simple.yml",
        description="Image with dummy (unused) model",
        tags={"type": "noop"},
        dependencies=[])

    image = ContainerImage.create(
        name=image_name,
        models=[model],
        image_config=image_config,
        workspace=workspace,
    )

    image.wait_for_creation(show_output=True)
    print("Image created IMAGE/VERSION: ", image.name, '/', image.version)

    return image
Example #9
    def _get_conda_deps(self, step):
        """
        converts requirements.txt from user into conda dependencies for AzML

        :param dict step: step defined by user that we are currently building

        :returns: conda_dependencies
        :rtype: CondaDependencies
        """
        with open(step["requirements"], "r") as f:
            packages = [line.strip() for line in f]

        return CondaDependencies.create(pip_packages=packages)
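The same conversion outside a class, as a standalone sketch (the requirements file name is an assumption), wired into a RunConfiguration the way other examples on this page do:

from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies

# Read the user's requirements.txt and turn it into pip dependencies
with open("requirements.txt", "r") as f:
    packages = [line.strip() for line in f if line.strip()]

run_config = RunConfiguration()
run_config.environment.python.conda_dependencies = CondaDependencies.create(
    pip_packages=packages)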
Example #10
def main():
    # get access to workspace
    try:
        ws = Workspace.from_config()
        print(ws.name, ws.resource_group, ws.location, sep='\t')
        print('Library configuration succeeded')
    except Exception:
        print('Workspace not found')
        return

    # get model
    model = Model(ws, 'absa')

    # deploy model

    pip = [
        "azureml-defaults", "azureml-monitoring",
        "git+https://github.com/NervanaSystems/nlp-architect.git@absa",
        "spacy==2.1.4"
    ]

    myenv = CondaDependencies.create(pip_packages=pip)

    with open("absaenv.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    deploy_env = Environment.from_conda_specification('absa_env',
                                                      "absaenv.yml")
    deploy_env.environment_variables = {'NLP_ARCHITECT_BE': 'CPU'}

    inference_config = InferenceConfig(environment=deploy_env,
                                       entry_script="score.py")

    deploy_config = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        description='Aspect-Based Sentiment Analysis - Intel')
    print('Initiating deployment')
    deployment = Model.deploy(ws,
                              'absa-svc',
                              models=[model],
                              inference_config=inference_config,
                              deployment_config=deploy_config,
                              overwrite=True)

    deployment.wait_for_deployment(show_output=True)
    print('Getting Logs')
    print(deployment.get_logs())
    print('Done!')
Example #11
def get_automl_environment():
    from azureml.core import Environment
    from azureml.core.conda_dependencies import CondaDependencies
    from azureml.core.runconfig import DEFAULT_CPU_IMAGE

    train_env = Environment(name="many_models_environment_automl")
    train_conda_deps = CondaDependencies.create(
        pip_packages=['azureml-sdk[automl]', 'joblib', 'pyarrow==0.14'])
    train_conda_deps.add_conda_package('pandas==0.23.4')
    train_conda_deps.add_conda_package('numpy==1.16.2')
    train_conda_deps.add_conda_package('fbprophet==0.5')
    train_conda_deps.add_conda_package('py-xgboost==0.90')
    train_env.python.conda_dependencies = train_conda_deps
    train_env.docker.enabled = True
    train_env.docker.base_image = DEFAULT_CPU_IMAGE
    return train_env
Example #12
    def executeAction(self, experiment, project_folder, training_Script,
                      cpu_cluster, github_SHA):
        # TODO: take the environment as user input so it can be reused when resubmitting the experiment.
        myenv = Environment("myenv")
        myenv.docker.enabled = True
        myenv.python.conda_dependencies = CondaDependencies.create(
            conda_packages=['scikit-learn'])
        src = ScriptRunConfig(source_directory=project_folder,
                              script=training_Script)
        # Set compute target to the one created in previous step
        src.run_config.target = cpu_cluster.name

        # Set environment
        src.run_config.environment = myenv
        tags = {"github_SHA": github_SHA}
        run = experiment.submit(config=src, tags=tags)
Example #13
def create_lightgbm_conda_file(conda_file: str = "lgbmenv.yml"):
    """
    Create new Conda File with LightGBM requirements.

    :param conda_file: filename of LightGBM conda file, which is created during call.
    """
    conda_pack = ["scikit-learn==0.19.1", "pandas==0.23.3"]
    requirements = [
        "lightgbm==2.1.2",
        "azureml-defaults==1.0.57",
        "azureml-contrib-services",
        "Microsoft-AI-Azure-Utility-Samples",
    ]
    lgbmenv = CondaDependencies.create(
        conda_packages=conda_pack, pip_packages=requirements
    )
    with open(conda_file, "w") as file:
        file.write(lgbmenv.serialize_to_string())
Example #14
def data_preprocess_step(raw_data_dir, compute_target):

    run_config = RunConfiguration()
    run_config.environment.python.conda_dependencies = CondaDependencies.create(
        pip_packages=['pandas'])
    run_config.environment.docker.enabled = True

    train_dir = PipelineData(name='train_dir',
                             pipeline_output_name='train_dir',
                             datastore=raw_data_dir.datastore,
                             output_mode='mount',
                             is_directory=True)

    test_dir = PipelineData(name='test_dir',
                            pipeline_output_name='test_dir',
                            datastore=raw_data_dir.datastore,
                            output_mode='mount',
                            is_directory=True)

    outputs = [train_dir, test_dir]
    outputs_map = {
        'train_dir': train_dir,
        'test_dir': test_dir,
    }

    step = PythonScriptStep(script_name='data_preprocess.py',
                            arguments=[
                                '--raw_data_dir',
                                raw_data_dir,
                                '--train_dir',
                                train_dir,
                                '--test_dir',
                                test_dir,
                            ],
                            inputs=[raw_data_dir],
                            outputs=outputs,
                            compute_target=compute_target,
                            runconfig=run_config,
                            source_directory=os.path.dirname(
                                os.path.abspath(__file__)),
                            allow_reuse=True)

    return step, outputs_map
Example #15
def create_conda_environment(workspace, name, conda_dependencies,
                             pip_dependencies):
    """
    Create an environment or retrieve it by its name from workspace
    Pip installs Python packages whereas conda installs packages which may contain software written in any language.
    e.g. TensorFlow, Scikit-Learn -> Conda, Matplotlib -> pip   
    """
    if name in Environment.list(workspace):
        env = Environment.get(workspace=workspace, name=name)
        print("The environment '{}' already existed for the workspace".format(
            name))
    else:
        env = Environment(name=name)
        env.docker.enabled = True
        env.python.conda_dependencies = CondaDependencies.create(
            conda_packages=conda_dependencies,
            pip_packages=pip_dependencies,
        )
        env.register(workspace=workspace)
    return env
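A usage sketch that follows the docstring's conda/pip split (the workspace handle, environment name, and package choices are illustrative):

env = create_conda_environment(
    workspace=ws,
    name="sklearn-env",
    conda_dependencies=["scikit-learn", "tensorflow"],
    pip_dependencies=["matplotlib"],
)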
Example #16
def get_automl_environment():
    from azureml.core import Environment
    from azureml.core.conda_dependencies import CondaDependencies
    from azureml.core.runconfig import DEFAULT_CPU_IMAGE

    train_env = Environment(name="many_models_environment_automl")
    train_conda_deps = CondaDependencies.create(
        pip_packages=['azureml-sdk[automl]', 'pyarrow==0.14'])
    train_conda_deps.add_pip_package('py-cpuinfo==5.0.0')
    train_conda_deps.add_conda_package('psutil')
    train_conda_deps.add_conda_package('pandas==0.23.4')
    train_conda_deps.add_conda_package('numpy==1.16.2')
    train_conda_deps.add_conda_package('fbprophet==0.5')
    train_conda_deps.add_conda_package('py-xgboost==0.90')
    train_env.python.conda_dependencies = train_conda_deps
    train_env.docker.enabled = True
    train_env.docker.base_image = DEFAULT_CPU_IMAGE
    env = {}
    env['AZUREML_FLUSH_INGEST_WAIT'] = ''
    train_env.environment_variables = env
    return train_env
Example #17
def mi_run_config(ws, compute):
    whl_url = Environment.add_private_pip_wheel(workspace=ws,
                                                file_path=d.WHL_VINX_AZURE_ML,
                                                exist_ok=True)
    run_config = RunConfiguration()
    run_config.target = compute
    run_config.environment.docker.enabled = True
    run_config.environment.docker.base_image = None
    run_config.environment.docker.base_dockerfile = 'FROM mcr.microsoft.com/azureml/base:latest\nRUN apt-get update && apt-get -y install freetds-dev freetds-bin vim gcc'
    run_config.environment.python.user_managed_dependencies = False
    run_config.environment.python.conda_dependencies = CondaDependencies.create(
        conda_packages=[
            'tqdm', 'cython', 'matplotlib', 'scikit-learn', 'fbprophet'
        ],
        pip_packages=[
            'azureml-sdk', 'pandas', 'lightgbm', 'scipy==1.4.1', 'statsmodels',
            'mlxtend', 'optuna', 'xgboost', 'CatBoost', 'tensorflow', 'keras',
            'jpholiday', 'joblib', 'pymssql==2.1.1'
        ],
        pin_sdk_version=False)
    run_config.environment.python.conda_dependencies.add_pip_package(whl_url)

    return run_config
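The returned run configuration is typically attached to a ScriptRunConfig, as in Example #25; a hedged sketch where ws and compute_target are assumed handles and the script, folder, and experiment names are illustrative:

from azureml.core import Experiment, ScriptRunConfig

src = ScriptRunConfig(source_directory="scripts",
                      script="train.py",
                      run_config=mi_run_config(ws, compute_target))
run = Experiment(ws, "mi-train").submit(src)
run.wait_for_completion(show_output=True)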
Example #18
from azureml.core import RunConfiguration, ScriptRunConfig, Dataset, Workspace, Environment
from azureml.core.runconfig import Data, DataLocation, Dataset as RunDataset
from azureml.core.script_run_config import get_run_config_from_script_run
from azureml.core.conda_dependencies import CondaDependencies

# Connect to the workspace
ws = Workspace.from_config()

# Create a new environment and set Conda dependencies
conda_env = Environment('conda-env')
conda_env.python.conda_dependencies = CondaDependencies.create(
    pin_sdk_version=False,
    pip_packages=[
        'scikit-learn',
        'azureml-sdk',
        'azureml-dataprep[pandas,fuse]'
    ])

# Get the dataset that will be used
dataset = Dataset.get_by_name(ws, 'mnist-dataset')
# Define the named input under which the dataset will be mounted
input_name = 'mnist'
# Define the name of the compute target for training
compute_name = 'cpu-cluster'

# Define the script run config
src = ScriptRunConfig(
    source_directory='scripts',
    script='train.py',
    arguments=[
        '--data-folder',
Example #19
compute_target.wait_for_completion(show_output=True,
                                   min_node_count=None,
                                   timeout_in_minutes=20)

# In[41]:

# prepare the run configuration
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies

run_config = RunConfiguration(framework="python")
run_config.target = compute_target
run_config.environment.docker.enabled = True
run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE

dependencies = CondaDependencies.create(
    pip_packages=["scikit-learn", "scipy", "numpy"])
run_config.environment.python.conda_dependencies = dependencies

# Not explainable
# automl_config = AutoMLConfig(task='classification',
#                              debug_log='automl_errors.log',
#                              path=project_folder,
#                              compute_target=compute_target,
#                              run_configuration=run_config,
#                              X = X,  ##use the remote uploaded data
#                              y = y,
#                              **automl_settings,
#                              )
#
#                              #explainability
# automl_config = AutoMLConfig(task='classification',
Example #20
        result = str(e)
        return result
    
""".format(model_name=model_name)

exec(score_sparkml)

with open("score_sparkml.py", "w") as file:
    file.write(score_sparkml)

# COMMAND ----------

from azureml.core.conda_dependencies import CondaDependencies

myacienv = CondaDependencies.create(
    conda_packages=[]
)  # shows how to add libraries as an example; none are needed for this model.

with open("mydeployenv.yml", "w") as f:
    f.write(myacienv.serialize_to_string())

# COMMAND ----------

#deploy to ACI
from azureml.core.webservice import AciWebservice, Webservice

myaci_config = AciWebservice.deploy_configuration(
    cpu_cores=2,
    memory_gb=2,
    tags={'name': 'Databricks Azure ML ACI'},
    description='SMS Spam Classifier')
Example #21

# Usually, the cluster already exists, so we just fetch it
compute_target = next(
    (m for m in ComputeTarget.list(ws) if m.name == compute["name"]), None
)

# Specify the compute environment and register it for use in scoring
env = Environment("component-condition")
env.docker.enabled = True
cd = CondaDependencies.create(
    conda_packages=[
        "tensorflow=2.0.0",
        "pandas",
        "numpy",
        "matplotlib"
        ],
    pip_packages=[
        "azureml-mlflow==1.5.0",
        "azureml-defaults==1.5.0"
    ]
)
env.python.conda_dependencies = cd
env.register(workspace=ws)
print("Registered environment component-condition")

# Specify the run configuration
run_config = RunConfiguration()
run_config.environment.docker.enabled = True
run_config.environment.python.conda_dependencies = cd

# Pipeline definition
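The snippet stops at the pipeline definition; a minimal hedged continuation in the style of the PythonScriptStep examples elsewhere on this page (step, script, folder, and experiment names are assumptions):

from azureml.core import Experiment
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import PythonScriptStep

train_step = PythonScriptStep(name="train",
                              script_name="train.py",
                              source_directory="scripts",
                              compute_target=compute_target,
                              runconfig=run_config,
                              allow_reuse=True)
pipeline = Pipeline(workspace=ws, steps=[train_step])
run = Experiment(ws, "component-condition").submit(pipeline)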
Example #22
    cpu_cores=1,
    memory_gb=1,
    tags={
        "data": "Random Recommender",
        "Gestore": "Alessandro Artoni",
        "Owner": "Alessandro Artoni",
        "Environment": "dev",
        "Progetto": "Random recommender example"
    },
    description='Example on how to deploy a random recommender',
)

logger.info("ACI Deployed")

env = Environment('custom')
env.python.conda_dependencies = CondaDependencies.create(
    pip_packages=['azureml-defaults', 'joblib', 'numpy'])

inference_config = InferenceConfig(entry_script="score.py",
                                   source_directory="recommender",
                                   environment=env)

logger.info("Inference config setted")

if WHERE_TO_DEPLOY == "LOCAL":
    deployment_config = LocalWebservice.deploy_configuration(port=8890)
    # Deploy the service
    service = Model.deploy(ws, "localmodel", [model], inference_config,
                           deployment_config)
    # Wait for the deployment to complete
    service.wait_for_deployment(True)
    # Display the port that the web service is available on
Example #23
    experiment = Experiment(ws, experiment_name)
    automl_runs = list(experiment.get_runs(type='automl'))

    assert len(automl_runs) == 1

    compute_name = 'mydsvmb'

    dsvm_compute = ws.compute_targets[compute_name]

    # create a new RunConfig object
    conda_run_config = RunConfiguration(framework="python")

    # Set compute target to the Linux DSVM
    conda_run_config.target = dsvm_compute

    cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'],
                                  conda_packages=['numpy'])
    conda_run_config.environment.python.conda_dependencies = cd

    automl_settings = {
        "iteration_timeout_minutes": 60,
        "iterations": 100,
        "n_cross_validations": 5,
        "primary_metric": 'AUC_weighted',
        "preprocess": True,
        "max_cores_per_iteration": 2
    }

    automl_config = AutoMLConfig(task='classification',
                                 path=project_folder,
                                 run_configuration=conda_run_config,
                                 data_script=project_folder + "/get_data.py",
Example #24
    container_registry = ContainerRegistry()
    container_registry.address = deployment_settings["image"]["docker"][
        "custom_image_registry_details"]["address"]
    container_registry.username = deployment_settings["image"]["docker"][
        "custom_image_registry_details"]["username"]
    container_registry.password = deployment_settings["image"]["docker"][
        "custom_image_registry_details"]["password"]
else:
    container_registry = None

# Creating dependencies
print("Creating dependencies and registering environment")
conda_dep = CondaDependencies.create(
    conda_packages=deployment_settings["image"]["dependencies"]
    ["conda_packages"],
    pip_packages=deployment_settings["image"]["dependencies"]["pip_packages"],
    python_version=deployment_settings["image"]["dependencies"]
    ["python_version"],
    pin_sdk_version=deployment_settings["image"]["dependencies"]
    ["pin_sdk_version"])
dep_path = os.path.join("code", "scoring", "myenv.yml")
conda_dep.save(path=dep_path)

# Creating InferenceConfig
print("Creating InferenceConfig")
if deployment_settings["image"]["use_custom_environment"]:
    env = utils.get_environment(name_suffix="_deployment")
    inferenceConfig = InferenceConfig(
        entry_script=deployment_settings["image"]["entry_script"],
        source_directory=deployment_settings["image"]["source_directory"],
        runtime=deployment_settings["image"]["runtime"],
        environment=env)
Example #25
# Create a new runconfig object
run_amlcompute = RunConfiguration()

# Use the cpu_cluster you created above.
run_amlcompute.target = cpu_cluster

# Enable Docker
run_amlcompute.environment.docker.enabled = True

# Set Docker base image to the default CPU-based image
run_amlcompute.environment.docker.base_image = DEFAULT_CPU_IMAGE

# Use conda_dependencies.yml to create a conda environment in the Docker image for execution
run_amlcompute.environment.python.user_managed_dependencies = False

# Specify CondaDependencies obj, add necessary packages
run_amlcompute.environment.python.conda_dependencies = CondaDependencies.create(
    conda_packages=['scikit-learn'])
#</run_amlcompute>

# Submit the experiment using the run configuration
#<amlcompute_submit>
from azureml.core import ScriptRunConfig

src = ScriptRunConfig(source_directory=script_folder,
                      script='train.py',
                      run_config=run_amlcompute)
run = exp.submit(src)
run.wait_for_completion(show_output=True)
#</amlcompute_submit>
Example #26
    try:
        start = time.time()   # start timer
        input_data = preprocess(input_data_json)
        input_name = session.get_inputs()[0].name  # get the id of the first input of the model   
        result = session.run([], {input_name: input_data})
        end = time.time()     # stop timer
        return {"result": postprocess(result),
                "time": end - start}
    except Exception as e:
        result = str(e)
        return {"error": result}

# %%
from azureml.core.conda_dependencies import CondaDependencies 

myenv = CondaDependencies.create(pip_packages=["numpy","onnxruntime","azureml-core", "azureml-defaults"])

with open("myenv.yml","w") as f:
    f.write(myenv.serialize_to_string())   

# %%    
# 
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment


myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv) 

# %%    
from azureml.core.webservice import AciWebservice
Example #27
    blob_datastore = DataStoreConfig.config(ws, config.BLOB_DATASTORE_NAME,
                                            config.ACCOUNT_NAME,
                                            config.CONTAINER_NAME,
                                            config.ACCOUNT_KEY)

    print("get datasets from datastore")

    input_data_paths = [(blob_datastore, 'mldata')]
    input_dataset = Dataset.File.from_files(path=input_data_paths)

    # ----PYTHON ENV------
    #-------------------------
    packages = CondaDependencies.create(
        conda_packages=["cudatoolkit=10.0"],
        pip_packages=[
            'azureml-sdk', 'PyYAML', 'azure-storage-blob', 'matplotlib',
            'seaborn', 'tensorflow', 'Keras', 'tensorflow-hub', 'joblib',
            'tqdm', 'Pillow', 'azureml-dataprep[pandas,fuse]>=1.1.14'
        ])

    diagnoz_env = Environment("diagnoz-pipeline-env")
    diagnoz_env.python.user_managed_dependencies = False  # Let Azure ML manage dependencies
    diagnoz_env.docker.enabled = True  # Use a docker container
    diagnoz_env.docker.base_image = DEFAULT_GPU_IMAGE
    diagnoz_env.python.conda_dependencies = packages
    diagnoz_env.register(workspace=ws)

    # Runconfigs
    pipeline_run_config = RunConfiguration()
    pipeline_run_config.target = compute_target
    pipeline_run_config.environment = diagnoz_env
Example #28
print('Updating scoring file with the correct model name')
with open('score.py') as f:
    data = f.read()
with open('score_fixed.py', "w") as f:
    f.write(data.replace('MODEL-NAME',
                         args.model_name))  #replace the placeholder MODEL-NAME
    print('score_fixed.py saved')

# create a Conda dependencies environment file
print("Creating conda dependencies file locally...")
conda_packages = ['numpy']
pip_packages = [
    'tensorflow==2.0.0', 'keras==2.3.1', 'azureml-sdk', 'azureml-monitoring'
]
mycondaenv = CondaDependencies.create(conda_packages=conda_packages,
                                      pip_packages=pip_packages)

conda_file = 'scoring_dependencies.yml'
with open(conda_file, 'w') as f:
    f.write(mycondaenv.serialize_to_string())

# create container image configuration
print("Creating container image configuration...")
image_config = ContainerImage.image_configuration(
    execution_script='score_fixed.py', runtime='python', conda_file=conda_file)

print("Creating image...")
image = Image.create(name=args.image_name,
                     models=[latest_model],
                     image_config=image_config,
                     workspace=ws)
Example #29
def build_vocab_step(train_dir, compute_target):
    '''
    This step builds the vocabulary from the preprocessed training data
    produced by the previous step.

    :param train_dir: The reference to the directory containing the training data
    :type train_dir: DataReference
    :param compute_target: The compute target to run the step on
    :type compute_target: ComputeTarget

    :return: The vocab step, step outputs dictionary (keys: vocab_dir)
    :rtype: PythonScriptStep, dict
    '''

    run_config = RunConfiguration()
    run_config.environment.docker.enabled = True
    run_config.environment.docker.base_image = DEFAULT_CPU_IMAGE
    run_config.environment.python.user_managed_dependencies = False
    conda_packages = ['pytorch']
    run_config.environment.python.conda_dependencies = CondaDependencies.create(
        conda_packages=conda_packages)

    input_col = PipelineParameter(name='input_col', default_value='Title')
    output_col = PipelineParameter(name='output_col', default_value='Abstract')
    size = PipelineParameter(name='size', default_value=50000)
    freq_cutoff = PipelineParameter(name='freq_cutoff', default_value=2)

    vocab_dir = PipelineData(name='vocab_dir',
                             pipeline_output_name='vocab_dir',
                             datastore=train_dir.datastore,
                             output_mode='mount',
                             is_directory=True)

    outputs = [vocab_dir]
    outputs_map = {
        'vocab_dir': vocab_dir,
    }

    step = PythonScriptStep(name="Build Vocab",
                            script_name='build_vocab.py',
                            arguments=[
                                '--train_dir',
                                train_dir,
                                '--vocab_dir',
                                vocab_dir,
                                '--input_col',
                                input_col,
                                '--output_col',
                                output_col,
                                '--size',
                                size,
                                '--freq_cutoff',
                                freq_cutoff,
                            ],
                            inputs=[train_dir],
                            outputs=outputs,
                            compute_target=compute_target,
                            runconfig=run_config,
                            source_directory=os.path.dirname(
                                os.path.abspath(__file__)),
                            allow_reuse=True)

    return step, outputs_map
Example #30
ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      create_resource_group=False,
                      location=workspace_region)

print('Workspace configuration completed')

# In[2]: Web service and container deployment configuration
from azureml.core.conda_dependencies import CondaDependencies
import os

os.chdir(working_directory)

myacienv = CondaDependencies.create(
    pip_packages=['gensim', 'nltk', 'numpy', 'inference_schema'])

with open("mydeployenv.yml", "w") as f:
    f.write(myacienv.serialize_to_string())

# ## Deployment
from azureml.core.webservice import AciWebservice, Webservice

# Deployment configuration for the container web service
aci_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                memory_gb=1,
                                                tags={"name": "Summarization"},
                                                description="Summarizes text")

# Container image configuration
service_name = "summarization1"