Example #1
def _load_remote_environment(mlproject):
    '''
    Returns Environment object for remote compute execution with conda
    :param mlproject: instance of Project object from mlflow.projects._project_spec
    :return environment: AzureML Environment object
    '''
    if mlproject.conda_env_path:
        _logger.info(
            _CONSOLE_MSG.format(
                "Creating remote conda environment for project using MLproject"
            ))
        environment = Environment.from_conda_specification(
            name="environment", file_path=mlproject.conda_env_path)
        if environment.python.conda_dependencies._get_pip_package_with_prefix(
                "azureml-mlflow") == []:
            _logger.warning(
                "WARNING: MLproject doesn't contain pip dependency azureml-mlflow. Adding it now..."
            )
            environment.python.conda_dependencies.add_pip_package(
                "azureml-mlflow")
        environment.docker.enabled = True
        return environment
    elif mlproject.docker_env:
        # Docker support is WIP!
        raise ExecutionException(
            "Docker support is in progress, please specify a conda environment instead"
        )
Example #2
def get_environment(workspace: Workspace,
                    environment_name: str,
                    conda_dependencies_file: str,
                    create_new: bool = False,
                    enable_docker: bool = None,
                    use_gpu: bool = False):
    try:
        e = Env()
        environments = Environment.list(workspace=workspace)
        restored_environment = None
        for env in environments:
            if env == environment_name:
                restored_environment = environments[environment_name]

        if restored_environment is None or create_new:
            new_env = Environment.from_conda_specification(
                environment_name,
                os.path.join(e.sources_directory_train,
                             conda_dependencies_file),  # NOQA: E501
            )  # NOQA: E501
            restored_environment = new_env
            if enable_docker is not None:
                restored_environment.docker.enabled = enable_docker
                restored_environment.docker.base_image = DEFAULT_GPU_IMAGE if use_gpu else DEFAULT_CPU_IMAGE  # NOQA: E501
            restored_environment.register(workspace)

        if restored_environment is not None:
            print(restored_environment)
        return restored_environment
    except Exception:
        traceback.print_exc()
        exit(1)
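A minimal call sketch for the helper above, assuming a workspace config file on disk and a conda_dependencies.yml under the Env() sources directory (both names are hypothetical):

from azureml.core import Workspace

ws = Workspace.from_config()
# Create (or re-create) and register a Docker-enabled CPU environment.
env = get_environment(ws, "train-env", "conda_dependencies.yml",
                      create_new=True, enable_docker=True, use_gpu=False)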
Example #3
def get_environment(workspace,
                    env_name,
                    conda_dependencies,
                    create_new=False,
                    enable_docker=None,
                    use_gpu=False):
    try:
        e = ENV()
        restore_env = None
        environments = Environment.list(workspace=workspace)
        for env in environments:
            if env == env_name:
                restore_env = environments[env_name]

        if restore_env is None or create_new:
            new_env = Environment.from_conda_specification(
                env_name,
                os.path.join(e.source_train_directory, conda_dependencies))
            restore_env = new_env
            if enable_docker:
                restore_env.docker.enabled = enable_docker
                restore_env.docker.base_image = DEFAULT_GPU_IMAGE if use_gpu else DEFAULT_CPU_IMAGE
            restore_env.register(workspace)
        if restore_env is not None:
            print(f'created environment: {restore_env}')
        return restore_env
    except Exception as error:
        print(f'Error occurred: {error}')
        exit(1)
Example #4
def get_environment(workspace: Workspace,
                    environment_name: str,
                    conda_dependencies_file: str,
                    create_new: bool = False,
                    enable_docker: bool = None,
                    use_gpu: bool = False):
    try:
        e = Env()
        environments = Environment.list(workspace=workspace)
        restored_environment = None
        for env in environments:
            if env == environment_name:
                restored_environment = environments[environment_name]

        if restored_environment is None or create_new:
            new_env = Environment.from_conda_specification(
                environment_name,
                os.path.join(e.sources_directory_train,
                             conda_dependencies_file),  # NOQA: E501
            )  # NOQA: E501
            restored_environment = new_env
            if enable_docker is not None:
                # DockerConfiguration replaces Environment.docker in newer
                # SDK versions; keep a reference so it can be passed to the
                # ScriptRunConfig instead of being discarded.
                docker_config = DockerConfiguration(use_docker=enable_docker)  # NOQA: F841
            restored_environment.register(workspace)

        if restored_environment is not None:
            print(restored_environment)
        return restored_environment
    except Exception as e:
        print(e)
        exit(1)
Example #5
def get_environment(workspace: Workspace,
                    environment_name: str,
                    create_new: bool = False):
    try:
        e = Env()
        environments = Environment.list(workspace=workspace)
        restored_environment = None
        #   for env in environments:
        #       if env == environment_name:
        #           restored_environment = environments[environment_name]

        if restored_environment is None or create_new:
            new_env = Environment.from_conda_specification(
                environment_name,
                os.path.join(e.sources_directory_train,
                             "conda_dependencies.yml"))  # NOQA: E501
            restored_environment = new_env
            restored_environment.register(workspace)

        if restored_environment is not None:
            print(restored_environment)
        return restored_environment
    except Exception as e:
        print(e)
        exit(1)
Example #6
def get_environment(workspace: Workspace,
                    environment_name: str,
                    conda_dependencies_file: str,
                    create_new: bool = False,
                    enable_docker: bool = None,
                    use_gpu: bool = False):
    try:
        environments = Environment.list(workspace=workspace)
        restored_environment = None
        for env in environments:
            if env == environment_name:
                restored_environment = environments[environment_name]

        if restored_environment is None or create_new:
            new_env = Environment.from_conda_specification(
                environment_name,
                conda_dependencies_file,
            )
            restored_environment = new_env
            if enable_docker is not None:
                restored_environment.docker.enabled = enable_docker
                restored_environment.docker.base_image = DEFAULT_GPU_IMAGE if use_gpu else DEFAULT_CPU_IMAGE
            restored_environment.register(workspace)

        if restored_environment is not None:
            print(restored_environment)
        return restored_environment
    except Exception as e:
        print(e)
        exit(1)
Example #7
def _load_local_environment(mlproject, use_conda):
    '''
    Returns Environment object for project execution on local compute.
    If the MLproject contains a conda environment specification and use_conda is True
    from the backend_config object, we create a new environment from that specification
    for project training.
    If use_conda is False, we use the local, activated conda environment.
    :param mlproject: Project object loaded by Mlflow
    :param use_conda: bool
    :rtype environment: AzureML Environment object
    '''
    if mlproject.docker_env:
        # Docker support is WIP!
        raise ExecutionException(
            "Docker support is in progress, please specify a conda environment instead"
        )
    elif use_conda is True and mlproject.conda_env_path:
        print(
            _CONSOLE_MSG.format(
                "Creating conda environment from Mlproject for local run"))
        try:
            environment = Environment.from_conda_specification(
                name="environment", file_path=mlproject.conda_env_path)
            environment.python.user_managed_dependencies = False
            # check if azureml-mlflow in pip dependencies
            if environment.python.conda_dependencies._get_pip_package_with_prefix(
                    "azureml-mlflow") == []:
                _logger.warning(
                    "WARNING: MLproject doesn't contain pip dependency azureml-mlflow. Adding it now..."
                )
                environment.python.conda_dependencies.add_pip_package(
                    "azureml-mlflow")
        except ExecutionException as e:
            raise ExecutionException(e)
        return environment
    else:
        conda_home = os.environ.get('CONDA_DEFAULT_ENV')
        if conda_home:
            try:
                if conda_home == "base":
                    _logger.warning(
                        "WARNING: Using inactive base conda environement for local run"
                    )
                environment = Environment.from_existing_conda_environment(
                    "environment", conda_home)
                environment.python.user_managed_dependencies = True
                print(
                    _CONSOLE_MSG.format(
                        "Using local conda environment {} for local run".
                        format(conda_home)))
            except ExecutionException as e:
                raise ExecutionException(e)
            return environment
        else:
            raise ExecutionException(
                "Local conda environment not found, check your path.")
Example #8
def get_environment():
    """
    Return an environment
    """
    environment = Environment.from_conda_specification(
        name="hd-train",
        file_path="./nd00333/model/hyperdrive/train/environment.yml")
    environment.python.user_managed_dependencies = False
    environment.docker.enabled = True
    environment.docker.base_image = (
        "mcr.microsoft.com/azureml/intelmpi2018.3-ubuntu16.04:20200821.v1")
    return environment
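A short sketch of how the returned environment could be attached to a run configuration; the entry script and cluster names are hypothetical:

from azureml.core import ScriptRunConfig

src = ScriptRunConfig(source_directory=".",
                      script="train.py",             # hypothetical entry script
                      compute_target="gpu-cluster",  # hypothetical cluster
                      environment=get_environment())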
Example #9
def register_new_conda_environment(env_name, yml_file_path):
    try:
        ws = _establish_connection_to_aml_workspace()
    except Exception as e:
        raise e
    try:
        new_env = Environment.from_conda_specification(name=env_name,
                                                       file_path=yml_file_path)
        new_env.register(ws)
        print("New environment {} has been registerd".format(env_name))
    except Exception as e:
        raise e
Example #10
def main():
    # get access to workspace
    try:
        ws = Workspace.from_config()
        print(ws.name, ws.location, ws.resource_group, sep='\t')
        print('Library configuration succeeded')
    except Exception:
        print('Workspace not found')
        return

    # get model
    model = Model(ws, 'absa')

    # deploy model

    pip = [
        "azureml-defaults", "azureml-monitoring",
        "git+https://github.com/NervanaSystems/nlp-architect.git@absa",
        "spacy==2.1.4"
    ]

    myenv = CondaDependencies.create(pip_packages=pip)

    with open("absaenv.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    deploy_env = Environment.from_conda_specification('absa_env',
                                                      "absaenv.yml")
    deploy_env.environment_variables = {'NLP_ARCHITECT_BE': 'CPU'}

    inference_config = InferenceConfig(environment=deploy_env,
                                       entry_script="score.py")

    deploy_config = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        description='Aspect-Based Sentiment Analysis - Intel')
    print('Initiating deployment')
    deployment = Model.deploy(ws,
                              'absa-svc',
                              models=[model],
                              inference_config=inference_config,
                              deployment_config=deploy_config,
                              overwrite=True)

    deployment.wait_for_deployment(show_output=True)
    print('Getting Logs')
    deployment.get_logs()
    print('Done!')
Example #11
    def deploy(self):
        myenv = CondaDependencies()
        myenv.add_pip_package("azureml-sdk")
        myenv.add_pip_package("joblib")
        myenv.add_pip_package("tensorflow")
        myenv.add_pip_package("Pillow")
        myenv.add_pip_package("azureml-dataprep[pandas,fuse]>=1.1.14")

        with open("diagnoz_env.yml", "w") as f:
            f.write(myenv.serialize_to_string())

        huml_env = Environment.from_conda_specification(
            name="diagnoz_env", file_path="diagnoz_env.yml")

        inference_config = InferenceConfig(entry_script="score.py",
                                           source_directory='.',
                                           environment=huml_env)
        print("file deployement : ")
        for root, dir_, files in os.walk(os.getcwd()):
            print("dir_", dir_)
            for filename in files:
                print("filename :", filename)

        aciconfig = AciWebservice.deploy_configuration(
            cpu_cores=1,
            memory_gb=1,
            tags={
                "data": "cancer-data",
                "method": "tensorflow"
            },
            description='Predicting cancer with tensorflow')

        try:
            AciWebservice(self.ws, self.config.DEPLOY_SERVICE_NAME).delete()
            print("webservice deleted")
        except WebserviceException:
            pass

        model = self.ws.models[self.config.MODEL_NAME]

        service = Model.deploy(workspace=self.ws,
                               name=self.config.DEPLOY_SERVICE_NAME,
                               models=[model],
                               inference_config=inference_config,
                               deployment_config=aciconfig)

        service.wait_for_deployment(show_output=True)
        print("success deployement")
Example #12
def main(config, log):
    ws = Workspace(**config.workspace)

    env = Environment.from_conda_specification(**config.environment)
    env.docker.enabled = True
    env.docker.base_image = DEFAULT_GPU_IMAGE

    src = ScriptRunConfig(**config.run_config)
    src.run_config.environment = env
    src.run_config.target = ws.compute_targets[config.compute]

    experiment = Experiment(workspace=ws, name=config.experiment)
    run = experiment.submit(src)

    aml_url = run.get_portal_url()
    log.info(f'Run URL: {aml_url}')
Example #13
    def deploy(self):

        try:
            AciWebservice(self.ws, self.DEPLOY_SERVICE_NAME).delete()
            print("webservice deleted")
        except WebserviceException:
            pass

        conda_dep = CondaDependencies()                                        
        conda_dep.add_pip_package("joblib")
        conda_dep.add_pip_package("torch")
        conda_dep.add_pip_package("torchvision")
        conda_dep.add_pip_package("azureml-sdk")
        conda_dep.add_pip_package("azure-storage-blob")
        conda_dep.add_pip_package("PyYAML")
        conda_dep.add_pip_package("scikit-learn")
        conda_dep.add_pip_package("matplotlib")
        conda_dep.add_pip_package("opencensus-ext-azure")
        
        
        shoes_designer_env_file = "shoes_designer_env.yml"
        with open(shoes_designer_env_file,"w") as f:
            f.write(conda_dep.serialize_to_string())

        shoes_designer_env = Environment.from_conda_specification(name="shoes_designer_env", file_path=shoes_designer_env_file)

        inference_config = InferenceConfig(entry_script="score.py", environment=shoes_designer_env)

        aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, 
                                                    memory_gb=2, 
                                                    tags={"method" : "torch"}, 
                                                    description='Generate shoes with torch')

        model = self.ws.models[self.MODEL_NAME]

        service = Model.deploy(workspace=self.ws, 
                            name=self.DEPLOY_SERVICE_NAME, 
                            models=[model], 
                            inference_config=inference_config, 
                            deployment_config=aciconfig,
                            overwrite=True)
        service.wait_for_deployment(show_output=True)

        print("success deployement")        

        return service
Example #14
def create_azureml_env(ws, env_name, conda_yml):
    """
    Create an Azure ML environment based on a default AML docker image
    and a yaml file that specifies Conda and Pip dependencies.
    Azure ML will create a new custom docker image for the env.
    """
    try:
        amlenv = Environment.get(ws, name=env_name)
        print('found existing env {}'.format(amlenv.name))
    except Exception:
        print('create new env {}'.format(env_name))
        amlenv = Environment.from_conda_specification(name=env_name,
                                                      file_path=conda_yml)
        amlenv.docker.enabled = True
        amlenv.docker.base_image = DEFAULT_CPU_IMAGE
        amlenv.python.user_managed_dependencies = False
        amlenv.register(ws)
    return amlenv
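An example call, assuming an existing workspace config and a conda.yml next to the script (both names are hypothetical):

from azureml.core import Workspace

ws = Workspace.from_config()
# Builds and registers the environment once; later calls reuse the registered one.
amlenv = create_azureml_env(ws, env_name="train-env", conda_yml="conda.yml")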
Example #15
def main():
    """
    Minimum sample to use the Observability logger
    in Azure ML Run or alone
    """

    pwd = sys.path[0]
    # Submit an Azure ML Run which uses the logger
    aml_ws = Workspace.from_config()
    aml_exp = Experiment(aml_ws, 'test_logger')
    aml_env = Environment.from_conda_specification(
        'test_logger_env', f'{pwd}/conda_dependency.yml')
    # if aml_cluster isn't specified, it'll run locally but
    # will still log to AML and AppInsights
    if run_on_local:
        aml_config = ScriptRunConfig(source_directory=pwd,
                                     script='train.py',
                                     environment=aml_env)
    else:
        # setting aicxn does not work when running locally
        # because AML adds "\" before ";" in the connection string,
        # making the connection string invalid.
        aicxn = 'APPLICATIONINSIGHTS_CONNECTION_STRING'
        aml_env.environment_variables[aicxn] = os.environ[aicxn]
        aml_cluster = aml_ws.compute_targets['train-cluster']
        aml_config = ScriptRunConfig(source_directory=pwd,
                                     script='train.py',
                                     environment=aml_env,
                                     compute_target=aml_cluster)
    aml_exp.submit(aml_config)

    # Use the logger directly
    logger.log("Shouldn't log INFO if default severity is WARNING")
    logger.log("Run into ERROR", severity=Severity.ERROR)
    logger.log_metric(name="metric1_no_parent", value=100)
    logger.log_metric(name="metric2_with_parent", value=200, log_parent=True)
    try:
        raise Exception("Run into EXCEPTION")
    except Exception as ex:
        logger.exception(ex)
    # allow time for appinsights exporter to send metrics
    time.sleep(30)
Example #16
from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core import Environment
from azureml.core import ScriptRunConfig

if __name__ == "__main__":
    # Connect to the Azure ML workspace
    ws = Workspace.from_config(path='./.azureml', _file_name='config.json')

    # Experiment
    experiment = Experiment(workspace=ws, name='day2-experiment-train-model')
    config = ScriptRunConfig(source_directory='./src',
                             script='model.py',
                             compute_target='cpu-cluster')

    # set up the scikit-learn environment
    env = Environment.from_conda_specification(
        name='sklearn-aml-env', file_path='./.azureml/sklearn-env-aml.yml')

    config.run_config.environment = env

    # Execute the experiment
    run = experiment.submit(config)

    # Print the run's portal URL
    aml_url = run.get_portal_url()
    print(aml_url)
Example #17
def deploy(model_uri, workspace, deployment_config=None, service_name=None, model_name=None,
           tags=None, mlflow_home=None, synchronous=True):
    """
    Register an MLflow model with Azure ML and deploy a webservice to Azure Container Instances (ACI)
    or Azure Kubernetes Service (AKS).

    The deployed service will contain a webserver that processes model queries.
    For information about the input data formats accepted by this webserver, see the
    :ref:`MLflow deployment tools documentation <azureml_deployment>`.

    :param model_uri: The location, in URI format, of the MLflow model used to build the Azure
                      ML deployment image. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param workspace: The AzureML workspace in which to deploy the service. This is a
                      `azureml.core.Workspace` object.
    :param deployment_config: The configuration for the Azure web service. This configuration
                              allows you to specify the resources the webservice will use and
                              the compute cluster it will be deployed in. If unspecified, the web
                              service will be deployed into an Azure Container Instance. This is a
                              `azureml.core.DeploymentConfig` object. For more information, see
                              `<https://docs.microsoft.com/python/api/azureml-core/
                              azureml.core.webservice.aks.aksservicedeploymentconfiguration>`_ and
                              `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml
                              .core.webservice.aci.aciservicedeploymentconfiguration>`_
    :param service_name: The name to assign the Azure Machine Learning webservice that will be
                         created. If unspecified, a unique name will be generated.
    :param model_name: The name to assign to the Azure Model that will be created. If unspecified,
                       a unique model name will be generated.
    :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to
                 associate with the Azure Model and Deployment that will be created.
                 These tags are added to a set of default tags that include the model uri,
                 and more. For more information, see
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model(class)?view=azure-ml-py>`_.
    :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the
                        image will install MLflow from this directory. Otherwise, it will install
                        MLflow from pip.
    :param synchronous: If ``True``, this method blocks until the image creation procedure
                        terminates before returning. If ``False``, the method returns immediately,
                        but the returned image will not be available until the asynchronous
                        creation process completes. Use the
                        ``azureml.core.Webservice.wait_for_deployment()`` function to wait
                        for the deployment process to complete.
    :return: A tuple containing the following elements in order:
            - An ``azureml.core.webservice.Webservice`` object containing metadata for the
            new service.
            - An ``azureml.core.model.Model`` object containing metadata for the new model.

    .. code-block:: python
        :caption: Example

        import mlflow.azureml
        from azureml.core import Workspace
        from azureml.core.webservice import AciWebservice, Webservice

        # Load or create an Azure ML Workspace
        workspace_name = "<Name of your Azure ML workspace>"
        subscription_id = "<Your Azure subscription ID>"
        resource_group = "<Name of the Azure resource group in which to create Azure ML resources>"
        location = "<Name of the Azure location (region) in which to create Azure ML resources>"
        azure_workspace = Workspace.create(name=workspace_name,
                                           subscription_id=subscription_id,
                                           resource_group=resource_group,
                                           location=location,
                                           create_resource_group=True,
                                           exist_ok=True)

        # Create an Azure Container Instance webservice for an MLflow model
        azure_service, azure_model = mlflow.azureml.deploy(model_uri="<model_uri>",
                                                           service_name="<deployment-name>",
                                                           workspace=azure_workspace,
                                                           synchronous=True)
    """
    # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should
    # still be accessible for import from Python 2. Therefore, we will only import from the SDK
    # upon method invocation.
    # pylint: disable=import-error
    from azureml.core.model import Model as AzureModel, InferenceConfig
    from azureml.core import Environment as AzureEnvironment
    from azureml.core import VERSION as AZUREML_VERSION
    from azureml.core.webservice import AciWebservice

    absolute_model_path = _download_artifact_from_uri(model_uri)

    model_pyfunc_conf, model = _load_pyfunc_conf_with_model(model_path=absolute_model_path)
    model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None)
    run_id = None
    run_id_tag = None
    try:
        run_id = model.run_id
        run_id_tag = run_id
    except AttributeError:
        run_id = str(uuid.uuid4())
    if model_python_version is not None and\
            StrictVersion(model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(
            message=("Azure ML can only deploy models trained in Python 3 and above. See"
                     " the following MLflow GitHub issue for a thorough explanation of this"
                     " limitation and a workaround to enable support for deploying models"
                     " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"),
            error_code=INVALID_PARAMETER_VALUE)

    tags = _build_tags(model_uri=model_uri, model_python_version=model_python_version,
                       user_tags=tags, run_id=run_id_tag)

    if service_name is None:
        service_name = _get_mlflow_azure_name(run_id)
    if model_name is None:
        model_name = _get_mlflow_azure_name(run_id)

    with TempDir(chdr=True) as tmp:
        model_directory_path = tmp.path("model")
        tmp_model_path = os.path.join(
            model_directory_path,
            _copy_file_or_tree(src=absolute_model_path, dst=model_directory_path))

        registered_model = AzureModel.register(workspace=workspace, model_path=tmp_model_path,
                                               model_name=model_name, tags=tags)

        _logger.info("Registered an Azure Model with name: `%s` and version: `%s`",
                     registered_model.name, registered_model.version)

        # Create an execution script (entry point) for the image's model server. Azure ML requires
        # the container's execution script to be located in the current working directory during
        # image creation, so we create the execution script as a temporary file in the current
        # working directory.
        execution_script_path = tmp.path("execution_script.py")
        _create_execution_script(output_path=execution_script_path, azure_model=registered_model)

        environment = None
        if pyfunc.ENV in model_pyfunc_conf:
            environment = AzureEnvironment.from_conda_specification(
                _get_mlflow_azure_name(run_id),
                os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV]))
        else:
            environment = AzureEnvironment(_get_mlflow_azure_name(run_id))

        if mlflow_home is not None:
            path = tmp.path("dist")
            _logger.info("Bulding temporary MLFlow wheel in %s", path)
            wheel = _create_mlflow_wheel(mlflow_home, path)
            whl_url = AzureEnvironment.add_private_pip_wheel(
                workspace=workspace,
                file_path=wheel,
                exist_ok=True)
            environment.python.conda_dependencies.add_pip_package(whl_url)
        else:
            environment.python.conda_dependencies.add_pip_package(
                "mlflow=={}".format(mlflow_version))

        # AzureML requires azureml-defaults to be installed to include
        # flask for the inference server.
        environment.python.conda_dependencies.add_pip_package(
            "azureml-defaults=={}".format(AZUREML_VERSION))

        inference_config = InferenceConfig(entry_script=execution_script_path,
                                           environment=environment)

        if deployment_config is not None:
            if deployment_config.tags is not None:
                # We want more narrowly-scoped tags to win on merge
                tags.update(deployment_config.tags)
            deployment_config.tags = tags
        else:
            deployment_config = AciWebservice.deploy_configuration(tags=tags)

        webservice = AzureModel.deploy(
            workspace=workspace,
            name=service_name,
            models=[registered_model],
            inference_config=inference_config,
            deployment_config=deployment_config
        )
        _logger.info("Deploying an Azure Webservice with name: `%s`",
                     webservice.name)
        if synchronous:
            webservice.wait_for_deployment(show_output=True)
        return webservice, registered_model
Example #18
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 28 23:22:27 2021

@author: RK

"""

#%%
from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core import Environment
from azureml.core import ScriptRunConfig

if __name__ == "__main__":
    ws = Workspace.from_config()
    experiment = Experiment(workspace=ws, name='day1-experiment-train')
    config = ScriptRunConfig(source_directory='./scripts',
                             script='testp.py',
                             compute_target='MLcompute01')

    # set up pytorch environment
    env = Environment.from_conda_specification(name='pytorch-env',
                                               file_path='./environ/envpy.yml')
    config.run_config.environment = env

    run = experiment.submit(config)

    aml_url = run.get_portal_url()
    print(aml_url)
Example #19
from azureml.core import Workspace, Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core import Image

ws = Workspace.from_config()
# env = Environment(name = 'sahulat_env')
env = Environment.from_conda_specification(name='az-ls-ds-ml-env',
                                           file_path='../az-ls-ds-ml-env.yml')

# conda_dep = CondaDependencies()
# # Installs numpy version 1.17.0 conda package
# conda_deps = ['blas=1.0',
#  'ca-certificates=2020.1.1', 'certifi=2020.4.5.1', 'intel-openmp=2019.4', 'joblib=0.14.1', 'libcxx=4.0.1=h579ed51_0',
#  'libcxxabi=4.0.1', 'libedit=3.1.20181209', 'libffi=3.2.1', 'libgfortran=3.0.1', 'libsodium=1.0.16',
#  'llvm-openmp=4.0.1', 'mkl=2019.4', 'mkl-service=2.3.0', 'mkl_fft=1.0.15', 'mkl_random=1.1.0', 'ncurses=6.2',
#  'numpy=1.18.1', 'numpy-base=1.18.1', 'openssl=1.1.1f', 'pandas=1.0.3', 'pandoc=2.2.3.2', 'pip=20.0.2', 'python=3.8.2',
#   'python-dateutil=2.8.1', 'pytz=2019.3', 'readline=8.0', 'scikit-learn=0.22.1', 'scipy=1.4.1', 'setuptools=46.1.3',
#   'six=1.14.0', 'sqlite=3.31.1', 'tk=8.6.8', 'wheel=0.34.2', 'xz=5.2.5', 'zlib=1.2.11']
# pip_deps=['adal==1.2.2', 'azure-common==1.1.25','azure-core==1.4.0','azure-graphrbac==0.61.1','azure-identity==1.2.0',
# 'azure-mgmt-authorization==0.60.0','azure-mgmt-containerregistry==2.8.0','azure-mgmt-keyvault==2.2.0','azure-mgmt-resource==8.0.1',
# 'azure-mgmt-storage==9.0.0','azureml==0.2.7','azureml-core==1.3.0.post1','azureml-dataprep==1.4.3','azureml-dataprep-native==14.1.0',
# 'backports-tempfile==1.0','backports-weakref==1.0.post1','cffi==1.14.0','chardet==3.0.4','cloudpickle==1.3.0',
# 'contextlib2==0.6.0.post1','cryptography==2.9','distro==1.5.0','docker==4.2.0',
# 'dotnetcore2==2.1.13','idna==2.9','importlib-metadata==1.6.0','isodate==0.6.0','jeepney==0.4.3','jmespath==0.9.5',
# 'jsonpickle==1.4',
# 'msal==1.2.0','msal-extensions==0.1.3','msrest==0.6.13','msrestazure==0.6.3','ndg-httpsclient==0.5.1',
# 'oauthlib==3.1.0','pathspec==0.8.0','portalocker==1.7.0','pyasn1==0.4.8','pycparser==2.20','pyjwt==1.7.1','pyopenssl==19.1.0',
# 'requests==2.23.0','requests-oauthlib==1.3.0','ruamel-yaml==0.15.89','secretstorage==3.1.2','urllib3==1.25.9','websocket-client==0.57.0','zipp==3.1.0']

# for package in conda_deps:
#     conda_dep.add_conda_package(package)
Example #20
# Get workspace, datastores, and compute targets
print('Connecting to Workspace ...')
workspace = Workspace.from_config()
datastore = workspace.get_default_datastore()
compute_target = workspace.compute_targets[compute_target]  # NOQA: F821 (the target name is presumably set earlier in the full script)

# Get dataset and checkpoints
dataset = workspace.datasets['mask-data'].as_named_input('data').as_mount()
checkpoint = workspace.datasets['ssd-mobilenet-v2-checkpoint'].as_named_input(
    'checkpoint').as_mount()
tensorflow_models = workspace.datasets['tensorflow-models'].as_named_input(
    'tensorflowmodel').as_mount()

# Create run environment
env = Environment.from_conda_specification(name='mask-detector',
                                           file_path='env.yml')
env.docker.enabled = True
env.docker.base_image = 'mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04'

run_config = RunConfiguration()
run_config.environment = env

# Step 1: Train Model
train_output_dir = PipelineData(name='train_output',
                                pipeline_output_name='train_output',
                                datastore=datastore,
                                output_mode='mount',
                                is_directory=True)

train_step = PythonScriptStep(name='Train Model',
                              source_directory='./src',
Example #21
# training script
script_dir = prefix.joinpath("code", "train", "pytorch", "mnist-cnn")
script_name = "train.py"

# environment file
environment_file = prefix.joinpath("environments", "pytorch.yml")

# azure ml settings
environment_name = "pytorch-mnist-example"
experiment_name = "pytorch-mnist-example"
compute_name = "gpu-cluster"

# script arguments
arguments = ["--epochs", 2]

# create environment
env = Environment.from_conda_specification(environment_name, environment_file)

# create job config
src = ScriptRunConfig(
    source_directory=script_dir,
    script=script_name,
    arguments=arguments,
    environment=env,
    compute_target=compute_name,
)

# submit job
run = Experiment(ws, experiment_name).submit(src)
run.wait_for_completion(show_output=True)
Example #22
# In[74]:

get_ipython().run_cell_magic(
    'writefile', 'inference.py',
    '\nimport json\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport azureml.core\nfrom azureml.core.model import Model\nimport joblib\n\ncolumns = [\'gre\', \'gpa\', \'rank_1\', \'rank_2\', \'rank_3\']\n\ndef init():\n    global model\n    \n    print("Azure ML SDK version:", azureml.core.VERSION)\n    model_name = \'aks-intro\'\n    print(\'Looking for model path for model: \', model_name)\n    model_path = Model.get_model_path(model_name=model_name)\n    print(\'Looking for model in: \', model_path)\n    model = joblib.load(model_path)\n    print(\'Model initialized:\', time.strftime(\'%H:%M:%S\'))\n\ndef run(input_json):     \n    try:\n        inputs = json.loads(input_json)\n        data_df = pd.DataFrame(np.array(inputs).reshape(-1, len(columns)),\n                               columns = columns)\n        # Get the predictions...\n        prediction = model.predict(data_df)\n        prediction = json.dumps(prediction.tolist())\n    except Exception as e:\n        prediction = str(e)\n    return prediction'
)

# ## 2.8 Define the conda environment for the inference script

# In[75]:

get_ipython().system('cat ./environment.yml')

# In[76]:

environment = Environment.from_conda_specification(project,
                                                   './environment.yml')
environment.register(workspace=ws)

# ## 2.9 Define the Docker image configuration

# Use the provided inference script and configure the conda environment in the image as specified

# In[77]:

inference_config = InferenceConfig(entry_script='inference.py',
                                   environment=environment)

# ## 2.10 Define the configuration of the inference container

# Give the single "replica" container running on AKS 0.2 GB of RAM and 0.1 CPU core. Do not scale the number of containers depending on the load. For more configuration options see  https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice.akswebservice
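# A sketch of the deployment configuration the cell above describes
# (0.1 CPU core, 0.2 GB of RAM, a single replica, autoscaling disabled):

from azureml.core.webservice import AksWebservice

aks_config = AksWebservice.deploy_configuration(cpu_cores=0.1,
                                                memory_gb=0.2,
                                                num_replicas=1,
                                                autoscale_enabled=False)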
Example #23
        ct = ComputeTarget.create(ws, ct_name, compute_config)
        ct.wait_for_completion(show_output=True)

# create aks compute targets
if args.create_aks:
    for ct_name in akscomputes:
        if ct_name not in ws.compute_targets:
            print(f"Creating AKS compute {ct_name}")
            compute_config = AksCompute.provisioning_configuration(
                **akscomputes[ct_name])
            ct = ComputeTarget.create(ws, ct_name, compute_config)
            ct.wait_for_completion(show_output=True)

if args.datalake_name not in ws.datastores:
    print(f"Creating datastore {args.datalake_name}")
    adlsgen2_datastore = Datastore.register_azure_data_lake_gen2(
        workspace=ws,
        datastore_name=args.datalake_name,
        account_name=args.datalake_name,
        subscription_id=ws.subscription_id,
        resource_group=args.datalake_rg_name,
        filesystem="datalake",
        grant_workspace_access=True,
    )

for env_file in Path("./environments/").glob("*.yml"):
    print(f"Creating env {env_file.name}")
    env = Environment.from_conda_specification(name=env_file.name,
                                               file_path=env_file)
    env.register(ws)
Example #24
    args = parser.parse_args()

    ws = Workspace.from_config()
    experiment = Experiment(workspace=ws, name='cloud-moa-prediction')

    config = ScriptRunConfig(source_directory='./src',
                             script='remote-train.py',
                             compute_target='cpu-cluster',
                             arguments=[
                                 '--criterion', args.criterion,
                                 '--random_state', args.random_state,
                                 '--class_weight', args.class_weight
                             ])
    # set up the scikit-learn environment
    env = Environment.from_conda_specification(
        name='sklearn-remote-env',
        file_path='./azure-config/compute-env-config.yml')
    config.run_config.environment = env

    run = experiment.submit(config)
    aml_url = run.get_portal_url()
    print("Submitted to compute cluster. Click link below")
    print("")
    print(aml_url)
    run.wait_for_completion(show_output=True)
    run.register_model(
        model_name='moa_prediction_model',
        model_path='outputs/RandomForestClassifier.pkl',  # run outputs path
        description='Random Forest for MoA prediction',
        tags={'data-format': 'CSV'},
        model_framework=Model.Framework.SCIKITLEARN,
Example #25
def create_inference_config(tmp_dir, model_name, model_version, service_name):
    """
    Create the InferenceConfig object which will be used to deploy.

    :param tmp_dir:
    :type tmp_dir:
    :param model_name:
    :type model_name:
    :param model_version:
    :type model_version:
    :param service_name:
    :type service_name:
    :return:
    :rtype:
    """
    absolute_model_path = _download_artifact_from_uri('models:/{}/{}'.format(model_name, model_version))
    model_folder = absolute_model_path.split(os.path.sep)[-1]
    model_directory_path = tmp_dir.path("model")
    tmp_model_path = os.path.join(
        model_directory_path,
        _copy_file_or_tree(src=absolute_model_path, dst=model_directory_path),
    )

    # Create environment
    env_name = service_name + "-env"
    env_name = env_name[:32]
    mlflow_model = Model.load(os.path.join(absolute_model_path, MLMODEL_FILE_NAME))

    model_pyfunc_conf = load_pyfunc_conf(mlflow_model)
    if pyfunc.ENV in model_pyfunc_conf:
        environment = AzureEnvironment.from_conda_specification(
            env_name,
            os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV])
        )
    else:
        raise MlflowException('Error, no environment information provided with model')

    sample_input_df = None
    sample_output_df = None

    # Leaving this here, commented out for now. The issue is that our swagger handling doesn't work with OpenAPI 3.
    # This runs into issues because a pandas dataframe in a split orient (the default) can have arrays of mixed
    # types, which isn't supported in OpenAPI 2. So for now, we will only use the empty signature to generate
    # swagger, and when we've updated our swagger handling to support OpenAPI 3 we can add this back in.
    """
    if mlflow_model.saved_input_example_info:
        sample_input_file_path = os.path.join(absolute_model_path,
                                              mlflow_model.saved_input_example_info['artifact_path'])
        with open(sample_input_file_path, 'r') as sample_input_file:
            if mlflow_model.saved_input_example_info['type'] == 'dataframe':
                sample_input_df = pandas.read_json(sample_input_file,
                                                   orient=mlflow_model.saved_input_example_info['pandas_orient'])
            else:
                raise MlflowException('Sample model input must be of type "dataframe"')
    """

    if mlflow_model.signature:
        if mlflow_model.signature.inputs and sample_input_df is None:
            # 'is None' check is necessary because dataframes don't like being used as truth values
            columns = mlflow_model.signature.inputs.column_names()
            types = mlflow_model.signature.inputs.pandas_types()
            schema = {}
            for c, t in zip(columns, types):
                schema[c] = t
            df = pandas.DataFrame(columns=columns)
            sample_input_df = df.astype(dtype=schema)
        if mlflow_model.signature.outputs and sample_output_df is None:
            columns = mlflow_model.signature.outputs.column_names()
            types = mlflow_model.signature.outputs.pandas_types()
            schema = {}
            for c, t in zip(columns, types):
                schema[c] = t
            df = pandas.DataFrame(columns=columns)
            sample_output_df = df.astype(dtype=schema)

    # Create execution script
    execution_script_path = tmp_dir.path("execution_script.py")
    create_execution_script(execution_script_path, model_folder, sample_input_df, sample_output_df)

    # Add inference dependencies
    environment.python.conda_dependencies.add_pip_package("mlflow=={}".format(mlflow_version))
    environment.python.conda_dependencies.add_pip_package("inference-schema>=1.2.0")
    environment.python.conda_dependencies.add_pip_package("azureml-model-management-sdk==1.0.1b6.post1")
    environment.python.conda_dependencies.add_pip_package("flask==1.0.3")
    environment.python.conda_dependencies.add_pip_package("gunicorn==19.9.0")
    environment.python.conda_dependencies.add_pip_package("applicationinsights>=0.11.7")
    environment.python.conda_dependencies.add_pip_package("werkzeug>=0.16.1,<=1.0.1")

    # Create InferenceConfig
    inference_config = InferenceConfig(entry_script=execution_script_path, environment=environment)

    return inference_config
        'type': "regression"
    },
    description="Ridge regression model to predict diabetes",
    workspace=ws)

# ### Create Environment

# You can now create and/or use an Environment object when deploying a Webservice. The Environment can have been previously registered with your Workspace, or it will be registered with it as a part of the Webservice deployment. Only Environments that were created using azureml-defaults version 1.0.48 or later will work with this new handling however.
#
# More information can be found in our [using environments notebook](../training/using-environments/using-environments.ipynb).

# In[ ]:

from azureml.core import Environment

env = Environment.from_conda_specification(name='deploytocloudenv',
                                           file_path='myenv.yml')

# This is optional at this point
# env.register(workspace=ws)

# ## Create Inference Configuration
#
# There is now support for a source directory, you can upload an entire folder from your local machine as dependencies for the Webservice.
# Note: in that case, your entry_script, conda_file, and extra_docker_file_steps paths are relative paths to the source_directory path.
#
# Sample code for using a source directory:
#
# ```python
# inference_config = InferenceConfig(source_directory="C:/abc",
#                                    runtime= "python",
#                                    entry_script="x/y/score.py",
#                                    conda_file="env/myenv.yml",
#                                    extra_docker_file_steps="helloworld.txt")
# ```
Example #27
        entry_script=deployment_settings["image"]["entry_script"],
        source_directory=deployment_settings["image"]["source_directory"],
        runtime=deployment_settings["image"]["runtime"],
        conda_file=os.path.basename(dep_path),
        extra_docker_file_steps=deployment_settings["image"]["docker"]
        ["extra_docker_file_steps"],
        enable_gpu=deployment_settings["image"]["docker"]["use_gpu"],
        description=deployment_settings["image"]["description"],
        base_image=deployment_settings["image"]["docker"]["custom_image"],
        base_image_registry=container_registry,
        cuda_version=deployment_settings["image"]["docker"]["cuda_version"])

# Registering Environment
print("Registering Environment")
if "env" not in locals():
    env = Environment.from_conda_specification(name=env_name,
                                               file_path=dep_path)
registered_env = env.register(workspace=ws)
print("Registered Environment")
print(registered_env.name, "Version: " + registered_env.version, sep="\n")

# Profile model
print("Profiling Model")
test_sample = test_functions.get_test_data_sample()
profile = Model.profile(workspace=ws,
                        profile_name=deployment_settings["image"]["name"],
                        models=[model],
                        inference_config=inference_config,
                        input_data=test_sample)
profile.wait_for_profiling(show_output=True)
print(profile.get_results(),
      profile.recommended_cpu,
Example #28
def get_environment(
    workspace: Workspace,
    environment_name: str,
    conda_dependencies_file: str = None,
    create_new: bool = False,
    enable_docker: bool = None,
    docker_image: str = None,
    dockerfile: str = None,
    use_gpu: bool = False,
):
    try:
        e = Env()
        environments = Environment.list(workspace=workspace)
        restored_environment = None
        for env in environments:
            if env == environment_name:
                restored_environment = environments[environment_name]

        if restored_environment is None or create_new:

            # Environment has to be created
            if conda_dependencies_file is not None:
                new_env = Environment.from_conda_specification(
                    environment_name,
                    os.path.join(e.sources_directory_train, conda_dependencies_file),  # NOQA: E501
                )  # NOQA: E501
                restored_environment = new_env
            else:
                restored_environment = Environment(environment_name)

            if enable_docker is not None:
                restored_environment.docker.enabled = enable_docker

                if docker_image is not None:
                    restored_environment.docker.base_image = docker_image
                    # In case of own image
                    # don't append AML managed dependencies
                    restored_environment.python.\
                        user_managed_dependencies = True
                elif dockerfile is not None:
                    # Alternatively, load from a file.
                    with open(dockerfile, "r") as f:
                        dockerfile = f.read()
                        restored_environment.docker.\
                            base_dockerfile = dockerfile
                    # In case of own Dockerfile
                    # don't append AML managed dependencies
                    restored_environment.python.\
                        user_managed_dependencies = True
                else:
                    restored_environment.docker.\
                        base_image = DEFAULT_GPU_IMAGE if use_gpu else DEFAULT_CPU_IMAGE  # NOQA: E501

            restored_environment.register(workspace)

        if restored_environment is not None:
            print(restored_environment)
        return restored_environment
    except Exception as e:
        print(e)
        exit(1)
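A usage sketch for the Dockerfile branch of the helper above (all names are hypothetical): a new environment whose image is built from a local Dockerfile, leaving dependency management to that image.

env = get_environment(workspace=ws,
                      environment_name="custom-docker-env",
                      create_new=True,
                      enable_docker=True,
                      dockerfile="./Dockerfile")  # hypothetical path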
Example #29
from azureml.core import Workspace, Experiment, Environment
from azureml.core import ScriptRunConfig
from azureml.core import Dataset

if __name__ == "__main__":
    ws = Workspace.from_config()
    datastore = ws.get_default_datastore()
    dataset = Dataset.File.from_files(path=(datastore, 'datasets/cifar10'))

    experiment = Experiment(workspace=ws, name='day1-experiment-data')

    config = ScriptRunConfig(
        source_directory='./src',
        script='train.py',
        compute_target='cpu-cluster',
        arguments=[
            '--data_path',
            dataset.as_named_input('input').as_mount(), '--learning_rate',
            0.003, '--momentum', 0.92
        ],
    )
    # set up pytorch environment
    env = Environment.from_conda_specification(
        name='pytorch-env', file_path='./.azureml/pytorch-env.yml')
    config.run_config.environment = env

    run = experiment.submit(config)
    aml_url = run.get_portal_url()
    print("Submitted to compute cluster. Click link below")
    print("")
    print(aml_url)
Example #30
# create an environment from a specification file; the conda .yml content looks like:
#
#   name: py_env
#   dependencies:
#     - numpy
#     - pandas
#     - scikit-learn
#     - pip:
#       - azureml-defaults

# then use this file to create the Azure ML environment
from azureml.core import Environment

env = Environment.from_conda_specification(name='training_environment',
                                           file_path='./conda.yml')


# create an environment from an existing conda environment
from azureml.core import Environment

env = Environment.from_existing_conda_environment(name='training_environment',
                                                  conda_environment_name='py_env')


# or by specifying packages directly
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

env = Environment('training_environment')
deps = CondaDependencies.create(conda_packages=['scikit-learn','pandas','numpy'],
                                pip_packages=['azureml-defaults'])
env.python.conda_dependencies = deps
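
Whichever construction method is used, the environment is then registered and attached to a run configuration; a minimal sketch with hypothetical workspace, script, and cluster names:

from azureml.core import ScriptRunConfig, Workspace

ws = Workspace.from_config()
env.register(workspace=ws)
script_config = ScriptRunConfig(source_directory='src',        # hypothetical
                                script='train.py',             # hypothetical
                                compute_target='cpu-cluster',  # hypothetical
                                environment=env)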