# Example #1
# 0
def get_environment(name_suffix="_training"):
    """Create an AzureML ``Environment`` from ``aml_config/settings.json``.

    Reads the JSON settings file, builds Conda dependencies from its
    ``environment`` section, and configures one of three run modes:
    user-managed dependencies, a Docker run, or a plain system-built
    conda environment.

    :param name_suffix: suffix appended to the experiment name to form the
        environment name (default ``"_training"``).
    :return: the configured ``Environment`` object.
    """
    # Load the JSON settings file
    print("Loading settings")
    with open(os.path.join("aml_config", "settings.json")) as f:
        settings = json.load(f)
    env_settings = settings["environment"]
    env_name = settings["experiment"]["name"] + name_suffix

    # Create Dependencies
    print("Defining Conda Dependencies")
    conda_dep = CondaDependencies().create(
        pip_indexurl=None,
        pip_packages=env_settings["pip_packages"],
        conda_packages=env_settings["conda_packages"],
        python_version=env_settings["python_version"],
        pin_sdk_version=env_settings["pin_sdk_version"])
    # Persist the resolved dependency spec so it can be inspected/versioned.
    conda_dep.save(path=env_settings["dependencies_config"]["path"])

    # Create Environment and setting parameters
    print("Creating Environment")
    env = Environment(name=env_name)
    env.python.conda_dependencies = conda_dep
    env.environment_variables = env_settings["env_variables"]

    if env_settings["user_managed_dependencies"]:
        print("Using existing user-managed Python environment for run")
        env.user_managed_dependencies = env_settings[
            "user_managed_dependencies"]
    elif env_settings["docker"]["enabled"]:
        print(
            "Using Docker run with system-built conda environment based on dependency specification"
        )
        docker_settings = env_settings["docker"]
        env.docker.enabled = docker_settings["enabled"]
        env.docker.gpu_support = docker_settings["gpu_support"]
        env.docker.arguments = docker_settings["arguments"]
        env.docker.shared_volumes = docker_settings["shared_volumes"]
        env.docker.shm_size = docker_settings["shm_size"]

        # BUG FIX: previously the GPU/MPI default image selected below was
        # unconditionally overwritten by the configured base_image.  Now an
        # explicitly configured base_image takes priority, and the stock
        # MPI/GPU images are used only when no base_image is set.
        if docker_settings["base_image"]:
            env.docker.base_image = docker_settings["base_image"]
        elif docker_settings["gpu_support"] and docker_settings["mpi_image"]:
            env.docker.base_image = azureml.core.runconfig.MPI_GPU_IMAGE
        elif docker_settings["gpu_support"]:
            env.docker.base_image = azureml.core.runconfig.DEFAULT_GPU_IMAGE
        elif docker_settings["mpi_image"]:
            env.docker.base_image = azureml.core.runconfig.MPI_CPU_IMAGE

        env.docker.base_image_registry.address = docker_settings[
            "base_image_registry"]["address"]
        env.docker.base_image_registry.username = docker_settings[
            "base_image_registry"]["username"]
        env.docker.base_image_registry.password = docker_settings[
            "base_image_registry"]["password"]
    else:
        print(
            "Using system-build conda environment based on dependency specification"
        )
        env.docker.enabled = False
    return env
# Example #2
# 0
def get_or_create_python_environment(azure_config: AzureConfig,
                                     source_config: SourceConfig,
                                     environment_name: str = "",
                                     register_environment: bool = True) -> Environment:
    """
    Builds (or retrieves) the AzureML Python environment descriptor for a run,
    derived from the Conda environment definition files listed in `source_config`.
    An equivalent environment already present in the workspace is re-used
    rather than re-created.
    :param azure_config: azure related configurations to use for model scale-out behaviour
    :param source_config: configurations for model execution, such as name and execution mode
    :param environment_name: If specified, try to retrieve the existing Python environment with this name. If that
    is not found, create one from the Conda files provided. This parameter is meant to be used when running
    inference for an existing model.
    :param register_environment: If True, the Python environment will be registered in the AzureML workspace. If
    False, it will only be created, but not registered. Use this for unit testing.
    """
    # Fold InnerEye's own requirements into the project-specific dependency
    # set.  This matters when InnerEye is consumed as a git submodule of an
    # outer project and jobs are submitted from a local machine; on version
    # conflicts the outer project's pins win.
    conda_deps, merged_yaml = merge_conda_dependencies(source_config.conda_dependencies_files)  # type: ignore
    extra_index = azure_config.pip_extra_index_url
    if extra_index:
        # Swap the search order so that extra-index packages shadow
        # identically-named packages hosted on pypi.
        conda_deps.set_pip_option(f"--index-url {extra_index}")
        conda_deps.set_pip_option("--extra-index-url https://pypi.org/simple")
    environment_variables = {
        "AZUREML_OUTPUT_UPLOAD_TIMEOUT_SEC": str(source_config.upload_timeout_seconds),
        "MKL_SERVICE_FORCE_INTEL": "1",
    }
    environment_variables.update(source_config.environment_variables or {})
    docker_base_image = "mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.2-cudnn8-ubuntu18.04"
    # Derive a name that changes whenever anything relevant changes.  hashlib
    # is used because the builtin hash() is not stable across interpreter
    # instances; AzureML additionally hashes environments itself and re-uses
    # matching ones regardless of name.
    fingerprint_source = "\n".join(
        [merged_yaml, azure_config.docker_shm_size, docker_base_image, str(environment_variables)])
    fingerprint = hashlib.sha1(fingerprint_source.encode("utf8")).hexdigest()[:32]
    unique_env_name = f"InnerEye-{fingerprint}"
    env_name_to_find = environment_name or unique_env_name
    try:
        existing = Environment.get(azure_config.get_workspace(), name=env_name_to_find, version=ENVIRONMENT_VERSION)
        logging.info(f"Using existing Python environment '{existing.name}'.")
        return existing
    except Exception:
        logging.info(f"Python environment '{unique_env_name}' does not yet exist, creating and registering it.")
    new_env = Environment(name=unique_env_name)
    new_env.docker.enabled = True
    new_env.docker.shm_size = azure_config.docker_shm_size
    new_env.docker.base_image = docker_base_image
    new_env.python.conda_dependencies = conda_deps
    new_env.environment_variables = environment_variables
    if register_environment:
        new_env.register(azure_config.get_workspace())
    return new_env
# Example #3
# 0
def load_azml_env(img_tag: str = "latest") -> Environment:
    """Return the 'lumonitor' AzureML environment backed by a prebuilt image.

    Dependencies are user-managed (baked into the container image), so no
    conda resolution happens at run time.  Registry credentials and storage
    settings are read from the process environment — a KeyError is raised if
    any of the AZURE_REGISTRY_* / AZURE_STORAGE_* variables are missing.

    :param img_tag: tag of the lumonitor-azml image to run (default "latest").
    """
    environment = Environment("lumonitor")
    environment.python.user_managed_dependencies = True
    environment.docker.base_image = f"cspincregistry.azurecr.io/lumonitor-azml:{img_tag}"
    environment.docker.base_image_registry.address = "cspincregistry.azurecr.io"
    environment.docker.base_image_registry.username = os.environ["AZURE_REGISTRY_USERNAME"]
    environment.docker.base_image_registry.password = os.environ["AZURE_REGISTRY_PASSWORD"]

    environment.environment_variables = {
        "AZURE_STORAGE_ACCOUNT": os.environ["AZURE_STORAGE_ACCOUNT"],
        "AZURE_STORAGE_ACCESS_KEY": os.environ["AZURE_STORAGE_ACCESS_KEY"],
    }

    return environment
def get_automl_environment():
    """Build the Docker-based AzureML environment for many-models AutoML runs.

    Pins the AutoML SDK plus the forecasting stack (fbprophet, xgboost, etc.)
    and disables ingest-flush waiting via AZUREML_FLUSH_INGEST_WAIT.
    """
    from azureml.core import Environment
    from azureml.core.conda_dependencies import CondaDependencies
    from azureml.core.runconfig import DEFAULT_CPU_IMAGE

    environment = Environment(name="many_models_environment_automl")
    dependencies = CondaDependencies.create(
        pip_packages=['azureml-sdk[automl]', 'pyarrow==0.14'])
    dependencies.add_pip_package('py-cpuinfo==5.0.0')
    # Conda-managed packages, pinned to the versions the models were built with.
    for conda_package in ('psutil',
                          'pandas==0.23.4',
                          'numpy==1.16.2',
                          'fbprophet==0.5',
                          'py-xgboost==0.90'):
        dependencies.add_conda_package(conda_package)
    environment.python.conda_dependencies = dependencies
    environment.docker.enabled = True
    environment.docker.base_image = DEFAULT_CPU_IMAGE
    environment.environment_variables = {'AZUREML_FLUSH_INGEST_WAIT': ''}
    return environment
# Example #5
# 0
    aks_config = AksWebservice.deploy_configuration(
        autoscale_enabled=False,
        auth_enabled=True,
        cpu_cores=0.5,
        memory_gb=1,
        description='Web service to deploy an uploaded model',
        enable_app_insights=True,
        num_replicas=2,
        gpu_cores=None,
        compute_target_name=aks_service_name)

    os.chdir('./score')
    print("Creating environment")
    aks_env = Environment(name=f'{web_service_name}_env')
    aks_env.environment_variables = {
        "STORAGE_CONNECTION": os.getenv("STORAGE_CONNECTION")
    }
    aks_env.python.conda_dependencies = CondaDependencies.create(
        pip_packages=[
            'azureml-defaults', 'azure-storage-blob', 'pynacl==1.2.1'
        ],
        conda_packages=['numpy', 'scikit-learn', 'tensorflow', 'keras'])

    inf_config = InferenceConfig(entry_script="score.py", environment=aks_env)

    print("Attempting to deploy model to web service")
    # Deploy this particular web service to your AKS cluster.
    service = Model.deploy(workspace=ws,
                           name=web_service_name,
                           models=[model],
                           inference_config=inf_config,
# Script section: set up an AzureML run for the 'rl-credit' experiment on a
# GPU cluster, with dependencies from environment.yml and W&B logging.

# load Workspace (reads config.json from the current/parent directory)
ws = Workspace.from_config()

# load Experiment (created in the workspace if it does not already exist)
experiment = Experiment(workspace=ws, name='rl-credit')

# Create python environment for Azure machine learning expt
# options for class methods: from_conda_specification, from_pip_requirements,
# from_existing_conda_environment
myenv = Environment(name="rl-credit")

## Environment: docker section
myenv.docker.enabled = True
myenv.docker.base_image = "mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04"
# comment out this environment variable if you don't have it set!
# (raises KeyError here if WANDB_API_KEY is absent from the local shell)
myenv.environment_variables = {'WANDB_API_KEY': os.environ['WANDB_API_KEY']}

## Environment: python section
# Conda deps come from the local environment.yml spec file
conda_dep = CondaDependencies(conda_dependencies_file_path='environment.yml')
myenv.python.conda_dependencies = conda_dep

# create configuration for Run
# Use RunConfiguration to specify compute target / env deps part of run
run_config = RunConfiguration()

# Attach compute target to run config
run_config.framework = 'python'
run_config.target = "gpu-cluster"
# This doesn't actually do anything since my target is a persistent compute instead of amlcompute
run_config.amlcompute.vm_size = "Standard_NC24"
run_config.node_count = 1