def main():
    # get workspace
    ws = load_workspace()
    model = Model.register(ws,
                           model_name='pytorch_mnist',
                           model_path='model.pth')

    # create dep file
    myenv = CondaDependencies()
    myenv.add_pip_package('numpy')
    myenv.add_pip_package('torch')
    with open('pytorchmnist.yml', 'w') as f:
        print('Writing out {}'.format('pytorchmnist.yml'))
        f.write(myenv.serialize_to_string())
        print('Done!')

    # create image
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="pytorchmnist.yml",
        dependencies=['./models.py'])

    image = Image.create(ws, 'pytorchmnist', [model], image_config)
    image.wait_for_creation(show_output=True)

    # create service
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1, memory_gb=1, description='simple MNIST digit detection')
    service = Webservice.deploy_from_image(workspace=ws,
                                           image=image,
                                           name='pytorchmnist-svc',
                                           deployment_config=aciconfig)
    service.wait_for_deployment(show_output=True)
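    # A minimal follow-on sketch: once deployment finishes, the ACI endpoint can be
    # exercised directly. The payload below is hypothetical and assumes score.py
    # expects a JSON body with a "data" field.
    import json
    print('Scoring URI: {}'.format(service.scoring_uri))
    sample = json.dumps({'data': [[0.0] * 784]})
    print(service.run(input_data=sample))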
def run_inference(test_experiment, compute_target, script_folder, train_run,
                  test_dataset, target_column_name, model_name):

    train_run.download_file('outputs/conda_env_v_1_0_0.yml',
                            'inference/condafile.yml')

    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path='inference/condafile.yml')

    est = Estimator(source_directory=script_folder,
                    entry_script='infer.py',
                    script_params={
                        '--target_column_name': target_column_name,
                        '--model_name': model_name
                    },
                    inputs=[test_dataset.as_named_input('test_data')],
                    compute_target=compute_target,
                    environment_definition=inference_env)

    run = test_experiment.submit(est,
                                 tags={
                                     'training_run_id':
                                     train_run.id,
                                     'run_algorithm':
                                     train_run.properties['run_algorithm'],
                                     'valid_score':
                                     train_run.properties['score'],
                                     'primary_metric':
                                     train_run.properties['primary_metric']
                                 })

    run.log("run_algorithm", run.tags['run_algorithm'])
    return run
def merge_conda_dependencies(files: List[Path]) -> CondaDependencies:
    """
    Creates a CondaDependencies object from the Conda environments specified in one or more files.
    The resulting object contains the union of the Conda and pip packages in the files, where merging
    is done via the conda_merge package.
    :param files: The Conda environment files to read.
    :return: A CondaDependencies object that contains packages from all the files.
    """
    for file in files:
        _log_conda_dependencies_stats(CondaDependencies(file), f"Conda environment in {file}")
    merged_file = tempfile.NamedTemporaryFile(delete=False)
    merge_conda_files(files, result_file=Path(merged_file.name))
    merged_dependencies = CondaDependencies(merged_file.name)
    _log_conda_dependencies_stats(merged_dependencies, "Merged Conda environment")
    merged_file.close()
    return merged_dependencies
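# A minimal usage sketch, assuming two hypothetical Conda environment files exist
# on disk; the merged dependencies are then attached to an AzureML Environment.
from pathlib import Path
from azureml.core import Environment

merged = merge_conda_dependencies([Path("environment.yml"), Path("environment_extra.yml")])
merged_env = Environment("merged_env")
merged_env.python.conda_dependencies = merged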
def create_run_config(cpu_cluster, docker_proc_type, conda_env_file):
    """
    AzureML requires the run environment to be set up prior to submission.
    This configures a Docker-based persistent compute. Even though
    it is called persistent compute, AzureML handles startup/shutdown
    of the compute environment.

    Args:
        cpu_cluster      (str) : Names the cluster for the test
                                 In the case of unit tests, any of
                                 the following:
                                 - Reco_cpu_test
                                 - Reco_gpu_test
        docker_proc_type (str) : processor type, cpu or gpu
        conda_env_file   (str) : filename which contains info to
                                 set up conda env
    Return:
          run_amlcompute : AzureML run config
    """

    # runconfig with max_run_duration_seconds did not work, check why:
    # run_amlcompute = RunConfiguration(max_run_duration_seconds=60*30)
    run_amlcompute = RunConfiguration()
    run_amlcompute.target = cpu_cluster
    run_amlcompute.environment.docker.enabled = True
    run_amlcompute.environment.docker.base_image = docker_proc_type

    # Use the conda dependencies file to create a conda environment in
    # the Docker image used for execution.
    # False: AzureML builds the environment from the provided conda file.
    # True: the user manages the environment (dependencies) manually.
    run_amlcompute.environment.python.user_managed_dependencies = False
    run_amlcompute.environment.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path=conda_env_file)
    return run_amlcompute
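# A hedged usage sketch: wiring the run configuration into a ScriptRunConfig and
# submitting it. The cluster, script and experiment names are placeholders.
from azureml.core import Experiment, ScriptRunConfig, Workspace
from azureml.core.runconfig import DEFAULT_CPU_IMAGE

ws = Workspace.from_config()
run_cfg = create_run_config("Reco_cpu_test", DEFAULT_CPU_IMAGE, "conda_dependencies.yml")
src = ScriptRunConfig(source_directory=".", script="run_tests.py", run_config=run_cfg)
run = Experiment(ws, "unit_tests").submit(src)
run.wait_for_completion(show_output=True)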
Example #5
    def get_run_cfg(ws, pip_packages, conda_packages, ext_wheels, gpu=True):
        '''
        get_run_cfg - Builds the AMLS run configuration.

        :param ws: AMLS workspace, used to register private pip wheels.
        :param pip_packages: pip packages to add to the conda dependencies.
        :param conda_packages: conda packages to add to the conda dependencies.
        :param ext_wheels: paths of private wheel files to upload and install via pip.
        :param gpu: if True, use the default GPU base image; otherwise the CPU image.
        :returns: AMLS run configuration
        :rtype: RunConfiguration object
        '''
        conda_dep = CondaDependencies()
        for pip_package in pip_packages:
            conda_dep.add_pip_package(pip_package)
        for conda_package in conda_packages:
            conda_dep.add_conda_package(conda_package)
        for whl_path in ext_wheels:
            whl_url = Environment.add_private_pip_wheel(workspace=ws,
                                                        file_path=whl_path,
                                                        exist_ok=True)
            conda_dep.add_pip_package(whl_url)
        run_cfg = RunConfiguration(conda_dependencies=conda_dep)
        run_cfg.environment.docker.enabled = True
        run_cfg.environment.docker.gpu_support = gpu
        if gpu:
            run_cfg.environment.docker.base_image = DEFAULT_GPU_IMAGE
        else:
            run_cfg.environment.docker.base_image = DEFAULT_CPU_IMAGE
        run_cfg.environment.spark.precache_packages = False
        return run_cfg
Example #6
def fetch_run_config(compute_target, base_image, sp_username, sp_tenant,
                     sp_password):
    """ Generates a Run Configuration based on the pipeline parameters,
    specifying such things as the Compute Target and Conda Dependencies. 
    """

    # Inits configuration for Python
    run_config = RunConfiguration(framework="python")

    # Specifies compute target
    run_config.target = compute_target

    # Configures Docker/Image/Environment Variable parameters
    run_config.environment.docker.enabled = True
    run_config.environment.docker.base_image = base_image
    run_config.environment.environment_variables = {
        "SP_USERNAME": sp_username,
        "SP_TENANT": sp_tenant,
        "SP_PASSWORD": sp_password
    }

    # Specifies Conda file location (Auto-injected from preparing staging)
    run_config.environment.python.conda_dependencies = CondaDependencies(
        os.path.join("snapshot", "inputs", "environment.yml"))

    # Returns configuration
    return run_config
Example #7
def get_environment(
    ws,
    environment_name,
    docker_image="todrabas/aml_rapids:latest",
    python_interpreter="/opt/conda/envs/rapids/bin/python",
    conda_packages=["matplotlib"],
):
    if environment_name not in ws.environments:
        env = Environment(name=environment_name)
        env.docker.enabled = True
        env.docker.base_image = docker_image

        env.python.interpreter_path = python_interpreter
        env.python.user_managed_dependencies = True

        conda_dep = CondaDependencies()

        for conda_package in conda_packages:
            conda_dep.add_conda_package(conda_package)

        env.python.conda_dependencies = conda_dep
        env.register(workspace=ws)
    else:
        env = ws.environments[environment_name]

    return env
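# A small usage sketch, assuming the workspace and a compute target named
# "gpu-cluster" already exist; the experiment and script names are placeholders.
from azureml.core import Experiment, ScriptRunConfig, Workspace

ws = Workspace.from_config()
rapids_env = get_environment(ws, "rapids_env", conda_packages=["matplotlib", "seaborn"])
src = ScriptRunConfig(source_directory="./src", script="train.py",
                      compute_target="gpu-cluster", environment=rapids_env)
run = Experiment(ws, "rapids_experiment").submit(src)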
Example #8
def createOrGetEnvironment(ws, login_config, app_config):
    environment_name = login_config["aml_compute"]["environment_name"]
    python_interpreter = login_config["aml_compute"]["python_interpreter"]
    conda_packages = login_config["aml_compute"]["conda_packages"]

    ### CREATE OR RETRIEVE THE ENVIRONMENT
    if environment_name not in ws.environments:
        logger.info(f"Creating {environment_name} environment...")
        env = Environment(name=environment_name)
        env.docker.enabled = login_config["aml_compute"]["docker_enabled"]
        env.docker.base_image = None
        env.docker.base_dockerfile = f'FROM {app_config["base_dockerfile"]}'
        env.python.interpreter_path = python_interpreter
        env.python.user_managed_dependencies = True
        conda_dep = CondaDependencies()

        for conda_package in conda_packages:
            conda_dep.add_conda_package(conda_package)

        env.python.conda_dependencies = conda_dep
        env.register(workspace=ws)
    else:
        logger.info(f"    Environment {environment_name} found...")
        env = ws.environments[environment_name]

    return env
def save_conda_dependencies(amls_config, filename):
    conda_dependencies = CondaDependencies()
    for dependency in amls_config['conda_dependencies']:
        conda_dependencies.add_pip_package(dependency)

    with open(filename, "w") as f:
        f.write(conda_dependencies.serialize_to_string())
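# A minimal usage sketch with a hypothetical amls_config dict; the written file can
# then be passed wherever a conda file path is expected (e.g. an image or environment).
amls_config = {'conda_dependencies': ['numpy', 'pandas', 'scikit-learn']}
save_conda_dependencies(amls_config, 'generated_env.yml')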
Example #10
def _prepare_environment_definition(base_image, dependencies_file,
                                    distributed):
    logger = logging.getLogger(__name__)
    env_def = EnvironmentDefinition()
    conda_dep = CondaDependencies(
        conda_dependencies_file_path=dependencies_file)
    env_def.python.user_managed_dependencies = False
    env_def.python.conda_dependencies = conda_dep
    env_def.docker.enabled = True
    env_def.docker.gpu_support = True
    env_def.docker.base_image = base_image
    env_def.docker.shm_size = "8g"
    env_def.environment_variables["NCCL_SOCKET_IFNAME"] = "eth0"
    env_def.environment_variables["NCCL_IB_DISABLE"] = 1

    if distributed:
        env_def.environment_variables["DISTRIBUTED"] = "True"
    else:
        env_def.environment_variables["DISTRIBUTED"] = "False"
        logger.info("Adding runtime argument")
        # Adds runtime argument since we aliased nvidia-docker to docker in order to be able to run them as
        # sibling containers. Without this we will get CUDA library errors
        env_def.docker.arguments.extend(["--runtime", "nvidia"])

    return env_def
def create_aml_environment(aml_interface):
    aml_env = Environment(name=AML_ENVIRONMENT_NAME)
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("numpy==1.18.2")
    conda_dep.add_pip_package("pandas==1.0.3")
    conda_dep.add_pip_package("scikit-learn==0.22.2.post1")
    conda_dep.add_pip_package("joblib==0.14.1")
    conda_dep.add_pip_package("azure-storage-blob==12.3.0")

    aml_env.environment_variables[AZURE_STORAGE_ACCOUNT_NAME] = os.getenv(
        AZURE_STORAGE_ACCOUNT_NAME)
    aml_env.environment_variables[AZURE_STORAGE_ACCOUNT_KEY] = os.getenv(
        AZURE_STORAGE_ACCOUNT_KEY)
    aml_env.environment_variables[MODEL_NAME_VARIABLE] = MODEL_NAME

    logger.info(
        f"set environment variables on compute environment: {aml_env.environment_variables}"
    )

    whl_filepath = retrieve_whl_filepath()
    whl_url = Environment.add_private_pip_wheel(
        workspace=aml_interface.workspace,
        file_path=whl_filepath,
        exist_ok=True)
    conda_dep.add_pip_package(whl_url)
    aml_env.python.conda_dependencies = conda_dep
    aml_env.docker.enabled = True
    return aml_env
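# A hedged follow-on sketch: registering the environment so later runs can reference
# it by name. "aml_interface" is assumed to expose the workspace, as used above.
aml_env = create_aml_environment(aml_interface)
aml_env.register(workspace=aml_interface.workspace)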
def run_inference(
    test_experiment,
    compute_target,
    script_folder,
    train_run,
    test_dataset,
    lookback_dataset,
    max_horizon,
    target_column_name,
    time_column_name,
    freq,
):
    model_base_name = "model.pkl"
    if "model_data_location" in train_run.properties:
        model_location = train_run.properties["model_data_location"]
        _, model_base_name = model_location.rsplit("/", 1)
    train_run.download_file(
        "outputs/{}".format(model_base_name), "inference/{}".format(model_base_name)
    )
    train_run.download_file("outputs/conda_env_v_1_0_0.yml", "inference/condafile.yml")

    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path="inference/condafile.yml"
    )

    est = Estimator(
        source_directory=script_folder,
        entry_script="infer.py",
        script_params={
            "--max_horizon": max_horizon,
            "--target_column_name": target_column_name,
            "--time_column_name": time_column_name,
            "--frequency": freq,
            "--model_path": model_base_name,
        },
        inputs=[
            test_dataset.as_named_input("test_data"),
            lookback_dataset.as_named_input("lookback_data"),
        ],
        compute_target=compute_target,
        environment_definition=inference_env,
    )

    run = test_experiment.submit(
        est,
        tags={
            "training_run_id": train_run.id,
            "run_algorithm": train_run.properties["run_algorithm"],
            "valid_score": train_run.properties["score"],
            "primary_metric": train_run.properties["primary_metric"],
        },
    )

    run.log("run_algorithm", run.tags["run_algorithm"])
    return run
def main():

    ws = Workspace.from_config()

    conda = CondaDependencies()
    conda.add_conda_package("python==3.5")
    conda.add_pip_package("h5py==2.8.0")
    conda.add_pip_package("html5lib==1.0.1")
    conda.add_pip_package("keras==2.2.0")
    conda.add_pip_package("Keras-Applications==1.0.2")
    conda.add_pip_package("Keras-Preprocessing==1.0.1")
    conda.add_pip_package("matplotlib==2.2.2")
    conda.add_pip_package("numpy==1.14.5")
    conda.add_pip_package("opencv-python==3.3.0.9")
    conda.add_pip_package("pandas==0.23.3")
    conda.add_pip_package("Pillow==5.2.0")
    conda.add_pip_package("requests==2.19.1")
    conda.add_pip_package("scikit-image==0.14.0")
    conda.add_pip_package("scikit-learn==0.19.2")
    conda.add_pip_package("scipy==1.1.0")
    conda.add_pip_package("sklearn==0.0")
    conda.add_pip_package("tensorflow==1.9.0")
    conda.add_pip_package("urllib3==1.23")
    conda.add_pip_package("azureml-sdk")

    with open("environment.yml", "w") as f:
        f.write(conda.serialize_to_string())

    with open("environment.yml", "r") as f:
        print(f.read())

    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="environment.yml",
        docker_file="Dockerfile",
        dependencies=DEPENDENCIES)

    # Workspace.webservices is a property (a dict of existing services); use
    # Webservice.list to filter by compute type instead.
    webservices = Webservice.list(ws, compute_type='ACI')

    image = ContainerImage.create(name="ai-bootcamp",
                                  models=[],
                                  image_config=image_config,
                                  workspace=ws)

    image.wait_for_creation(show_output=True)

    webservices_list = []
    for key in webservices:
        webservices_list.append(key)

    service_name = webservices_list[0].name

    aciwebservice = AciWebservice(ws, service_name)

    aciwebservice.update(image=image)
Example #14
def create_env():
    myenv = CondaDependencies()
    myenv.add_conda_package("pytorch")
    myenv.add_conda_package("numpy")
    myenv.add_conda_package("torchvision=0.4.1")

    with open("./myenv.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    return "./myenv.yml"
Example #15
def create_yaml_file():
    myenv = CondaDependencies()
    myenv.add_conda_package("scikit-learn")
    myenv.add_conda_package("pandas")

    with open("myenv.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    with open("myenv.yml", "r") as f:
        print(f.read())
Example #16
def test_framework_version(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test if the PyTorch framework version can be read correctly from the current environment file.
    """
    environment_file = fixed_paths.repository_root_directory(
        ENVIRONMENT_YAML_FILE_NAME)
    assert environment_file.is_file(), "Environment file must be present"
    conda_dep = CondaDependencies(
        conda_dependencies_file_path=environment_file)
    framework = pytorch_version_from_conda_dependencies(conda_dep)
    # If this fails, it is quite likely that the AzureML SDK is behind pytorch, and does not yet know about a
    # new version of pytorch that we are using here.
    assert framework is not None
Example #17
def main():
    try:
        ws = connectToWorkspace(TENANT_ID, APP_ID, SP_PASSWORD,
                                SUBSCRIPTION_ID, RESOURCE_GROUP,
                                WORKSPACE_NAME)
    except ProjectSystemException as err:
        print('Authentication did not work.')
        return json.dumps('ProjectSystemException')
    except Exception as err:
        print(err)
        sys.exit()
    print("connect")
    model = Model.register(model_path=os.path.join(
        os.getcwd(), "retailai_recommendation_model.zip"),
                           model_name="retailai_recommendation_model",
                           description="Retail.AI Item-Based Recommender",
                           workspace=ws)
    print("model registered")

    myenv = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')
    myenv.name = "myenv"
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("azure-storage")
    conda_dep.add_pip_package("azure-storage-file-datalake")
    myenv.python.conda_dependencies = conda_dep
    print("Environment Configured")
    inference_config = InferenceConfig(entry_script='score.py',
                                       environment=myenv)

    aks_target_name = "raiaks"

    try:
        aks_target = AksCompute(ws, aks_target_name)
        print(aks_target)
    except ComputeTargetException as err:
        aks_target = attachAksComputeToWorkspace(ws, RESOURCE_GROUP,
                                                 AKS_CLUSTER_NAME,
                                                 aks_target_name, True)
        print(aks_target)
    except Exception as err:
        print(err)
        sys.exit()
    try:
        deployToAks(ws, aks_target, "retail-ai-item-recommender", model,
                    inference_config, True)
    except Exception as err:
        print(err)
        sys.exit()
    def deploy(self):
        myenv = CondaDependencies()
        myenv.add_pip_package("azureml-sdk")
        myenv.add_pip_package("joblib")
        myenv.add_pip_package("tensorflow")
        myenv.add_pip_package("Pillow")
        myenv.add_pip_package("azureml-dataprep[pandas,fuse]>=1.1.14")

        with open("diagnoz_env.yml", "w") as f:
            f.write(myenv.serialize_to_string())

        huml_env = Environment.from_conda_specification(
            name="diagnoz_env", file_path="diagnoz_env.yml")

        inference_config = InferenceConfig(entry_script="score.py",
                                           source_directory='.',
                                           environment=huml_env)
        print("file deployement : ")
        for root, dir_, files in os.walk(os.getcwd()):
            print("dir_", dir_)
            for filename in files:
                print("filename :", filename)

        aciconfig = AciWebservice.deploy_configuration(
            cpu_cores=1,
            memory_gb=1,
            tags={
                "data": "cancer-data",
                "method": "tensorflow"
            },
            description='Predicting cancer with tensorflow')

        try:
            AciWebservice(self.ws, self.config.DEPLOY_SERVICE_NAME).delete()
            print("webservice deleted")
        except WebserviceException:
            pass

        model = self.ws.models[self.config.MODEL_NAME]

        service = Model.deploy(workspace=self.ws,
                               name=self.config.DEPLOY_SERVICE_NAME,
                               models=[model],
                               inference_config=inference_config,
                               deployment_config=aciconfig)

        service.wait_for_deployment(show_output=True)
        print("success deployement")
    def conda_dependencies(self):
        """
        Get module conda dependencies

        :return: CondaDependencies instance
        """
        cd = CondaDependencies()
        for c in self._get_value('CondaDependencies/CondaChannels'):
            cd.add_channel(c)
        for c in self._get_value('CondaDependencies/CondaPackages'):
            cd.add_conda_package(c)
        for p in self._get_value('CondaDependencies/PipPackages'):
            cd.add_pip_package(p)
        for p in self._get_value('CondaDependencies/PipOptions'):
            cd.set_pip_option(p)
        return cd
Example #20
def create_aml_environment(aml_interface):
    aml_env = Environment(name=AML_ENV_NAME)
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("numpy==1.18.2")
    conda_dep.add_pip_package("pandas==1.0.3")
    conda_dep.add_pip_package("scikit-learn==0.22.2.post1")
    conda_dep.add_pip_package("joblib==0.14.1")
    whl_filepath = retrieve_whl_filepath()
    whl_url = Environment.add_private_pip_wheel(
        workspace=aml_interface.workspace,
        file_path=whl_filepath,
        exist_ok=True)
    conda_dep.add_pip_package(whl_url)
    aml_env.python.conda_dependencies = conda_dep
    aml_env.docker.enabled = True
    return aml_env
Example #21
def run_inference(test_experiment, compute_target, script_folder, train_run,
                  test_dataset, lookback_dataset, max_horizon,
                  target_column_name, time_column_name, freq):
    model_base_name = 'model.pkl'
    if 'model_data_location' in train_run.properties:
        model_location = train_run.properties['model_data_location']
        _, model_base_name = model_location.rsplit('/', 1)
    train_run.download_file('outputs/{}'.format(model_base_name),
                            'inference/{}'.format(model_base_name))
    train_run.download_file('outputs/conda_env_v_1_0_0.yml',
                            'inference/condafile.yml')

    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path='inference/condafile.yml')

    est = Estimator(source_directory=script_folder,
                    entry_script='infer.py',
                    script_params={
                        '--max_horizon': max_horizon,
                        '--target_column_name': target_column_name,
                        '--time_column_name': time_column_name,
                        '--frequency': freq,
                        '--model_path': model_base_name
                    },
                    inputs=[
                        test_dataset.as_named_input('test_data'),
                        lookback_dataset.as_named_input('lookback_data')
                    ],
                    compute_target=compute_target,
                    environment_definition=inference_env)

    run = test_experiment.submit(est,
                                 tags={
                                     'training_run_id':
                                     train_run.id,
                                     'run_algorithm':
                                     train_run.properties['run_algorithm'],
                                     'valid_score':
                                     train_run.properties['score'],
                                     'primary_metric':
                                     train_run.properties['primary_metric']
                                 })

    run.log("run_algorithm", run.tags['run_algorithm'])
    return run
Example #22
def get_inference_config(environment_name, conda_file, entry_script):
    # Create the environment
    env = Environment(name=environment_name)

    conda_dep = CondaDependencies(conda_file)

    # Define the packages needed by the model and scripts
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("xgboost")

    # Adds dependencies to PythonSection of myenv
    env.python.conda_dependencies = conda_dep

    inference_config = InferenceConfig(entry_script=entry_script,
                                       environment=env)

    return inference_config
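# A hedged usage sketch: deploying a registered model to ACI with the returned
# inference configuration. The workspace, model name and service name are placeholders.
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core.webservice import AciWebservice

ws = Workspace.from_config()
inference_config = get_inference_config("xgboost-env", "environment.yml", "score.py")
deployment_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
service = Model.deploy(ws, "xgboost-svc", [ws.models["xgboost_model"]],
                       inference_config, deployment_config)
service.wait_for_deployment(show_output=True)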
Example #23
    def deploy(self):

        try:
            AciWebservice(self.ws, self.DEPLOY_SERVICE_NAME).delete()
            print("webservice deleted")
        except WebserviceException:
            pass

        conda_dep = CondaDependencies()                                        
        conda_dep.add_pip_package("joblib")
        conda_dep.add_pip_package("torch")
        conda_dep.add_pip_package("torchvision")
        conda_dep.add_pip_package("azureml-sdk")
        conda_dep.add_pip_package("azure-storage-blob")
        conda_dep.add_pip_package("PyYAML")
        conda_dep.add_pip_package("scikit-learn")
        conda_dep.add_pip_package("matplotlib")
        conda_dep.add_pip_package("opencensus-ext-azure")
        
        
        shoes_designer_env_file = "shoes_designer_env.yml"
        with open(shoes_designer_env_file,"w") as f:
            f.write(conda_dep.serialize_to_string())

        shoes_designer_env = Environment.from_conda_specification(name="shoes_designer_env", file_path=shoes_designer_env_file)

        inference_config = InferenceConfig(entry_script="score.py", environment=shoes_designer_env)

        aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, 
                                                    memory_gb=2, 
                                                    tags={"method" : "torch"}, 
                                                    description='Generate shoes with torch')

        model = self.ws.models[self.MODEL_NAME]

        service = Model.deploy(workspace=self.ws, 
                            name=self.DEPLOY_SERVICE_NAME, 
                            models=[model], 
                            inference_config=inference_config, 
                            deployment_config=aciconfig,
                            overwrite=True)
        service.wait_for_deployment(show_output=True)

        print("success deployement")        

        return service
def run_rolling_forecast(test_experiment,
                         compute_target,
                         train_run,
                         test_dataset,
                         max_horizon,
                         target_column_name,
                         time_column_name,
                         freq='D',
                         inference_folder='./forecast'):
    condafile = inference_folder + '/condafile.yml'
    train_run.download_file('outputs/model.pkl',
                            inference_folder + '/model.pkl')
    train_run.download_file('outputs/conda_env_v_1_0_0.yml', condafile)

    inference_env = Environment("myenv")
    inference_env.docker.enabled = True
    inference_env.python.conda_dependencies = CondaDependencies(
        conda_dependencies_file_path=condafile)

    est = Estimator(source_directory=inference_folder,
                    entry_script='forecasting_script.py',
                    script_params={
                        '--max_horizon': max_horizon,
                        '--target_column_name': target_column_name,
                        '--time_column_name': time_column_name,
                        '--frequency': freq
                    },
                    inputs=[test_dataset.as_named_input('test_data')],
                    compute_target=compute_target,
                    environment_definition=inference_env)

    run = test_experiment.submit(est,
                                 tags={
                                     'training_run_id':
                                     train_run.id,
                                     'run_algorithm':
                                     train_run.properties['run_algorithm'],
                                     'valid_score':
                                     train_run.properties['score'],
                                     'primary_metric':
                                     train_run.properties['primary_metric']
                                 })

    run.log("run_algorithm", run.tags['run_algorithm'])
    return run
Example #25
    def get_run_config(self):
        def _get_structured_interface_param(name, param_list):
            return next((param for param in param_list if param.name == name),
                        None)

        param_list = self.default_module_version.interface.parameters
        conda_content = _get_structured_interface_param(
            'CondaDependencies', param_list).default_value
        docker_enabled = _get_structured_interface_param(
            'DockerEnabled', param_list).default_value
        base_docker_image = _get_structured_interface_param(
            'BaseDockerImage', param_list).default_value
        conda_dependencies = CondaDependencies(
            _underlying_structure=ruamel.yaml.safe_load(conda_content))

        run_config = RunConfiguration()
        run_config.environment.docker.enabled = docker_enabled
        run_config.environment.docker.base_image = base_docker_image
        run_config.environment.python.conda_dependencies = conda_dependencies
        return run_config
def merge_conda_dependencies(files: List[Path]) -> CondaDependencies:
    """
    Creates a CondaDependencies object from the Conda environments specified in one or more files.
    The resulting object contains the union of the Conda and pip packages in the files. If there are version
    conflicts in pip packages, the contents of later files are given priority. If there are version
    conflicts in Conda packages, all versions are retained, and conflict resolution is left to Conda.
    :param files: The Conda environment files to read.
    :return: A CondaDependencies object that contains packages from all the files.
    """
    merged_dependencies: Optional[CondaDependencies] = None

    for file in files:
        conda_dependencies = CondaDependencies(file)
        _log_conda_dependencies_stats(conda_dependencies, f"Conda environment in {file}")
        if merged_dependencies is None:
            merged_dependencies = conda_dependencies
        else:
            merged_dependencies._merge_dependencies(conda_dependencies)
            _log_conda_dependencies_stats(merged_dependencies, "Merged Conda environment")
    assert merged_dependencies is not None
    return merged_dependencies
Example #27
def get_config(entry_script):
    # Create the environment
    env = Environment(name="tensorflow_env")

    conda_dep = CondaDependencies()

    # Define the packages needed by the model and scripts
    conda_dep.add_conda_package("tensorflow")

    # You must list azureml-defaults as a pip dependency
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("keras")
    conda_dep.add_pip_package("pandas")

    # Adds dependencies to PythonSection of myenv
    env.python.conda_dependencies = conda_dep

    inference_config = InferenceConfig(entry_script=entry_script,
                                       environment=env)

    print('Endpoint configuration returned')
    return inference_config
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig
from azureml.core.conda_dependencies import CondaDependencies

"""
$ python -m ml_service.run_local_compute
"""

ws = Workspace.from_config()

environment = Environment(name='mylocal_env')
environment.python.user_managed_dependencies = True
environment.python.conda_dependencies = CondaDependencies(conda_dependencies_file_path="./environment_setup/conda_dependencies.yml")

config = ScriptRunConfig(source_directory='src/steps',
                         script='01_prep_data.py',
                         compute_target='local',  # or 'cpu-cluster'
                         arguments=[
                                '--data_X', 'outputs/diabetes_X2.csv',
                                '--data_y', 'outputs/diabetes_y2.csv'
                         ],
                         environment=environment)

exp = Experiment(workspace=ws, name='mylocal_exp')
run = exp.submit(config)

aml_url = run.get_portal_url()
print(aml_url)
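# A small follow-on sketch: optionally block until the local run finishes and
# surface its streamed logs.
run.wait_for_completion(show_output=True)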
input_dir = DataReference(datastore=default_store,
                          data_reference_name="input_data",
                          path_on_datastore="churn")

processed_dir = PipelineData(name='processed_data', datastore=default_store)

#%% [markdown]
# ## Pipeline 1st step: Data Preprocessing
#
# We start by defining the run configuration with the dependencies needed by the preprocessing step.
#
# In the cell that follows, we compose the first step of the pipeline.
#

#%%
cd = CondaDependencies()
cd.add_conda_package('pandas')
cd.add_conda_package('matplotlib')
cd.add_conda_package('numpy')
cd.add_conda_package('scikit-learn')

run_config = RunConfiguration(framework="python", conda_dependencies=cd)
run_config.target = cluster
run_config.environment.docker.enabled = True
run_config.environment.docker.base_image = DEFAULT_GPU_IMAGE
run_config.environment.python.user_managed_dependencies = False

#%%
pre_processing = PythonScriptStep(
                            name='preprocess dataset',
                            script_name='preprocess.py',
Example #30
                                   datastore=def_blob_store,
                                   output_name='classification_data',
                                   is_directory=True)

compute_target = ws.compute_targets['cpu-cluster']

environment_variables = {
    'POSTGRES_PASSWORD':
    os.environ['POSTGRES_PASSWORD'],
    'POSTGRES_HOSTNAME':
    'ackbar-postgres.postgres.database.azure.com',
    'AZURE_STORAGE_CONNECTION_STRING':
    os.environ['AZURE_STORAGE_CONNECTION_STRING']
}
env = Environment(name='env', environment_variables=environment_variables)
conda = CondaDependencies()
conda.add_conda_package('psycopg2')
conda.add_conda_package('numpy')
conda.add_conda_package('Pillow')
# have to use pip to install azure packages...
conda.add_pip_package('azure-storage-blob')
env.python.conda_dependencies = conda
run_config = RunConfiguration()
run_config.environment = env

PROJECT = 'caltech'

prepare_step = PythonScriptStep(
    script_name='prepare.py',
    arguments=['--output', batch_input, '--project', PROJECT],
    inputs=[],