Code example #1
def show_environment(
        workspace=None,
        environment_name=None,
        environment_version=None,
        # We should enforce a logger
        logger=None):
    definition = Environment.get(workspace, environment_name, environment_version)
    return Environment._serialize_to_dict(definition)
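For illustration, a minimal call sketch (a hedged example: it assumes a workspace config.json on disk, and the environment name and version are placeholders):

from azureml.core import Workspace

ws = Workspace.from_config()  # assumes a config.json in the working directory
env_dict = show_environment(workspace=ws,
                            environment_name="pytorchenv",  # placeholder name
                            environment_version="1")        # placeholder version
print(env_dict)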
Code example #2
File: runOnAzure.py Project: georgiosN/NeuroLog
def main():
    with open("authentication.json") as jsonFile:
        authData = json.load(jsonFile)[args.auth_cluster]

    # AzureML Subscription Details (get details from the Azure Portal)
    subID = authData["subID"]  # Get from Azure Portal; used for billing
    resGroup = authData["resGroup"]  # Name for the resource group
    # Name for the workspace, which is the collection of compute clusters + experiments
    wsName = authData["wsName"]
    computeName = authData["computeName"]  # Name for the compute cluster
    ### Get workspace and compute target
    ws = Workspace.get(wsName, subscription_id=subID, resource_group=resGroup)
    compute_target = ComputeTarget(ws, computeName)

    # The path to the dataset. If using RichPath then this should be prefixed with azure://
    # otherwise this is the location where the AzureML Datastore will be mounted
    # datapath_prefix = "azure://example1234/data/"
    # Set up by using the URL like above as well as a generated SAS key, placed into azureinfo.json
    datapath_prefix = authData["dataPath"]
    script_folder = "."
    script_params = OrderedDict([
        (datapath_prefix + args.train_file_name, ""),
        (datapath_prefix + args.validate_file_name, ""),
        (datapath_prefix + args.test_file_name, ""),
        ("./model.pkl.gz", ""),
        ("--max-num-epochs", args.max_epochs),
        ("--aml", ""),
        ("--azure-info", "azureinfo.json"),
        ("--quiet", ""),
    ])
    # Enable statement prediction when requested
    if args.predicting_statement:
        script_params["--predicting-statement"] = ""

    with open("Dockerfile") as f:
        docker = DockerSection()
        docker.base_image = None
        docker.base_dockerfile = f.read()
        docker.enabled = True

    environment = Environment(name="pytorchenv")
    environment.docker = docker
    environment.python.user_managed_dependencies = True

    est = Estimator(
        source_directory=script_folder,
        script_params=script_params,
        compute_target=compute_target,
        entry_script="ptgnn/implementations/graph2seq/trainandtest.py",
        environment_definition=environment,
        use_docker=True,
    )

    ### Submit the experiment
    exp = Experiment(workspace=ws, name=args.exp_name)
    run = exp.submit(config=est, tags=args.tags)
    print(
        "Experiment Started. Remember you can exit out of this program but the experiment will still run on Azure!"
    )
    # print("Portal URL: ", run.get_portal_url())
    run.wait_for_completion(show_output=True)
Code example #3
def register_environment(
        workspace=None,
        environment_directory=None,
        # We should enforce a logger
        logger=None):
    definition = Environment.load_from_directory(environment_directory)
    result = definition.register(workspace)
    return Environment._serialize_to_dict(result)
Code example #4
def scaffold_environment(
        workspace=None,
        environment_name=None,
        environment_directory=None,
        # We should enforce a logger
        logger=None):
    definition = Environment(environment_name)
    definition.save_to_directory(environment_directory)
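Taken together with register_environment above, a hedged round-trip sketch (the directory and environment name are placeholders, and a local config.json is assumed):

from azureml.core import Workspace

ws = Workspace.from_config()
scaffold_environment(workspace=ws,
                     environment_name="my-env",         # placeholder
                     environment_directory="./my-env")  # placeholder
# ...edit the scaffolded definition files on disk, then register them...
register_environment(workspace=ws, environment_directory="./my-env")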
Code example #5
def Azure_ML_experiment():

    # create experiment
    experiment_name = 'aftershock-model-train'
    exp = Experiment(workspace=ws, name=experiment_name)

    # create the compute resource
    compute_target = create_compute_resource(ws)

    # environment with the required packages
    env = Environment('aftershock-env-01')
    cd = CondaDependencies.create(
        pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults'],
        conda_packages=['scikit-learn==0.24.2'])

    env.python.conda_dependencies = cd

    src = ScriptRunConfig(source_directory=os.getcwd(),
                          script='train.py',
                          arguments=[],
                          compute_target=compute_target,
                          environment=env)

    # submit the train script to the experiment
    run = exp.submit(config=src)
    print(run.get_file_names())
Code example #6
def create_aml_environment(aml_interface):
    aml_env = Environment(name=AML_ENV_NAME)
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("numpy==1.18.2")
    conda_dep.add_pip_package("pandas==1.0.3")
    conda_dep.add_pip_package("scikit-learn==0.22.2.post1")
    conda_dep.add_pip_package("joblib==0.14.1")
    whl_filepath = retrieve_whl_filepath()
    whl_url = Environment.add_private_pip_wheel(
        workspace=aml_interface.workspace,
        file_path=whl_filepath,
        exist_ok=True)
    conda_dep.add_pip_package(whl_url)
    aml_env.python.conda_dependencies = conda_dep
    aml_env.docker.enabled = True
    return aml_env
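The returned environment would typically be registered before runs or deployments use it; a minimal sketch, assuming aml_interface comes from the surrounding project:

aml_env = create_aml_environment(aml_interface)
aml_env.register(workspace=aml_interface.workspace)  # make it retrievable by name later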
Code example #7
    def __deploy_model(self):
        service_name = self.__args.service_name

        model = Model(self.__ws, self.__args.model_name)
        explainer_model = Model(self.__ws, self.__args.explainer_model_name)
        myenv = Environment.from_conda_specification(
            name=self.__config.get('DEPLOY', 'ENV_NAME'),
            file_path=self.__config.get('DEPLOY', 'ENV_FILE_PATH'))
        inference_config = InferenceConfig(
            entry_script=self.__config.get('DEPLOY', 'SCORE_PATH'),
            environment=myenv,
            source_directory=self.__config.get('DEPLOY',
                                               'DEPENDENCIES_DIRECTORY'))

        if not self.__args.update_deployment:
            deployment_config = AciWebservice.deploy_configuration(
                cpu_cores=self.__config.getint('DEPLOY', 'ACI_CPU'),
                memory_gb=self.__config.getint('DEPLOY', 'ACI_MEM'),
                collect_model_data=True,
                enable_app_insights=True)
            service = Model.deploy(self.__ws, service_name,
                                   [model, explainer_model], inference_config,
                                   deployment_config)
        else:
            service = AciWebservice(self.__ws, service_name)
            service.update(models=[model, explainer_model],
                           inference_config=inference_config)

        service.wait_for_deployment(show_output=True)
        print(service.state)
        print(service.get_logs())
Code example #8
def get_inference_config(aml_interface):
    aml_env = Environment.get(workspace=aml_interface.workspace,
                              name=AML_ENVIRONMENT_NAME)
    scoring_script_path = os.path.join(__here__, 'score.py')
    inference_config = InferenceConfig(entry_script=scoring_script_path,
                                       environment=aml_env)
    return inference_config
Code example #9
    def get_run_cfg(ws, pip_packages, conda_packages, ext_wheels, gpu=True):
        '''
        get_run_cfg - Retrieves the AMLS run configuration.

        :param ws: AMLS workspace used to upload any private wheels
        :param pip_packages: list of pip package names to install
        :param conda_packages: list of conda package names to install
        :param ext_wheels: list of local paths to private pip wheel files
        :param gpu: whether to enable Docker GPU support
        :returns: AMLS run configuration
        :rtype: RunConfiguration object
        '''
        conda_dep = CondaDependencies()
        for pip_package in pip_packages:
            conda_dep.add_pip_package(pip_package)
        for conda_package in conda_packages:
            conda_dep.add_conda_package(conda_package)
        for whl_path in ext_wheels:
            whl_url = Environment.add_private_pip_wheel(workspace=ws,
                                                        file_path=whl_path,
                                                        exist_ok=True)
            conda_dep.add_pip_package(whl_url)
        run_cfg = RunConfiguration(conda_dependencies=conda_dep)
        run_cfg.environment.docker.enabled = True
        run_cfg.environment.docker.gpu_support = gpu
        if gpu:
            run_cfg.environment.docker.base_image = DEFAULT_GPU_IMAGE
        else:
            run_cfg.environment.docker.base_image = DEFAULT_CPU_IMAGE
        run_cfg.environment.spark.precache_packages = False
        return run_cfg
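A hedged sketch of wiring the returned RunConfiguration into a script run (the package lists, wheel path, and script name are placeholders; ws is an existing Workspace):

from azureml.core import Experiment, ScriptRunConfig

run_cfg = get_run_cfg(ws,
                      pip_packages=['azureml-defaults'],
                      conda_packages=['numpy'],
                      ext_wheels=['./dist/mypkg-0.1-py3-none-any.whl'],  # placeholder wheel
                      gpu=False)
src = ScriptRunConfig(source_directory='.', script='train.py', run_config=run_cfg)
run = Experiment(ws, 'run-cfg-demo').submit(src)
run.wait_for_completion(show_output=True)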
Code example #10
def download_environment(
        workspace=None,
        environment_name=None,
        environment_version=None,
        environment_directory=None,
        environment_overwrite=None,
        # We should enforce a logger
        logger=None):
    definition = Environment.get(workspace, environment_name, environment_version)
    definition.save_to_directory(environment_directory, environment_overwrite)
Code example #11
def create_aml_environment(aml_interface):
    aml_env = Environment(name=AML_ENVIRONMENT_NAME)
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("numpy==1.18.2")
    conda_dep.add_pip_package("pandas==1.0.3")
    conda_dep.add_pip_package("scikit-learn==0.22.2.post1")
    conda_dep.add_pip_package("joblib==0.14.1")
    conda_dep.add_pip_package("azure-storage-blob==12.3.0")

    aml_env.environment_variables[AZURE_STORAGE_ACCOUNT_NAME] = os.getenv(
        AZURE_STORAGE_ACCOUNT_NAME)
    aml_env.environment_variables[AZURE_STORAGE_ACCOUNT_KEY] = os.getenv(
        AZURE_STORAGE_ACCOUNT_KEY)
    aml_env.environment_variables[MODEL_NAME_VARIABLE] = MODEL_NAME

    logger.info(
        f"set environment variables on compute environment: {aml_env.environment_variables}"
    )

    whl_filepath = retrieve_whl_filepath()
    whl_url = Environment.add_private_pip_wheel(
        workspace=aml_interface.workspace,
        file_path=whl_filepath,
        exist_ok=True)
    conda_dep.add_pip_package(whl_url)
    aml_env.python.conda_dependencies = conda_dep
    aml_env.docker.enabled = True
    return aml_env
Code example #12
def deploy(local, aks, aci, num_cores, mem_gb, compute_name):
    # Get the workspace
    ws = Workspace.from_config()
    # Create inference configuration based on the environment definition and the entry script
    # yolo = Environment.from_conda_specification(name="env", file_path="yolo.yml")
    yolo = Environment.from_pip_requirements(
        name="yolo", file_path="./deployed_requirements.txt")
    # yolo.save_to_directory('')
    yolo.register(workspace=ws)
    inference_config = InferenceConfig(entry_script="azure.py",
                                       environment=yolo,
                                       source_directory="yolov5")
    # Retrieve registered model
    model = Model(ws, id="lpr:1")
    deploy_target = None
    if local:
        # Create a local deployment, using port 8890 for the web service endpoint
        deployment_config = LocalWebservice.deploy_configuration(port=8890)
    elif aks:
        # Create an AKS deployment
        deployment_config = AksWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb,
            compute_target_name=compute_name)
        deploy_target = ComputeTarget(workspace=ws, name=compute_name)
        # if deploy_target.get_status() != "Succeeded":
        #     print(f"Deploy Target: {deploy_target.get_status()}")
        #     deploy_target.wait_for_completion(show_output=True)
    elif aci:
        # Create an ACI deployment
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb)
    else:
        raise NotImplementedError("Choose deploy target please")
    # Deploy the service
    print("Deploying:")
    service = Model.deploy(workspace=ws,
                           name="lpr",
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config,
                           overwrite=True,
                           deployment_target=deploy_target)
    # Wait for the deployment to complete
    print("Deploying:")
    service.wait_for_deployment(True)
    # Display the port that the web service is available on
    if local:
        print(service.port)
Code example #13
def get_environment(name="deploy", file_path="environment.yml"):
    """
    Return an environment
    """
    environment = Environment.from_conda_specification(
        name=name,
        file_path=file_path,
    )
    environment.python.user_managed_dependencies = False
    environment.docker.enabled = True
    environment.docker.base_image = (
        "mcr.microsoft.com/azureml/intelmpi2018.3-ubuntu16.04:20200821.v1"
    )
    return environment
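One way the returned environment might be consumed, sketched under the assumption that a score.py entry script exists next to environment.yml:

from azureml.core.model import InferenceConfig

env = get_environment()  # defaults: name="deploy", file_path="environment.yml"
inference_config = InferenceConfig(entry_script="score.py", environment=env)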
Code example #14
def main():
    try:
        ws = connectToWorkspace(TENANT_ID, APP_ID, SP_PASSWORD,
                                SUBSCRIPTION_ID, RESOURCE_GROUP,
                                WORKSPACE_NAME)
    except ProjectSystemException as err:
        print('Authentication did not work.')
        return json.dumps('ProjectSystemException')
    except Exception as err:
        print(err)
        sys.exit()
    print("connect")
    model = Model.register(
        model_path=os.path.join(os.getcwd(), "retailai_recommendation_model.zip"),
        model_name="retailai_recommendation_model",
        description="Retail.AI Item-Based Recommender",
        workspace=ws)
    print("model registered")

    myenv = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')
    myenv.name = "myenv"
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("azure-storage")
    conda_dep.add_pip_package("azure-storage-file-datalake")
    myenv.python.conda_dependencies = conda_dep
    print("Environment Configured")
    inference_config = InferenceConfig(entry_script='score.py',
                                       environment=myenv)

    aks_target_name = "raiaks"

    try:
        aks_target = AksCompute(ws, aks_target_name)
        print(aks_target)
    except ComputeTargetException as err:
        aks_target = attachAksComputeToWorkspace(ws, RESOURCE_GROUP,
                                                 AKS_CLUSTER_NAME,
                                                 aks_target_name, True)
        print(aks_target)
    except Exception as err:
        print(err)
        sys.exit()
    try:
        deployToAks(ws, aks_target, "retail-ai-item-recommender", model,
                    inference_config, True)
    except Exception as err:
        print(err)
        sys.exit()
Code example #15
def get_inference_config(environment_name, conda_file, entry_script):
    # Create the environment
    env = Environment(name=environment_name)

    conda_dep = CondaDependencies(conda_file)

    # Define the packages needed by the model and scripts
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("xgboost")

    # Adds dependencies to PythonSection of myenv
    env.python.conda_dependencies = conda_dep

    inference_config = InferenceConfig(entry_script=entry_script,
                                       environment=env)

    return inference_config
Code example #16
    def _register_private_pip_wheel_to_blob(workspace, file_path, container_name=None, blob_name=None):
        """Register the private pip package wheel file on disk to the Azure storage blob attached to the workspace.

        :param workspace: Workspace object to use to register the private pip package wheel.
        :type workspace: azureml.core.workspace.Workspace
        :param file_path: Path to the local pip wheel file on disk, including the file extension.
        :type file_path: str
        :param container_name: Container name to use to store the pip wheel. Defaults to private-packages.
        :type container_name: str
        :param blob_name: Full path to use to store the pip wheel on the blob container.
        :type blob_name: str
        :return: Returns the full URI to the uploaded pip wheel on Azure blob storage to use in conda dependencies.
        :rtype: str
        """
        import logging
        logging.warning("_register_private_pip_wheel_to_blob() is going to be removed in the next SDK release. "
                        "Please use Environment.add_private_pip_wheel() instead.")
        from azureml.core.environment import Environment
        return Environment.add_private_pip_wheel(workspace, file_path)
Code example #17
def main():
    ws = Workspace.get(name=WORKSPACE_NAME,
                       subscription_id=SUBSCRIPTION_ID,
                       resource_group=RESOURCE_GROUP)

    print("connect")

    model = Model.register(
        model_path=os.path.join(os.getcwd(), "retailai_recommendation_model.zip"),
        model_name="retailai_recommendation_model",
        description="Retail.AI Item-Based Recommender",
        workspace=ws)
    print("model registered")

    myenv = Environment.from_conda_specification(name='myenv',
                                                 file_path="environment.yml")
    myenv.docker.base_image = "mcr.microsoft.com/mmlspark/release"
    myenv.inferencing_stack_version = 'latest'
    print("Environment Configured")

    inference_config = InferenceConfig(entry_script='score.py',
                                       environment=myenv)
    aks_target_name = AKS_CLUSTER_NAME

    try:
        aks_target = AksCompute(ws, aks_target_name)
        print(aks_target)
    except ComputeTargetException as err:
        aks_target = attachAksComputeToWorkspace(ws, RESOURCE_GROUP,
                                                 AKS_CLUSTER_NAME,
                                                 aks_target_name, True)
        print(aks_target)
    except Exception as err:
        print(err)
        sys.exit()
    try:
        deployToAks(ws, aks_target, "retail-ai-item-recommender", model,
                    inference_config, True)
    except Exception as err:
        print(err)
        sys.exit()
Code example #18
File: deploy.py Project: CathyZhou0120/pipelines
def main():
    workspace_name = os.environ['AML_WORKSPACE_NAME']
    resource_group = os.environ['RESOURCE_GROUP']
    subscription_id = os.environ['SUBSCRIPTION_ID']

    spn_credentials = {
        'tenant_id': os.environ['TENANT_ID'],
        'service_principal_id': os.environ['SPN_ID'],
        'service_principal_password': os.environ['SPN_PASSWORD'],
    }

    aml_interface = AMLInterface(spn_credentials, subscription_id,
                                 workspace_name, resource_group)

    scoring_script_path = os.path.join(__here__, 'score.py')
    aml_env = Environment.get(workspace=aml_interface.workspace,
                              name=AML_ENV_NAME)
    service_name = 'aml-pipeline-deploy-3'
    inference_config = InferenceConfig(entry_script=scoring_script_path,
                                       environment=aml_env)
    deploy(aml_interface, inference_config, service_name)
Code example #19
File: deploy.py Project: lfbraz/azure-mlops
def get_config(entry_script):
    # Create the environment
    env = Environment(name="tensorflow_env")

    conda_dep = CondaDependencies()

    # Define the packages needed by the model and scripts
    conda_dep.add_conda_package("tensorflow")

    # You must list azureml-defaults as a pip dependency
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("keras")
    conda_dep.add_pip_package("pandas")

    # Adds dependencies to PythonSection of myenv
    env.python.conda_dependencies = conda_dep

    inference_config = InferenceConfig(entry_script=entry_script,
                                       environment=env)

    print('Endpoint configuration returned')
    return inference_config
Code example #20
def deploy_models():
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        tags={
            "data": "Earthquake",
            "method": "sklearn"
        },
        description='Predict aftershock situation '
        'using linear models in sklearn')

    # env = Environment('aftershock-env')
    # cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]',
    #                                             'azureml-defaults'],
    #                               conda_packages=['scikit-learn==0.24.2'])
    # env.python.conda_dependencies = cd
    env = Environment.get(workspace=ws, name='aftershock-env')

    inference_config = InferenceConfig(entry_script="predict.py",
                                       environment=env)
    model = Model(ws, "aftershock_model")

    # service_name = 'sklearn-aftershock-svc-' + str(uuid.uuid4())[:4]
    service_name = "sklearn-aftershock-svc-f41b"
    service = Model.deploy(workspace=ws,
                           name=service_name,
                           models=[model],
                           overwrite=True,
                           inference_config=inference_config,
                           deployment_config=aciconfig)

    service.wait_for_deployment(show_output=True)
    print(service.get_logs())
    print(service.scoring_uri)
    print("service keys")
    print(service.get_keys())
    print("service token")
    print(service.get_token())
Code example #21
File: azureml.py Project: dsame/cli-demo
def file_to_inference_config(workspace, inference_config_file, description):
    with open(inference_config_file) as inference_file_stream:
        inference_config_obj = file_stream_to_object(inference_file_stream)

        # Retrieve Environment object from the name in the InferenceConfig file
        if 'environment' not in inference_config_obj:
            raise OMLException("need to specify environment in --deploy-config-file")
        environment_name = inference_config_obj.get('environment')["name"]
        environment = Environment.get(workspace, name=environment_name)

        inference_config = InferenceConfig(
            entry_script=inference_config_obj.get('entryScript'),
            runtime=inference_config_obj.get('runtime'),
            conda_file=inference_config_obj.get('condaFile'),
            extra_docker_file_steps=inference_config_obj.get('extraDockerfileSteps'),
            source_directory=inference_config_obj.get('sourceDirectory'),
            enable_gpu=inference_config_obj.get('enableGpu'),
            base_image=inference_config_obj.get('baseImage'),
            base_image_registry=inference_config_obj.get('baseImageRegistry'),
            cuda_version=inference_config_obj.get('cudaVersion'),
            environment=environment,
            description=description)
        return inference_config
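file_stream_to_object is not shown, but the keys read above suggest an inference-config file shaped roughly like this (a hypothetical sketch, not a documented schema):

environment:
  name: my-registered-env
entryScript: score.py
sourceDirectory: ./src

With such a file in place, the call would look like (ws being an existing Workspace):

inference_config = file_to_inference_config(ws, "inference_config.yml", "demo deployment")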
Code example #22
args = parser.parse_args()

model = Model(ws, name=e.model_name, version=e.model_version)
sources_dir = e.sources_directory_train
if (sources_dir is None):
    sources_dir = 'londonboiler'
score_script = os.path.join(".", sources_dir, e.score_script)
score_file = os.path.basename(score_script)
path_to_scoring = os.path.dirname(score_script)
cwd = os.getcwd()
# Copy conda_dependencies.yml into scoring as this method does not accept relative paths. # NOQA: E501
shutil.copy(os.path.join(".", sources_dir, "conda_dependencies.yml"),
            path_to_scoring)
os.chdir(path_to_scoring)

scoring_env = Environment.from_conda_specification(
    name="scoringenv", file_path="conda_dependencies.yml")  # NOQA: E501
inference_config = InferenceConfig(entry_script=score_file,
                                   environment=scoring_env)
package = Model.package(ws, [model], inference_config)
package.wait_for_creation(show_output=True)
# Display the package location/ACR path
print(package.location)

os.chdir(cwd)

if package.state != "Succeeded":
    raise Exception(f"Image creation status: {package.state}")

print("Package stored at {} with build log {}".format(
    package.location, package.package_build_log_uri))  # NOQA: E501
Code example #23
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import LocalWebservice


# Create inference configuration based on the environment definition and the entry script
myenv = Environment.from_conda_specification(name="ml", file_path="C:\\Users\\rbenn\\AppData\\Local\\Packages\\CanonicalGroupLimited.UbuntuonWindows_79rhkp1fndgsc\\LocalState\\rootfs\\home\\rbennett\\mlenvironment.yml")
inference_config = InferenceConfig(entry_script="EntryScript.py", environment=myenv)
# Create a local deployment, using port 8890 for the web service endpoint
deployment_config = LocalWebservice.deploy_configuration(port=8890)
# Deploy the service
service = Model.deploy(
    ws, "mymodel", [model], inference_config, deployment_config)
# Wait for the deployment to complete
service.wait_for_deployment(True)
# Display the port that the web service is available on
print(service.port)
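Once the local service is up, it can be exercised in-process; a sketch, assuming the entry script accepts a JSON payload with a "data" field:

import json

sample = json.dumps({"data": [[0.1, 0.2, 0.3]]})  # placeholder feature row
print(service.run(input_data=sample))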
Code example #24
aciconfig = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags={
        "data": "Random Recommender",
        "Gestore": "Alessandro Artoni",
        "Owner": "Alessandro Artoni",
        "Environment": "dev",
        "Progetto": "Random recommender example"
    },
    description='Example on how to deploy a random recommender',
)

logger.info("ACI Deployed")

env = Environment('custom')
env.python.conda_dependencies = CondaDependencies.create(
    pip_packages=['azureml-defaults', 'joblib', 'numpy'])

inference_config = InferenceConfig(entry_script="score.py",
                                   source_directory="recommender",
                                   environment=env)

logger.info("Inference config setted")

if (WHERE_TO_DEPLOY == "LOCAL"):
    deployment_config = LocalWebservice.deploy_configuration(port=8890)
    # Deploy the service
    service = Model.deploy(ws, "localmodel", [model], inference_config,
                           deployment_config)
    # Wait for the deployment to complete
Code example #25
# %%
from azureml.core.conda_dependencies import CondaDependencies 

myenv = CondaDependencies.create(pip_packages=["numpy","onnxruntime","azureml-core", "azureml-defaults"])

with open("myenv.yml","w") as f:
    f.write(myenv.serialize_to_string())   

# %%
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment


myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv) 

# %%    
from azureml.core.webservice import AciWebservice

aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, 
                                               memory_gb = 1, 
                                               tags = {'demo': 'onnx'}, 
                                               description = 'web service for MNIST ONNX model')
#%%
from azureml.core.model import Model
from random import randint

aci_service_name = 'onnx-demo-mnist'+str(randint(0,100))
print("Service", aci_service_name)
Code example #26
print(run)

model = run.register_model(model_name="turbofan-rul",
                           model_path="outputs/model.pkl",
                           tags={
                               "mae": run_metrics["mae"],
                               "python version": sys.version[0:6]
                           })

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1,
                                                       auth_enabled=True)
#deployment_config = LocalWebservice.deploy_configuration(port=8890) ##deploy locally for testing

myenv = Environment(name="myenv")
conda_dep = CondaDependencies("turbofan.yml")
#conda_dep.add_conda_package("numpy")
#conda_dep.add_conda_package("scikit-learn")
# You must list azureml-defaults as a pip dependency
#conda_dep.add_pip_package("azureml-defaults")

myenv.python.conda_dependencies = conda_dep

inference_config = InferenceConfig(entry_script="score.py", environment=myenv)

#image_config = ContainerImage.image_configuration(execution_script = "score.py",
# runtime = "python",
#conda_file = "turbofan.yml")

try:
Code example #27
# In[27]:

#get_ipython().run_cell_magic('writefile', 'score.py', "\nimport json\nimport sys\nimport joblib\n\nfrom azureml.core.model import Model\nimport numpy as np\n\ndef init():\n\n    global path\n    model_path = Model.get_model_path('demodrug')\n    model = joblib.load(model_path)\n\ndef run(raw_data):\n    try:\n        data = json.loads(raw_data)['data']\n        data = numpy.array(data)\n        result  = model.predict(data)\n        return result.tolist()\n    except Exception as e:\n        result = str(e)\n        return error")

# # Describe your environment
# Each modelling process may require a unique set of packages. Therefore we need to create a dependency file providing instructions to AML on how to construct a docker image that can support the models and any other objects required for inferencing. In the following cell, we create an environment dependency file, myenv.yml, that specifies which libraries are needed by the scoring script. You can create this file manually, or use the CondaDependencies class to create it for you.
#
# Next we use this environment file to describe the docker container that we need to create in order to deploy our model. This container is created using our environment description and includes our scoring script.
#

# In[28]:

from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.environment import Environment

env = Environment(name="env")

myenv = CondaDependencies()
myenv.add_pip_package("numpy")
myenv.add_pip_package("azureml-core")
myenv.add_pip_package("sklearn")

# Adds dependencies to PythonSection of myenv
env.python.conda_dependencies = myenv

print(myenv.serialize_to_string())

with open("myenv.yml", "w") as f:
    f.write(myenv.serialize_to_string())

# # Create an image configuration
Code example #28
    aks_target = ComputeTarget.create(workspace=ws,
                                      name=aks_name,
                                      provisioning_configuration=prov_config)

    aks_target.wait_for_completion(show_output=True)

# Define the deployment configuration
gpu_aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,
                                                    num_replicas=3,
                                                    cpu_cores=2,
                                                    memory_gb=4)

# Define the inference configuration
myenv = Environment.from_conda_specification(
    name="testEnv",
    file_path="C:/Users/Danilo.Bento/Icon Dropbox/DEVDATA/RO/DEVELOPMENT/SIB2/tutorials/model5/mod5_deploy/deploy_env.yaml"
)

myenv.docker.base_image = DEFAULT_GPU_IMAGE
inference_config = InferenceConfig(
    # entry_script=os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'yolov5', 'score.py'),
    # entry_script="./yolov5/score.py",
    entry_script="score.py",
    environment=myenv,
    source_directory="C:/Users/Danilo.Bento/Icon Dropbox/DEVDATA/RO/DEVELOPMENT/SIB2/tutorials/model5/mod5_deploy/deployassets"
)

# Name of the web service that is deployed
aks_service_name = 'aks-mod5-test'
# Get the registered model
Code example #29
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core.webservice import AciWebservice

ws = Workspace.from_config(path='./.azureml', _file_name='config.json')
model = Model(ws, name='titanic_model', version=2)

env = Environment.from_conda_specification(
    name='sklearn-aml-env', file_path='./.azureml/sklearn-env-aml.yml')

inference_config = InferenceConfig(entry_script="./src/score.py",
                                   environment=env)

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1)

aci_service = Model.deploy(workspace=ws,
                           name='titanic-model-2',
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config)

aci_service.wait_for_deployment(show_output=True)
print(aci_service.state)
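A quick smoke test of the deployed service; a sketch only, since the feature layout expected by score.py is not shown:

import json

payload = json.dumps({"data": [[3, 1, 22.0, 1, 0, 7.25]]})  # hypothetical Titanic feature row
print(aci_service.run(input_data=payload))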
Code example #30
#
# TODO: In the cell below, register the model, create an inference config and deploy the model as a web service.
#
#

# +
# Prepare environment config
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
if 'temp' not in os.listdir():
    os.mkdir('temp')
df_test.to_json("temp/test_sample.json")  # save data for external tests

# Create the environment
myenv = Environment(name="mortgage_score_env_hd")
conda_dep = CondaDependencies()

# Define the packages needed by the model and scripts
conda_dep.add_conda_package("numpy")
conda_dep.add_conda_package("pip")
conda_dep.add_conda_package("scikit-learn=0.20.3")
# You must list azureml-defaults as a pip dependency
conda_dep.add_pip_package("azureml-defaults==1.11.0")
conda_dep.add_pip_package("azureml-core")
conda_dep.add_pip_package("azureml-automl-runtime")
conda_dep.add_pip_package("packaging")
conda_dep.add_pip_package("azureml-explain-model==1.11.0")
conda_dep.add_pip_package("inference-schema")
conda_dep.add_conda_package("numpy")
# scikit-learn>=0.19.0,<=0.20.3