Example 1
import os

from flask import request  # assumption: this handler runs inside a Flask app
from azureml.core import Workspace
from azureml.core.compute import AksCompute
from azureml.core.webservice import AksWebservice


def DeployAzureAKS():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    location = request.json['location']
    best_model = request.json['best_model']
    Model_path = request.json['Model_path']
    cluster_name = request.json['cluster_name']
    service_name = request.json['service_name']
    Reg_model_name = request.json['Reg_model_name']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

    from azureml.core.model import Model
    model = Model(ws, name=Reg_model_name)
    print(model)

    from azureml.core.model import InferenceConfig
    from azureml.core.environment import Environment
    cwd = 'D:\\DCSAIAUTOML\\BestModels\\Azure'
    model_path = os.path.join(cwd, Model_path, best_model, "outputs")
    print(model_path)
    os.chdir(model_path)

    # Scoring script and conda spec generated by AutoML in the outputs folder
    script_file_name = 'scoring_file_v_1_0_0.py'
    conda_env_file_name = 'conda_env_v_1_0_0.yml'

    myenv = Environment.from_conda_specification(name="myenv",
                                                 file_path=conda_env_file_name)
    inference_config = InferenceConfig(entry_script=script_file_name,
                                       environment=myenv)

    aks_target = AksCompute(ws, cluster_name)
    # If deploying to a cluster configured for dev/test, ensure that it was created with enough
    # cores and memory to handle this deployment configuration. Note that memory is also used by
    # things such as dependencies and AML components.
    try:
        deployment_config = AksWebservice.deploy_configuration(
            cpu_cores=2,
            memory_gb=16,
            enable_app_insights=True,
            collect_model_data=True,
        )
        service = Model.deploy(ws, service_name, [model], inference_config,
                               deployment_config, aks_target)
        service.wait_for_deployment(show_output=True)
        print(service.state)
        compute_type = service.compute_type
        state = service.state
        url = service.scoring_uri
        s_url = service.swagger_uri
        v1 = "@"  # field separator for the response string
        v2 = "Deployed Successfully"
        print(v2)
        return '{} {} {} {} {} {} {} {} {}'.format(v2, v1, compute_type, v1,
                                                   state, v1, url, v1, s_url)

    except Exception as e:
        error_statement = str(e)
        print("Error statement: ", error_statement)
        return error_statement
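
# A hedged client-side sketch (not part of the original handler): calling the
# AKS endpoint deployed above. scoring_uri, primary_key, and the payload shape
# are assumptions that depend on the generated scoring file.
import json

import requests

scoring_uri = "<scoring-uri-returned-by-the-service>"
primary_key = "<primary-key-from-service.get_keys()>"
headers = {"Content-Type": "application/json",
           "Authorization": "Bearer " + primary_key}
payload = json.dumps({"data": [[5.1, 3.5, 1.4, 0.2]]})  # shape depends on the model
response = requests.post(scoring_uri, data=payload, headers=headers)
print(response.json())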
Example 2
from azureml.core.conda_dependencies import CondaDependencies

my_env = CondaDependencies()
my_env.add_conda_package('scikit-learn')

env_file = 'service_files/env.yml'

with open(env_file, "w") as f:
    f.write(my_env.serialize_to_string())

print("Saved dependency info in", env_file)

# Combine the entry script and the environment into an InferenceConfig

from azureml.core.model import InferenceConfig

class_inference_config = InferenceConfig(runtime='python',
                                         source_directory='service_files',
                                         entry_script='score.py',
                                         conda_file='env.yml')

# The inference config is ready; next, configure the compute target the service
# will be deployed to. For an AKS deployment, the cluster must be created as a
# compute target before deploying.

#creating the AKS cluster(Azure kubernetes service)

from azureml.core.compute import ComputeTarget, AksCompute

cluster_name = 'aks-cluster'
compute_config = AksCompute.provisioning_configuration(location="eastus")
production_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
production_cluster.wait_for_completion(show_output=True)

from azureml.core.webservice import AksWebservice
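
# A hedged sketch of the remaining steps (assumed names: ws, model), following
# the pattern used elsewhere in this collection: define the AKS deployment
# configuration, then deploy to the provisioned cluster.
from azureml.core.model import Model

class_deploy_config = AksWebservice.deploy_configuration(cpu_cores=1,
                                                         memory_gb=2)
service = Model.deploy(workspace=ws,
                       name='classification-service',
                       models=[model],
                       inference_config=class_inference_config,
                       deployment_config=class_deploy_config,
                       deployment_target=production_cluster)
service.wait_for_deployment(show_output=True)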
Example 3
from azureml.core import Workspace
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import LocalWebservice

ws = Workspace.get(name="QCM",
                   subscription_id='ed4fc9bc-f386-4f01-a8b4-2077312476f3',
                   resource_group='appsvc_linux_centralus')

models = Model.register(
    model_path="model_scaler.joblib",
    model_name="mymodel",
    tags={
        'area': "diabetes",
        'type': "regression"
    },
    description="Ridge regression model to predict diabetes",
    workspace=ws)

# Create inference configuration based on the environment definition and the entry script
myenv = Environment.from_conda_specification(name="env", file_path="myenv.yml")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)

# Create a local deployment, using port 8890 for the web service endpoint
deployment_config = LocalWebservice.deploy_configuration(port=8890)

# Deploy the service
service = Model.deploy(ws, "mymodel", [models], inference_config,
                       deployment_config)

# Wait for the deployment to complete
service.wait_for_deployment(True)

# Display the port that the web service is available on
print(service.port)
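
# A hedged usage sketch (not in the original): exercising the local endpoint.
# The payload shape is an assumption that depends on score.py's run() method.
import json

sample = json.dumps({"data": [[0.02, 0.05, 0.06, 0.02, -0.04,
                               -0.03, -0.04, -0.002, 0.02, -0.02]]})
print(service.run(input_data=sample))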
Example 4
from azureml.core import Workspace
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice, Webservice
from azureml.exceptions import WebserviceException

from src.utils import update_metadata, load_metadata

# Initialise
METADATA = load_metadata()
SERVICE_DESCRIPTION = 'Heart failure predictor web service'
SERVICE_NAME = 'heartfailure-prediction'
CPU_CORES = 1
MEMORY_GB = 1

# Get environment
workspace = Workspace.from_config()
environment = Environment.get(workspace=workspace, name=METADATA['env_name'])

# Deploy container
inference_config = InferenceConfig(
    entry_script='./src/score.py',
    environment=environment,
)
aci_config = AciWebservice.deploy_configuration(
    cpu_cores=CPU_CORES, memory_gb=MEMORY_GB, description=SERVICE_DESCRIPTION)

# Deploy as web service
try:
    # Remove any existing service under the same name.
    Webservice(workspace, SERVICE_NAME).delete()
except WebserviceException:
    pass

model = Model(workspace, METADATA['model_name'])

webservice = Model.deploy(workspace=workspace,
                          name=SERVICE_NAME,
                          models=[model],
                          inference_config=inference_config,
                          deployment_config=aci_config)  # (assumed completion of the truncated call)
webservice.wait_for_deployment(show_output=True)
Example 5
    
    # Deployment Target
    if deploy.get('type') == 'ACI':
        compute_config = AciWebservice.deploy_configuration(cpu_cores=deploy.get('cpu'),
                                                            memory_gb=deploy.get('memory'),
                                                            auth_enabled=auth_enabled)
    elif deploy.get('type') == 'AKS':
        compute_config = AksWebservice.deploy_configuration()
    
    # Prepare Environment
    environment = Environment('env')
    conda_packages = ['pytorch', 'torchvision']
    pip_packages = ['azureml-defaults'] + pip_packages
    environment.python.conda_dependencies = CondaDependencies.create(pip_packages=pip_packages,
                                                                    conda_packages=conda_packages)

    inference_config = InferenceConfig(entry_script='src/infer.py',
                                       source_directory='.',
                                       environment=environment)
    
    # Create or update service
    service_name = f'{args.project_name}-{env}'.replace('_','-')
    ## Create web service
    service = Model.deploy(workspace=ws, 
                            name=service_name, 
                            models=models, 
                            inference_config=inference_config, 
                            deployment_config=compute_config, 
                            overwrite=True)
    logging.warning('[INFO] Creating web service')
    service.wait_for_deployment(show_output=args.show_output)

    # Get service details
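
    # A hedged continuation (assumed; the excerpt ends at the comment above):
    # log the details typically read off the service after deployment.
    logging.warning('[INFO] Scoring URI: {}'.format(service.scoring_uri))
    logging.warning('[INFO] State: {}'.format(service.state))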
Example 6
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice, Webservice
from azureml.exceptions import WebserviceException

# ws is an azureml Workspace defined earlier in the original script
freezer_environment = ws.environments["sktime_freezer_environment"]

try:
    service = Webservice(ws, args.webservicename)
except WebserviceException:
    service = None

if args.redeploy:
    if service is not None:
        service.delete()
        print("deleted existing Webservice.")

    model = Model(ws, "sktime_freezer_classifier")

    inference_config = InferenceConfig(
        entry_script="score.py", source_directory="./", environment=freezer_environment
    )

    aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)

    service = Model.deploy(
        workspace=ws,
        name=args.webservicename,
        models=[model],
        inference_config=inference_config,
        deployment_config=aci_config,
    )

    # service.wait_for_deployment(show_output=False)
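
    # Hedged follow-up (not in the original, which leaves the wait commented
    # out): report the submitted deployment's current state.
    print("Deployment submitted; current state:", service.state)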
Example 7
# (snippet starts mid-stream: myenv is an azureml Environment defined earlier)
myenv.python.conda_dependencies = CondaDependencies.create(
    pip_packages=[
        "azureml-core", "azureml-defaults", "azureml-telemetry",
        "azureml-train-restclients-hyperdrive", "azureml-train-core",
        "azureml-monitoring", "scikit-learn", "cryptography==3.3.2",
        "Werkzeug==0.16.1"
    ],
    python_version="3.6.2")
myenv.python.conda_dependencies.add_channel("conda-forge")
myenv.spark.packages = [
    SparkPackage("com.microsoft.ml.spark", "mmlspark_2.11", "0.15"),
    SparkPackage("com.microsoft.azure", "azure-storage", "2.0.0"),
    SparkPackage("org.apache.hadoop", "hadoop-azure", "2.7.0")
]
myenv.spark.repositories = ["https://mmlspark.azureedge.net/maven"]
inference_config = InferenceConfig(entry_script='scoreSpark.py',
                                   environment=myenv)

deployment_config = AksWebservice.deploy_configuration(
    auth_enabled=False,
    collect_model_data=True,
    enable_app_insights=True,
    cpu_cores=2,
    memory_gb=2)
aks_target = AksCompute(ws, aks_name)

service = Model.deploy(ws, service_name, [model], inference_config,
                       deployment_config, aks_target)
service.wait_for_deployment(show_output=True)

print(service.state)
print(service.scoring_uri)
service_name = 'sahulat-service'

try:
    Webservice(ws, service_name).delete()
except WebserviceException:
    pass
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

myenv = Environment.get(workspace=ws, name="myenv")
from azureml.core.model import InferenceConfig

with open('src/score.py') as f:
    print(f.read())

inference_config = InferenceConfig(entry_script='src/score.py',
                                   environment=myenv)
aci_deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                           memory_gb=1)
from azureml.core.webservice import LocalWebservice

local_deployment_config = LocalWebservice.deploy_configuration(port=6789)

service = Model.deploy(workspace=ws,
                       name=service_name,
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aci_deployment_config,
                       overwrite=True)
service.wait_for_deployment(show_output=True)
print(service.get_logs())
myenv = Environment(name="myenv")
conda_dep = CondaDependencies()
conda_dep.add_pip_package("gensim")
conda_dep.add_pip_package("nltk")
myenv.python.conda_dependencies = conda_dep

# In[8]:

myenv.register(workspace=ws)

# In[9]:

from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice

inference_config = InferenceConfig(entry_script='summarizer_service.py',
                                   environment=myenv)
aci_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                memory_gb=1,
                                                tags={'name': 'Summarization'},
                                                description='Summarizes text.')

# In[10]:

from azureml.core import Model
from azureml.core import Webservice
from azureml.exceptions import WebserviceException

service_name = "summarizer"

# Remove any existing service under the same name.
try:
    Webservice(ws, service_name).delete()
except WebserviceException:
    pass  # (assumed completion; the excerpt is truncated here, following the pattern used above)
Example 10
    def deploy(self,
               workspace_name,
               resource_group,
               subscription_id,
               model_name,
               service_name,
               inference_config_file,
               deployment_config_file,
               signing_certificate,
               overwrite,
               verbose):

        ws = Workspace.get(workspace_name,
                           auth=self.auth,
                           resource_group=resource_group,
                           subscription_id=subscription_id)
        inference_config = file_to_inference_config(ws, inference_config_file, description='')

        # Package the model so we have an immutable image url (using hash) to validate later
        models = Model.list(ws, model_name, latest=True)
        if len(models) == 0:
            raise OMLException("Model is not registered")
        if verbose:
            print("Packaging model.")
        model_package = Model.package(ws, [models[0].id], inference_config)
        model_package.wait_for_creation()
        image_uri = model_package.location
        if verbose:
            print("Model package generated at {}.".format(image_uri))

        # Generate a signature containing the immutable model image url
        if verbose:
            print("Generating deployment signature.")
        signature_payload = "imageUrl:{}".format(image_uri)
        signature_bytes = rsa.sign(signature_payload.encode('utf-8'), signing_certificate, 'SHA-384')
        signature = base64.b64encode(signature_bytes).decode('utf-8')

        # Set up base-image-only deploy configuration
        if verbose:
            print("Creating byoc inference config and deployment config.")
        # "AzureML-" is a reserved environment name prefix. Uniquify the environment name if using
        # a curated environment for this model.
        if inference_config.environment.name.startswith("AzureML-"):
            deploy_environment_name = "Office-{}-deploy".format(inference_config.environment.name[8:])
        else:
            deploy_environment_name = "{}-deploy".format(inference_config.environment.name)
        byoc_config = InferenceConfig(entry_script=inference_config.entry_script,
                                      environment=Environment(deploy_environment_name))
        byoc_config.environment.docker.base_image = image_uri
        byoc_config.environment.python.user_managed_dependencies = True
        properties = {
            "isByoc": "true",
            "requestSignaturePayload": signature_payload,
            "requestSignature": signature
        }
        deployment_config = file_to_deploy_config(deployment_config_file, properties)

        # MirWebservice doesn't support service updates.
        # Make sure you remove any existing service under the same name before calling deploy.
        if overwrite:
            if verbose:
                print("Deleting webservice if it exists.")
            try:
                Webservice(ws, service_name).delete()
                if verbose:
                    print("Previous webservice deleted.")
            except WebserviceException:
                pass

        if verbose:
            print("Deploying model.")
        service = Model.deploy(ws, service_name, models, byoc_config, deployment_config)
        try:
            service.wait_for_deployment()
        except WebserviceException:
            print('Failed service operation id: {}'.format(Webservice._get(ws, service_name)['operationId']))
            raise

        print("ScoringUri:{}\n".format(service.scoring_uri))
        return service
Example 11
# (snippet starts mid-stream: myenv is a CondaDependencies object defined earlier)
myenv.add_pip_package("scikit-learn")
# myenv.add_pip_package("azureml-sdk[automl]") # Required for AutoML models

# Save the environment config as a .yml file
env_file = folder_name + "/diabetes_env.yml"
with open(env_file, "w") as f:
    f.write(myenv.serialize_to_string())
print("Saved dependency info in", env_file)

# Print the .yml file
with open(env_file, "r") as f:
    print(f.read())

#Deploy web service

# Configure the scoring environment
inference_config = InferenceConfig(runtime="python",
                                   source_directory=folder_name,
                                   entry_script="score_diabetes.py",
                                   conda_file="diabetes_env.yml")

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1)

service_name = "diabetes-service-v2"

service = Model.deploy(ws, service_name, [model], inference_config,
                       deployment_config)

service.wait_for_deployment(True)
print(service.state)
                                           "pipeline_config.json"))
print("models:")
print(model_names)
models = []
for model_name in model_names:
    models.append(Model(ws, name=model_name))

# Conda environment
myenv = Environment.from_conda_specification(
    "myenv",
    os.path.join(sources_directory_train, "conda_dependencies.yml"))
# Enable Docker based environment
myenv.docker.enabled = True

inference_config = InferenceConfig(
    source_directory=sources_directory_train,
    entry_script="scoring/score.py",
    environment=myenv)

package = Model.package(ws, models, inference_config)
package.wait_for_creation(show_output=True)
# Display the package location (ACR path)
location = package.location
print("Image stored at {}".format(location))

# Save the Image Location for other AzDO jobs after script is complete
if args.output_image_location_file is not None:
    print("Writing image location to %s" % args.output_image_location_file)
    with open(args.output_image_location_file, "w") as out_file:
        out_file.write(location)
Example 13
from azureml.core import Workspace
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice, Webservice

ws = Workspace(subscription_id="19f6dcec-26c8-4916-89b0-ac775bc5e6b8",
               resource_group="dev",
               workspace_name="climbing-hold-detection")

env = Environment.from_dockerfile(name="yolov5s", dockerfile="Dockerfile")

env.inferencing_stack_version = "latest"
env.python.user_managed_dependencies = True
env.python.interpreter_path = "/usr/bin/python3.7"

inference_config = InferenceConfig(
    environment=env,
    source_directory="./source_dir",
    entry_script="./entry_script.py",
)

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1)
deployment_config.auth_enabled = True

model = Model(workspace=ws, name="yolov5s", id="yolov5s_onnx:1")

service = Model.deploy(
    ws,
    "yolov5s-service",
    [model],
    inference_config,
    deployment_config,
)
# (assumed completion; the excerpt is truncated at the call above)
service.wait_for_deployment(show_output=True)
Example 14
import os

from flask import request  # assumption: this handler runs inside a Flask app
from azureml.core import Workspace


def DeployAzureACI():
    subscription_id = request.json['subscription_id']
    resource_group = request.json['resource_group']
    workspace_name = request.json['workspace_name']
    location = request.json['location']
    best_model = request.json['best_model']
    Model_path = request.json['Model_path']
    #cluster_name = request.json['cluster_name']
    service_name = request.json['service_name']
    Reg_model_name = request.json['Reg_model_name']

    ws = Workspace(subscription_id=subscription_id,
                   resource_group=resource_group,
                   workspace_name=workspace_name)

    print("Found workspace {} at location {}".format(ws.name, ws.location))
    print('Found existing Workspace.')

    from azureml.core.model import Model
    model = Model(ws, name=Reg_model_name)
    print(model)

    from azureml.core.model import InferenceConfig
    from azureml.core.webservice import AciWebservice
    from azureml.core.environment import Environment

    cwd = 'D:\\DCSAIAUTOML\\BestModels\\Azure'
    model_path = os.path.join(cwd, Model_path, best_model, "outputs")
    print(model_path)
    os.chdir(model_path)

    # Scoring script and conda spec generated by AutoML in the outputs folder
    script_file_name = 'scoring_file_v_1_0_0.py'
    conda_env_file_name = 'conda_env_v_1_0_0.yml'

    myenv = Environment.from_conda_specification(name="myenv",
                                                 file_path=conda_env_file_name)
    inference_config = InferenceConfig(entry_script=script_file_name,
                                       environment=myenv)

    try:
        deployment_config = AciWebservice.deploy_configuration(cpu_cores=2,
                                                               memory_gb=8)
        service = Model.deploy(ws, service_name, [model], inference_config,
                               deployment_config)
        service.wait_for_deployment(show_output=True)
        print(service.state)
        compute_type = service.compute_type
        state = service.state
        url = service.scoring_uri
        s_url = service.swagger_uri
        v1 = "@"  # field separator for the response string
        v2 = "Deployed Successfully"
        print(v2)
        return '{} {} {} {} {} {} {} {} {}'.format(v2, v1, compute_type, v1,
                                                   state, v1, url, v1, s_url)

    except Exception as e:
        error_statement = str(e)
        print("Error statement: ", error_statement)
        return error_statement
Example 15
# (snippet starts mid-stream: ws and e, a configuration object, are defined earlier)
model = Model(ws, name=e.model_name, version=e.model_version)
sources_dir = e.sources_directory_train
if sources_dir is None:
    sources_dir = 'aml_recommender'
score_script = os.path.join(".", sources_dir, e.score_script)
score_file = os.path.basename(score_script)
path_to_scoring = os.path.dirname(score_script)
cwd = os.getcwd()
# Copy conda_dependencies.yml into scoring as this method does not accept relative paths. # NOQA: E501
shutil.copy(os.path.join(".", sources_dir, "conda_dependencies.yml"),
            path_to_scoring)
os.chdir(path_to_scoring)

scoring_env = Environment.from_conda_specification(
    name="scoringenv", file_path="conda_dependencies.yml")  # NOQA: E501
inference_config = InferenceConfig(entry_script=score_file,
                                   environment=scoring_env)
package = Model.package(ws, [model], inference_config)
package.wait_for_creation(show_output=True)
# Display the package location/ACR path
print(package.location)

os.chdir(cwd)

if package.state != "Succeeded":
    raise Exception(f"Image creation status: {package.creation_state}")

print("Package stored at {} with build log {}".format(
    package.location, package.package_build_log_uri))  # NOQA: E501

# Save the Image Location for other AzDO jobs after script is complete
if args.output_image_location_file is not None:
    # (truncated here; presumably the package location is written out, as in
    # the similar script above)
    pass

# --- second snippet: update an existing webservice with a new model ---
interactive_auth = InteractiveLoginAuthentication()

# Use interactive authentication
ws = Workspace.get(name=AZURE_WORKSPACE_NAME,
                   auth=interactive_auth,
                   subscription_id=AZURE_SUBSCRIPTION_ID,
                   resource_group=AZURE_RESOURCE_GROUP)
service = Webservice(workspace=ws, name=SERVICE_NAME)

#Get model
MODEL_PATH = os.path.join(os.getcwd(), MODEL_PATH)
wget.download(MODEL_URL, MODEL_PATH)

#Register a new model
new_model = Model.register(model_path=MODEL_PATH,
                           model_name=MODEL_NAME,
                           description=MODEL_DESCRIPTION,
                           workspace=ws)

#Create a new image
CONDA_FILE_PATH = os.path.join(os.getcwd(), CONDA_FILE_PATH)
wget.download(CONDA_FILE_URL, CONDA_FILE_PATH)
inference_config = InferenceConfig(entry_script=EXECUTION_SCRIPT_PATH,
                                   runtime="python",
                                   conda_file=CONDA_FILE_PATH)

# Update the service with the new model and inference config
service.update(models=[new_model], inference_config=inference_config)
print(service.state)
print(service.get_logs())
print("service ",SERVICE_NAME," was updated successefuly")

variables.put("SCORING_URI",service.scoring_uri)

print("END " + __file__)
Example 17
# (snippet starts mid-stream, inside a try block that looks up an existing
# registered model and compares its tags)
    prev_test_dir = model.tags['test_data']
    if prev_test_dir != test_dir or prev_accuracy >= accuracy:
        model = register_model(model_dir, model_name, accuracy, test_dir,
                               workspace)
        new_model = True
except WebserviceException:
    print('Model does not exist yet')
    model = register_model(model_dir, model_name, accuracy, test_dir,
                           workspace)
    new_model = True

# Deploy new webservice if new model was registered
if new_model:
    # Create inference config
    inference_config = InferenceConfig(source_directory='.',
                                       runtime='python',
                                       entry_script='score.py',
                                       conda_file='env.yml')

    # Deploy model
    aci_config = AciWebservice.deploy_configuration(
        cpu_cores=2,
        memory_gb=4,
        tags={
            'model': 'RESNET',
            'method': 'pytorch'
        },
        description='CIFAR-Type object classifier')

    try:
        service = Webservice(workspace, name=service_name)
        if service:
            # (truncated here; presumably the existing service is replaced)
            pass

# --- second snippet: deploy a registered model with CLI authentication ---
print("Model version: ", args.version)

# Load the AML Workspace and Model
ws = Workspace.from_config(auth=AzureCliAuthentication())

model = Model(workspace=ws, name=args.name, version=args.version)

# Configure Scoring App Environment
scoringenv = Environment.from_conda_specification(
    name="scoringenv",
    file_path=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           '../../environments/',
                           'scoring/conda_dependencies.yml'))

inference_config = InferenceConfig(entry_script=os.path.join(
    os.path.dirname(os.path.realpath(__file__)), '../modeling/score.py'),
                                   environment=scoringenv)

# Configure Deployment Compute
compute_config = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags={'demo': 'onnx'},
    description='ONNX for text')

# Run the deployment
deployment = Model.deploy(workspace=ws,
                          name=args.service_name,
                          models=[model],
                          inference_config=inference_config,
                          deployment_config=compute_config)
from azureml.core.webservice import AciWebservice, Webservice
from azureml.exceptions import WebserviceException
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies

# Remove any existing service under the same name.
try:
    Webservice(ws, webservice_name).delete()
except WebserviceException:
    pass

env = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')

inference_config = InferenceConfig(entry_script="score_sparkml.py",
                                   environment=env)

deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1,
                                                       auth_enabled=True)

myservice = Model.deploy(ws, webservice_name, [mymodel], inference_config,
                         deployment_config)
myservice.wait_for_deployment(show_output=True)
Example 20
import logging
logging.basicConfig(level=logging.DEBUG)

#%% [markdown]
#Initialize Workspace
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')

#%% [markdown]
#Inference config
from azureml.core.model import InferenceConfig

inference_config = InferenceConfig(
    source_directory="deploy-source",
    runtime="python",
    entry_script="x/y/score.py",
    conda_file="env/myenv.yml",
    extra_docker_file_steps="dockerstep/customDockerStep.txt")

#%% [markdown]
# Deploy to Docker local
from azureml.core.webservice import LocalWebservice

# Optional; if no port is provided, a random one is chosen
deployment_config = LocalWebservice.deploy_configuration(port=6789)

from azureml.core.model import Model
model = Model(workspace=ws, name="aml-deploy-test-regression")

local_service = Model.deploy(ws, "aml-local-deployment", [model],
                             inference_config, deployment_config)
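
# Hedged follow-up (assumed, mirroring the other examples in this collection):
local_service.wait_for_deployment(show_output=True)
print("Local service port: {}".format(local_service.port))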
Example 21
get_ipython().system('cat ./environment.yml')

# In[76]:

environment = Environment.from_conda_specification(project,
                                                   './environment.yml')
environment.register(workspace=ws)

# ## 2.9 Define the Docker image configuration

# Use the provided inference script and configure the conda environment in the image as specified

# In[77]:

inference_config = InferenceConfig(entry_script='inference.py',
                                   environment=environment)

# ## 2.10 Define the configuration of the inference container

# Give the single "replica" container running on AKS 0.2 GB of RAM and 0.1 CPU core. Do not scale the number of containers depending on the load. For more configuration options see  https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice.akswebservice

# In[78]:

aks_config = AksWebservice.deploy_configuration(
    autoscale_enabled=False,
    cpu_cores=0.1,
    description='Student admissions logistic regression model',
    memory_gb=0.2,
    num_replicas=1,
    tags={'project': project})
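
# In[79]:

# A hedged sketch of the (presumed) next cell: deploying with the config
# above. `model` and `aks_target` are assumptions not shown in this excerpt.
from azureml.core.model import Model

service = Model.deploy(workspace=ws,
                       name=project,
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aks_config,
                       deployment_target=aks_target)
service.wait_for_deployment(show_output=True)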
Example 22
def deploy_model(ws, aci_service_name, experiment_name, asset_name,
                 asset_label, run_id, mlapp_env, cpu_cores, memory_gb,
                 entry_script):
    inference_config = InferenceConfig(source_directory=os.getcwd(),
                                       entry_script=entry_script,
                                       environment=mlapp_env)

    deployment_config = AciWebservice.deploy_configuration(cpu_cores=cpu_cores,
                                                           memory_gb=memory_gb)

    try:
        service = Webservice(ws, name=aci_service_name)
        if service:
            service.delete()
    except WebserviceException:
        pass

    # model name
    model_name = get_model_register_name(run_id)
    try:
        model = Model(ws, name=model_name)
    except Exception:  # model not registered yet; register it below
        # creating directory for download Model files for Model register
        tmp_path = create_tempdir(name='download_tmp')
        register_path = create_directory(AML_MLAPP_FOLDER, path=tmp_path)

        # getting RUN context
        experiment = Experiment(workspace=ws, name=experiment_name)
        tags = {"run_id": run_id, "asset_name": asset_name}
        if asset_label is not None:
            tags["asset_label"] = asset_label

        selected_run_id = None
        for run in Run.list(experiment,
                            tags=tags,
                            include_children=True,
                            status='Completed'):
            run_metrics = run.get_metrics()
            exp_saved_run_id = run_metrics.get("run_id")
            if exp_saved_run_id == run_id:
                selected_run_id = run.id
                break
        if selected_run_id is None:
            raise Exception(
                'ERROR: no matching Run object associated with run id %s in this experiment.'
                % str(run_id))
        current_run = Run(experiment=experiment, run_id=selected_run_id)

        # download files from run object
        current_run.download_files(output_directory=register_path)

        # register model
        model = Model.register(ws,
                               model_path=register_path,
                               model_name=model_name,
                               tags=tags,
                               description=asset_name)

        # deletes tmp dir and all content
        delete_directory_with_all_contents(tmp_path)

    # deploy model
    service = Model.deploy(ws, aci_service_name, [model], inference_config,
                           deployment_config)

    service.wait_for_deployment(True)
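
# A hedged usage sketch (all argument values are illustrative assumptions):
# deploy_model(ws, 'mlapp-aci-service', experiment_name='my-experiment',
#              asset_name='my-asset', asset_label=None, run_id='<run-id>',
#              mlapp_env=my_env, cpu_cores=1, memory_gb=2,
#              entry_script='deployment/score.py')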
Example 23
    ["conda_packages"],
    pip_packages=deployment_settings["image"]["dependencies"]["pip_packages"],
    python_version=deployment_settings["image"]["dependencies"]
    ["python_version"],
    pin_sdk_version=deployment_settings["image"]["dependencies"]
    ["pin_sdk_version"])
dep_path = os.path.join("code", "scoring", "myenv.yml")
conda_dep.save(path=dep_path)

# Creating InferenceConfig
print("Creating InferenceConfig")
if deployment_settings["image"]["use_custom_environment"]:
    env = utils.get_environment(name_suffix="_deployment")
    inference_config = InferenceConfig(
        entry_script=deployment_settings["image"]["entry_script"],
        source_directory=deployment_settings["image"]["source_directory"],
        runtime=deployment_settings["image"]["runtime"],
        environment=env)
else:
    inference_config = InferenceConfig(
        entry_script=deployment_settings["image"]["entry_script"],
        source_directory=deployment_settings["image"]["source_directory"],
        runtime=deployment_settings["image"]["runtime"],
        conda_file=os.path.basename(dep_path),
        extra_docker_file_steps=deployment_settings["image"]["docker"]
        ["extra_docker_file_steps"],
        enable_gpu=deployment_settings["image"]["docker"]["use_gpu"],
        description=deployment_settings["image"]["description"],
        base_image=deployment_settings["image"]["docker"]["custom_image"],
        base_image_registry=container_registry,
        cuda_version=deployment_settings["image"]["docker"]["cuda_version"])
Example 24
# Add the dependencies for your model
from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn")

# Save the environment config as a .yml file
env_file = 'service_files/env.yml'
with open(env_file, "w") as f:
    f.write(myenv.serialize_to_string())
print("Saved dependency info in", env_file)


# Combine the entry script and environment into an InferenceConfig
from azureml.core.model import InferenceConfig

classifier_inference_config = InferenceConfig(runtime="python",
                                              source_directory='service_files',
                                              entry_script="score.py",
                                              conda_file="env.yml")

#make AKS cluster
from azureml.core.compute import ComputeTarget, AksCompute

cluster_name = 'aks-cluster'
compute_config = AksCompute.provisioning_configuration(location='eastus')
production_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
production_cluster.wait_for_completion(show_output=True)     


#define deploy config
from azureml.core.webservice import AksWebservice

classifier_deploy_config = AksWebservice.deploy_configuration(cpu_cores=1,
                                                              memory_gb=1)
# (assumed completion; the excerpt is truncated at the call above)
Example 25
    # (snippet starts mid-stream, inside a block that creates the cluster if
    # it does not already exist)
    # Specify the configuration for the new cluster
    compute_config = AksCompute.provisioning_configuration(
        cluster_purpose=AksCompute.ClusterPurpose.DEV_TEST,
        agent_count=1,
        vm_size="Standard_NC6")
    # Create the cluster with the specified name and configuration
    aks_gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name,
                                           compute_config)

    # Wait for the cluster to complete, show the output log
    aks_gpu_cluster.wait_for_completion(show_output=True)

#########################################################################################################
deployment_env = Environment.from_conda_specification(
    name="deployment_env", file_path="./configuration/deployment_env.yml")
inference_config = InferenceConfig(entry_script="./scripts/score/score.py",
                                   environment=deployment_env)

# Set the web service configuration (using default here)
aks_config = AksWebservice.deploy_configuration(
    cpu_cores=2,
    auth_enabled=True,  # this flag generates API keys to secure access
    memory_gb=8,
    #tags={'name': 'mnist', 'framework': 'Keras'},
    #max_request_wait_time=300000,scoring_timeout_ms=300000,
    description='X-Ray ML Estimator AKS endpoint')

#########################################################################################################

service_name = 'mlops-estimator-model-aks'

# Remove any existing service under the same name.
# (truncated here; the deletion logic is omitted from this excerpt)

# --- second snippet: deploy an ONNX model to ACI ---
model_name = "net.onnx"

model = Model(workspace=ws, name=model_name)

myenv = CondaDependencies(conda_dependencies_file_path=os.path.join(
    os.path.dirname(os.path.realpath(__file__)), '../../',
    'conda_dependencies.yml'))
myenv.add_channel("pytorch")

with open("myenv.yml", "w") as f:
    f.write(myenv.serialize_to_string())

myenv = Environment.from_conda_specification(name="myenv",
                                             file_path="myenv.yml")

inference_config = InferenceConfig(
    entry_script="code_final/deployment/score.py", environment=myenv)

aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={'demo': 'onnx'},
                                               description='ONNX for text')

aci_service_name = 'onnx-demo2'
print("Service", aci_service_name)
aci_service = Model.deploy(ws, aci_service_name, [model], inference_config,
                           aciconfig)
aci_service.wait_for_deployment(True)
print(aci_service.state)

if aci_service.state != 'Healthy':
    # run this command for debugging.
    print(aci_service.get_logs())  # (assumed completion; the excerpt ends here)
Example 27
def deploy(model_uri,
           workspace,
           deployment_config=None,
           service_name=None,
           model_name=None,
           tags=None,
           mlflow_home=None,
           synchronous=True):
    """
    Register an MLflow model with Azure ML and deploy a webservice to Azure Container Instances (ACI)
    or Azure Kubernetes Service (AKS).

    The deployed service will contain a webserver that processes model queries.
    For information about the input data formats accepted by this webserver, see the
    :ref:`MLflow deployment tools documentation <azureml_deployment>`.

    :param model_uri: The location, in URI format, of the MLflow model used to build the Azure
                      ML deployment image. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param workspace: The AzureML workspace in which to deploy the service. This is a
                      `azureml.core.Workspace` object.
    :param deployment_config: The configuration for the Azure web service. This configuration
                              allows you to specify the resources the webservice will use and
                              the compute cluster it will be deployed in. If unspecified, the web
                              service will be deployed into an Azure Container Instance. This is a
                              `azureml.core.DeploymentConfig` object. For more information, see
                              `<https://docs.microsoft.com/python/api/azureml-core/
                              azureml.core.webservice.aks.aksservicedeploymentconfiguration>`_ and
                              `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml
                              .core.webservice.aci.aciservicedeploymentconfiguration>`_
    :param service_name: The name to assign the Azure Machine Learning webservice that will be
                         created. If unspecified, a unique name will be generated.
    :param model_name: The name to assign the Azure Model that will be created. If unspecified,
                       a unique model name will be generated.
    :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to
                 associate with the Azure Model and Deployment that will be created.
                 These tags are added to a set of default tags that include the model uri,
                 and more. For more information, see
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model(class)?view=azure-ml-py>`_.
    :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the
                        image will install MLflow from this directory. Otherwise, it will install
                        MLflow from pip.
    :param synchronous: If ``True``, this method blocks until the image creation procedure
                        terminates before returning. If ``False``, the method returns immediately,
                        but the returned image will not be available until the asynchronous
                        creation process completes. Use the
                        ``azureml.core.Webservice.wait_for_deployment()`` function to wait
                        for the deployment process to complete.
    :return: A tuple containing the following elements in order:
            - An ``azureml.core.webservice.Webservice`` object containing metadata for the
            new service.
            - An ``azureml.core.model.Model`` object containing metadata for the new model.

    .. code-block:: python
        :caption: Example

        import mlflow.azureml
        from azureml.core import Workspace
        from azureml.core.webservice import AciWebservice, Webservice

        # Load or create an Azure ML Workspace
        workspace_name = "<Name of your Azure ML workspace>"
        subscription_id = "<Your Azure subscription ID>"
        resource_group = "<Name of the Azure resource group in which to create Azure ML resources>"
        location = "<Name of the Azure location (region) in which to create Azure ML resources>"
        azure_workspace = Workspace.create(name=workspace_name,
                                           subscription_id=subscription_id,
                                           resource_group=resource_group,
                                           location=location,
                                           create_resource_group=True,
                                           exist_ok=True)

        # Create an Azure Container Instance webservice for an MLflow model
        azure_service, azure_model = mlflow.azureml.deploy(model_uri="<model_uri>",
                                                           service_name="<deployment-name>",
                                                           workspace=azure_workspace,
                                                           synchronous=True)
    """
    # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should
    # still be accessible for import from Python 2. Therefore, we will only import from the SDK
    # upon method invocation.
    # pylint: disable=import-error
    from azureml.core.model import Model as AzureModel, InferenceConfig
    from azureml.core import Environment as AzureEnvironment
    from azureml.core import VERSION as AZUREML_VERSION
    from azureml.core.webservice import AciWebservice

    absolute_model_path = _download_artifact_from_uri(model_uri)

    model_pyfunc_conf, model = _load_pyfunc_conf_with_model(
        model_path=absolute_model_path)
    model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None)
    run_id = None
    run_id_tag = None
    try:
        run_id = model.run_id
        run_id_tag = run_id
    except AttributeError:
        run_id = str(uuid.uuid4())
    if model_python_version is not None and\
            StrictVersion(model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(message=(
            "Azure ML can only deploy models trained in Python 3 and above. See"
            " the following MLflow GitHub issue for a thorough explanation of this"
            " limitation and a workaround to enable support for deploying models"
            " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"
        ),
                              error_code=INVALID_PARAMETER_VALUE)

    tags = _build_tags(model_uri=model_uri,
                       model_python_version=model_python_version,
                       user_tags=tags,
                       run_id=run_id_tag)

    if service_name is None:
        service_name = _get_mlflow_azure_name(run_id)
    if model_name is None:
        model_name = _get_mlflow_azure_name(run_id)

    with TempDir(chdr=True) as tmp:
        model_directory_path = tmp.path("model")
        tmp_model_path = os.path.join(
            model_directory_path,
            _copy_file_or_tree(src=absolute_model_path,
                               dst=model_directory_path))

        registered_model = AzureModel.register(workspace=workspace,
                                               model_path=tmp_model_path,
                                               model_name=model_name,
                                               tags=tags)

        _logger.info(
            "Registered an Azure Model with name: `%s` and version: `%s`",
            registered_model.name, registered_model.version)

        # Create an execution script (entry point) for the image's model server. Azure ML requires
        # the container's execution script to be located in the current working directory during
        # image creation, so we create the execution script as a temporary file in the current
        # working directory.
        execution_script_path = tmp.path("execution_script.py")
        _create_execution_script(output_path=execution_script_path,
                                 azure_model=registered_model)

        environment = None
        if pyfunc.ENV in model_pyfunc_conf:
            environment = AzureEnvironment.from_conda_specification(
                _get_mlflow_azure_name(run_id),
                os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV]))
        else:
            environment = AzureEnvironment(_get_mlflow_azure_name(run_id))

        if mlflow_home is not None:
            path = tmp.path("dist")
            _logger.info("Bulding temporary MLFlow wheel in %s", path)
            wheel = _create_mlflow_wheel(mlflow_home, path)
            whl_url = AzureEnvironment.add_private_pip_wheel(
                workspace=workspace, file_path=wheel, exist_ok=True)
            environment.python.conda_dependencies.add_pip_package(whl_url)
        else:
            environment.python.conda_dependencies.add_pip_package(
                "mlflow=={}".format(mlflow_version))

        # AzureML requires azureml-defaults to be installed to include
        # flask for the inference server.
        environment.python.conda_dependencies.add_pip_package(
            "azureml-defaults=={}".format(AZUREML_VERSION))

        inference_config = InferenceConfig(entry_script=execution_script_path,
                                           environment=environment)

        if deployment_config is not None:
            if deployment_config.tags is not None:
                # We want more narrowly-scoped tags to win on merge
                tags.update(deployment_config.tags)
            deployment_config.tags = tags
        else:
            deployment_config = AciWebservice.deploy_configuration(tags=tags)

        webservice = AzureModel.deploy(workspace=workspace,
                                       name=service_name,
                                       models=[registered_model],
                                       inference_config=inference_config,
                                       deployment_config=deployment_config)
        _logger.info("Deploying an Azure Webservice with name: `%s`",
                     webservice.name)
        if synchronous:
            webservice.wait_for_deployment(show_output=True)
        return webservice, registered_model
Example 28
# (snippet starts mid-stream; the call below is reconstructed from context)
auth = ServicePrincipalAuthentication(
    tenant_id=auth_config["tenant_id"],
    service_principal_id=auth_config["service_principal_id"],
    service_principal_password=os.environ["SP_SECRET"],
)

ws = Workspace(
    subscription_id=auth_config["subscription_id"],
    resource_group=auth_config["resource_group"],
    workspace_name=auth_config["workspace_name"],
    auth=auth,
)

env = Environment.get(workspace=ws, name="component-condition")
env.docker.enabled = True

inf_config = InferenceConfig(entry_script="./score.py", environment=env)
model = Model(ws, name=conf["metadata"]["model_name"])

deployment_config = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=2,
    description="Webservice to predict non-compliant car components.",
    enable_app_insights=True,
)

svc = Model.deploy(
    workspace=ws,
    name="compcondition",
    models=[model],
    inference_config=inf_config,
    deployment_config=deployment_config,
)
# (assumed completion; the excerpt is truncated here)
svc.wait_for_deployment(show_output=True)
Example 29
ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region,
                      exist_ok=True)

# Provision AKS cluster
prov_config = AksCompute.provisioning_configuration(vm_size="Standard_D14")
prov_config.enable_ssl(leaf_domain_label=https_cert)
# Create the cluster
aks_target = ComputeTarget.create(workspace=ws,
                                  name=aks_name,
                                  provisioning_configuration=prov_config)

inference_config = InferenceConfig(runtime="python",
                                   entry_script="aml_app.py",
                                   conda_file="myenv.yml",
                                   extra_docker_file_steps='dockerfile')

aks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,
                                                    num_replicas=3,
                                                    cpu_cores=2,
                                                    memory_gb=4,
                                                    auth_enabled=False)

aks_service = Model.deploy(ws,
                           models=[model],  # assumes a registered Model object; the
                                            # original mistakenly passed the entry-script
                                            # path 'aml_app.py' here
                           inference_config=inference_config,
                           deployment_config=aks_python_bot,
                           deployment_target=aks_target,
                           name=aks_service_name)
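
# Hedged follow-up (assumed, matching the other examples in this collection):
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
print(aks_service.scoring_uri)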
Example 30
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice

# Create deploy config object
aci_conf = AciWebservice.deploy_configuration(
    cpu_cores=2,
    memory_gb=2,
    description="This is for ADB and AML - Iris model")

# Create yaml to set azure-ml dependency
myenv = Environment(name="myenv")
myenv.python.conda_dependencies.save("./myenv.yml")

# Create inference config with score.py
inf_conf = InferenceConfig(entry_script="score_sparkml.py",
                           conda_file="myenv.yml",
                           runtime="spark-py")

# Deploy and publish (start your service)!
svc = Model.deploy(name="sparkml-service",
                   deployment_config=aci_conf,
                   models=[mymodel],
                   inference_config=inf_conf,
                   workspace=azure_workspace)

# This will take a few minutes
svc.wait_for_deployment()
