Example No. 1
def main():
    """
    Deploy model to your service
    """
    work_space = Workspace.from_config()
    environment = Environment("keras-service-environment")
    environment.python.conda_dependencies = CondaDependencies.create(
        python_version="3.7.7",
        pip_packages=["azureml-defaults", "numpy", "tensorflow==2.3.1"],
    )
    model_list = Model.list(work_space)
    validation_accuracy = []
    version = []
    for i in model_list:
        validation_accuracy.append(float(i.properties["val_accuracy"]))
        version.append(i.version)
    model = Model(work_space,
                  "keras_mnist",
                  version=version[np.argmax(validation_accuracy)])
    service_name = "keras-mnist-service"
    inference_config = InferenceConfig(entry_script="score_keras.py",
                                       environment=environment)
    aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
    service = Model.deploy(
        workspace=work_space,
        name=service_name,
        models=[model],
        inference_config=inference_config,
        deployment_config=aci_config,
        overwrite=True,
    )
    service.wait_for_deployment(show_output=True)
    print(service.get_logs())
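The InferenceConfig above points at score_keras.py. AzureML entry scripts follow a fixed contract: init() runs once when the scoring container starts, and run() handles each request. A minimal sketch of such a script, assuming the registered model is an HDF5 file named keras_mnist.h5 and a {"data": [...]} input schema:

import json
import os

import numpy as np
from tensorflow.keras.models import load_model

model = None

def init():
    global model
    # AZUREML_MODEL_DIR points at the folder holding the registered model files.
    model = load_model(os.path.join(os.environ["AZUREML_MODEL_DIR"], "keras_mnist.h5"))

def run(raw_data):
    data = np.array(json.loads(raw_data)["data"])
    predictions = model.predict(data)
    return {"predictions": predictions.tolist()}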
Example No. 2
def get_inference_config(aml_interface):
    aml_env = Environment.get(workspace=aml_interface.workspace,
                              name=AML_ENVIRONMENT_NAME)
    scoring_script_path = os.path.join(__here__, 'score.py')
    inference_config = InferenceConfig(entry_script=scoring_script_path,
                                       environment=aml_env)
    return inference_config
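Example No. 3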
def main(args):

    # Define workspace object
    try:
        ws = Workspace.from_config(path='deploy/.azureml/config.json')
    except Exception:
        # Need to create the workspace and download config.json from Azure Portal
        print('No workspace. Check for deploy/.azureml/config.json file.')
        raise

    model = Model(ws, name=args.model_workspace)

    inference_config = InferenceConfig(runtime="python",
                                       entry_script="score.py",
                                       conda_file="keras_env.yml",
                                       source_directory="./deploy")

    package = Model.package(ws, [model],
                            inference_config,
                            generate_dockerfile=True)
    package.wait_for_creation(show_output=True)
    # Download the package.
    package.save("./" + args.out_dir)
    # Get the Azure container registry that the model/Dockerfile uses.
    acr = package.get_container_registry()
    print("Address:", acr.address)
    print("Username:"******"Password:", acr.password)
Example No. 4
def main():
    print('Loading workspace configuration...')
    ws = Workspace.from_config()

    print('Fetching model...')
    model = Model(ws, 'yolov3-tf')

    print("Configurando Objects...")
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=2,
        memory_gb=2,
        tags={"data": "solo yolov3 tensorflow"},
        description='yolov3 y tensorflow',
        dns_name_label='ceibatest')

    inference_config = InferenceConfig(entry_script="score.py",
                                       source_directory="../azure",
                                       conda_file='conda-cpu.yml',
                                       runtime='python')

    print("Desplegando...")
    service = Model.deploy(workspace=ws,
                           name='yolov3-tf-deploy',
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig,
                           overwrite=True)

    service.wait_for_deployment(show_output=True)
    url = service.scoring_uri
    print(url)
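With the scoring URI printed above, the endpoint can be smoke-tested over plain HTTP. A minimal sketch; the {"data": ...} body is an assumption, since a real YOLOv3 payload would carry encoded image data in whatever schema score.py expects:

import json
import requests

response = requests.post(url,
                         data=json.dumps({"data": []}),
                         headers={"Content-Type": "application/json"})
print(response.status_code, response.text)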
Example No. 5
    def __deploy_model(self):
        service_name = self.__args.service_name

        model = Model(self.__ws, self.__args.model_name)
        explainer_model = Model(self.__ws, self.__args.explainer_model_name)
        myenv = Environment.from_conda_specification(
            name=self.__config.get('DEPLOY', 'ENV_NAME'),
            file_path=self.__config.get('DEPLOY', 'ENV_FILE_PATH'))
        inference_config = InferenceConfig(
            entry_script=self.__config.get('DEPLOY', 'SCORE_PATH'),
            environment=myenv,
            source_directory=self.__config.get('DEPLOY',
                                               'DEPENDENCIES_DIRECTORY'))

        if not self.__args.update_deployment:
            deployment_config = AciWebservice.deploy_configuration(
                cpu_cores=self.__config.getint('DEPLOY', 'ACI_CPU'),
                memory_gb=self.__config.getint('DEPLOY', 'ACI_MEM'),
                collect_model_data=True,
                enable_app_insights=True)
            service = Model.deploy(self.__ws, service_name,
                                   [model, explainer_model], inference_config,
                                   deployment_config)
        else:
            service = AciWebservice(self.__ws, service_name)
            service.update(models=[model, explainer_model],
                           inference_config=inference_config)

        service.wait_for_deployment(show_output=True)
        print(service.state)
        print(service.get_logs())
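Example No. 6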
def main(args):

    # Define workspace object
    try:
        ws = Workspace.from_config(path='deploy/.azureml/config.json')
    except Exception:
        # Need to create the workspace
        print('No workspace. Check for deploy/.azureml/config.json file.')
        raise

    inference_config = InferenceConfig(runtime="python",
                                       entry_script="score.py",
                                       conda_file="keras_env.yml",
                                       source_directory="./deploy")

    deployment_config = LocalWebservice.deploy_configuration()

    model = Model(ws, name=args.model_workspace)

    # This deploys AND registers model (if not registered)
    service = Model.deploy(workspace=ws,
                           name=args.service_name,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config)

    service.wait_for_deployment(True)
    print(service.state)
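Example No. 7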
def create_inference_config(ws):
    inference_env_file = os.path.join(CODE_PATH, 'inference_env.yml')
    inference_env = amlutils.create_azureml_env(
        ws, consts.inference_environment_name, inference_env_file)
    inference_config = InferenceConfig(
        source_directory=CODE_PATH,
        entry_script='score.py',
        environment=inference_env)
    return inference_config
Example No. 8
    def deploy(self, model_id, locally):
        if locally:
            self.ctx.log('Local deployment step is not required for Azure.')
            return {'model_id': model_id}

        ws = AzureProject(self.ctx)._get_ws()
        experiment_name = self.ctx.config.get('experiment/name', None)
        if experiment_name is None:
            raise AzureException('Please specify Experiment name...')

        iteration, run_id = self._get_iteration(model_id)

        experiment = Experiment(ws, experiment_name)
        experiment_run = AutoMLRun(experiment=experiment, run_id=run_id)
        model_run = AutoMLRun(experiment=experiment, run_id=model_id)
        model_name = model_run.properties['model_name']
        self.ctx.log('Registering model: %s' % model_name)

        description = '%s-%s' % (model_name, iteration)
        model = experiment_run.register_model(model_name=model_name,
                                              iteration=iteration,
                                              description=description,
                                              tags=None)

        script_file_name = '.azureml/score_script.py'
        model_run.download_file('outputs/scoring_file_v_1_0_0.py',
                                script_file_name)

        # Deploying ACI Service
        aci_service_name = self._aci_service_name(model_name)
        self.ctx.log('Deploying AciWebservice %s ...' % aci_service_name)

        inference_config = InferenceConfig(
            environment=model_run.get_environment(),
            entry_script=script_file_name)

        aciconfig = AciWebservice.deploy_configuration(
            cpu_cores=1,
            memory_gb=2,
            tags={'type': "inference-%s" % aci_service_name},
            description="inference-%s" % aci_service_name)

        # Remove any existing service under the same name.
        try:
            Webservice(ws, aci_service_name).delete()
            self.ctx.log('Removed existing service with the same name.')
        except WebserviceException:
            pass

        aci_service = Model.deploy(ws, aci_service_name, [model],
                                   inference_config, aciconfig)
        aci_service.wait_for_deployment(True)
        self.ctx.log('%s state %s' %
                     (aci_service_name, str(aci_service.state)))

        return {'model_id': model_id, 'aci_service_name': aci_service_name}
Example No. 9
    def _deploy_remotely(self, model_id, model_run, ws, experiment):
        from azureml.core.model import Model
        from azureml.core.model import InferenceConfig
        from azureml.core.webservice import Webservice
        from azureml.core.webservice import AciWebservice
        from azureml.exceptions import WebserviceException
        from azureml.train.automl.run import AutoMLRun

        # ws, experiment = self._get_experiment()
        iteration, run_id = self._get_iteration(model_id)

        experiment_run = AutoMLRun(experiment=experiment, run_id=run_id)
        model_name = model_run.properties['model_name']
        self.ctx.log('Registering model: %s' % model_name)

        description = '%s-%s' % (model_name, iteration)
        model = experiment_run.register_model(
            model_name=model_name, iteration=iteration,
            description=description, tags=None)

        script_file_name = '.azureml/score_script.py'
        model_run.download_file(
            'outputs/scoring_file_v_1_0_0.py', script_file_name)

        self._edit_score_script(script_file_name)

        # Deploying ACI Service
        aci_service_name = self._aci_service_name(model_name)
        self.ctx.log('Deploying AciWebservice %s ...' % aci_service_name)

        inference_config = InferenceConfig(
            environment=model_run.get_environment(),
            entry_script=script_file_name)

        aciconfig = AciWebservice.deploy_configuration(
            cpu_cores=1,
            memory_gb=2,
            tags={'type': "inference-%s" % aci_service_name},
            description="inference-%s" % aci_service_name)

        # Remove any existing service under the same name.
        try:
            Webservice(ws, aci_service_name).delete()
            self.ctx.log('Removed existing service with the same name.')
        except WebserviceException:
            pass

        aci_service = Model.deploy(
            ws, aci_service_name, [model], inference_config, aciconfig)
        aci_service.wait_for_deployment(True)
        self.ctx.log('%s state %s' % (aci_service_name, str(aci_service.state)))

        return {'model_id': model_id, 'aci_service_name': aci_service_name}
Example No. 10
def update_deployed_model(ws, aci_service_name, model_name, mlapp_env,
                          entry_script):
    inference_config = InferenceConfig(source_directory=os.getcwd(),
                                       entry_script=entry_script,
                                       environment=mlapp_env)

    model = Model(ws, name=model_name)
    service = Webservice(name=aci_service_name, workspace=ws)
    service.update(models=[model], inference_config=inference_config)

    print(service.state)
    print(service.get_logs())
Example No. 11
def deploy(local, aks, aci, num_cores, mem_gb, compute_name):
    # Get the workspace
    ws = Workspace.from_config()
    # Create inference configuration based on the environment definition and the entry script
    # yolo = Environment.from_conda_specification(name="env", file_path="yolo.yml")
    yolo = Environment.from_pip_requirements(
        name="yolo", file_path="./deployed_requirements.txt")
    # yolo.save_to_directory('')
    yolo.register(workspace=ws)
    inference_config = InferenceConfig(entry_script="azure.py",
                                       environment=yolo,
                                       source_directory="yolov5")
    # Retrieve registered model
    model = Model(ws, id="lpr:1")
    deploy_target = None
    if local:
        # Create a local deployment, using port 8890 for the web service endpoint
        deployment_config = LocalWebservice.deploy_configuration(port=8890)
    elif aks:
        # Create an AKS deployment
        deployment_config = AksWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb,
            compute_target_name=compute_name)
        deploy_target = ComputeTarget(workspace=ws, name=compute_name)
        # if deploy_target.get_status() != "Succeeded":
        #     print(f"Deploy Target: {deploy_target.get_status()}")
        #     deploy_target.wait_for_completion(show_output=True)
    elif aci:
        # Create an ACI deployment
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb)
    else:
        raise NotImplementedError("Choose deploy target please")
    # Deploy the service
    print("Deploying:")
    service = Model.deploy(workspace=ws,
                           name="lpr",
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config,
                           overwrite=True,
                           deployment_target=deploy_target)
    # Wait for the deployment to complete
    print("Deploying:")
    service.wait_for_deployment(True)
    # Display the port that the web service is available on
    if local:
        print(service.port)
Example No. 12
def main():
    # get access to workspace
    try:
        ws = Workspace.from_config()
        print(ws.name, ws.location, ws.resource_group, sep='\t')
        print('Library configuration succeeded')
    except Exception:
        print('Workspace not found')
        return

    # get model
    model = Model(ws, 'absa')

    # deploy model

    pip = [
        "azureml-defaults", "azureml-monitoring",
        "git+https://github.com/NervanaSystems/nlp-architect.git@absa",
        "spacy==2.1.4"
    ]

    myenv = CondaDependencies.create(pip_packages=pip)

    with open("absaenv.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    deploy_env = Environment.from_conda_specification('absa_env',
                                                      "absaenv.yml")
    deploy_env.environment_variables = {'NLP_ARCHITECT_BE': 'CPU'}

    inference_config = InferenceConfig(environment=deploy_env,
                                       entry_script="score.py")

    deploy_config = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        description='Aspect-Based Sentiment Analysis - Intel')
    print('Initiating deployment')
    deployment = Model.deploy(ws,
                              'absa-svc',
                              models=[model],
                              inference_config=inference_config,
                              deployment_config=deploy_config,
                              overwrite=True)

    deployment.wait_for_deployment(show_output=True)
    print('Getting logs')
    print(deployment.get_logs())
    print('Done!')
Example No. 13
def main():
    try:
        ws = connectToWorkspace(TENANT_ID, APP_ID, SP_PASSWORD,
                                SUBSCRIPTION_ID, RESOURCE_GROUP,
                                WORKSPACE_NAME)
    except ProjectSystemException as err:
        print('Authentication did not work.')
        return json.dumps('ProjectSystemException')
    except Exception as err:
        print(err)
        sys.exit()
    print("connect")
    model = Model.register(model_path=os.path.join(
        os.getcwd(), "retailai_recommendation_model.zip"),
                           model_name="retailai_recommendation_model",
                           description="Retail.AI Item-Based Recommender",
                           workspace=ws)
    print("model registered")

    myenv = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')
    myenv.name = "myenv"
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("azure-storage")
    conda_dep.add_pip_package("azure-storage-file-datalake")
    myenv.python.conda_dependencies = conda_dep
    print("Environment Configured")
    inference_config = InferenceConfig(entry_script='score.py',
                                       environment=myenv)

    aks_target_name = "raiaks"

    try:
        aks_target = AksCompute(ws, aks_target_name)
        print(aks_target)
    except ComputeTargetException as err:
        aks_target = attachAksComputeToWorkspace(ws, RESOURCE_GROUP,
                                                 AKS_CLUSTER_NAME,
                                                 aks_target_name, True)
        print(aks_target)
    except Exception as err:
        print(err)
        sys.exit()
    try:
        deployToAks(ws, aks_target, "retail-ai-item-recommender", model,
                    inference_config, True)
    except Exception as err:
        print(err)
        sys.exit()
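Example No. 14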
def _create_inference_config(inference_env_name):
    try:
        ws = _establish_connection_to_aml_workspace()
    except Exception:
        print("failed to connect to workspace")
        raise
    try:
        environment = Environment.get(workspace=ws, name=inference_env_name)
        inference_config = InferenceConfig(entry_script="score.py",
                                           environment=environment,
                                           source_directory=r'deployment')
        return inference_config
    except Exception:
        print("failed to create inference config")
        raise
Example No. 15
    def deploy(self):
        myenv = CondaDependencies()
        myenv.add_pip_package("azureml-sdk")
        myenv.add_pip_package("joblib")
        myenv.add_pip_package("tensorflow")
        myenv.add_pip_package("Pillow")
        myenv.add_pip_package("azureml-dataprep[pandas,fuse]>=1.1.14")

        with open("diagnoz_env.yml", "w") as f:
            f.write(myenv.serialize_to_string())

        huml_env = Environment.from_conda_specification(
            name="diagnoz_env", file_path="diagnoz_env.yml")

        inference_config = InferenceConfig(entry_script="score.py",
                                           source_directory='.',
                                           environment=huml_env)
        print("file deployement : ")
        for root, dir_, files in os.walk(os.getcwd()):
            print("dir_", dir_)
            for filename in files:
                print("filename :", filename)

        aciconfig = AciWebservice.deploy_configuration(
            cpu_cores=1,
            memory_gb=1,
            tags={
                "data": "cancer-data",
                "method": "tensorflow"
            },
            description='Predicting cancer with tensorflow')

        try:
            AciWebservice(self.ws, self.config.DEPLOY_SERVICE_NAME).delete()
            print("webservice deleted")
        except WebserviceException:
            pass

        model = self.ws.models[self.config.MODEL_NAME]

        service = Model.deploy(workspace=self.ws,
                               name=self.config.DEPLOY_SERVICE_NAME,
                               models=[model],
                               inference_config=inference_config,
                               deployment_config=aciconfig)

        service.wait_for_deployment(show_output=True)
        print("success deployement")
Example No. 16
def get_inference_config(environment_name, conda_file, entry_script):
    # Create the environment
    env = Environment(name=environment_name)

    conda_dep = CondaDependencies(conda_file)

    # Define the packages needed by the model and scripts
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("xgboost")

    # Adds dependencies to PythonSection of myenv
    env.python.conda_dependencies = conda_dep

    inference_config = InferenceConfig(entry_script=entry_script,
                                       environment=env)

    return inference_config
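A hedged usage sketch for the helper above; the environment name and file paths are assumptions:

inference_config = get_inference_config(environment_name="xgboost-env",
                                        conda_file="conda_dependencies.yml",
                                        entry_script="score.py")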
Example No. 17
    def deploy(self):

        try:
            AciWebservice(self.ws, self.DEPLOY_SERVICE_NAME).delete()
            print("webservice deleted")
        except WebserviceException:
            pass

        conda_dep = CondaDependencies()
        conda_dep.add_pip_package("joblib")
        conda_dep.add_pip_package("torch")
        conda_dep.add_pip_package("torchvision")
        conda_dep.add_pip_package("azureml-sdk")
        conda_dep.add_pip_package("azure-storage-blob")
        conda_dep.add_pip_package("PyYAML")
        conda_dep.add_pip_package("scikit-learn")
        conda_dep.add_pip_package("matplotlib")
        conda_dep.add_pip_package("opencensus-ext-azure")

        shoes_designer_env_file = "shoes_designer_env.yml"
        with open(shoes_designer_env_file, "w") as f:
            f.write(conda_dep.serialize_to_string())

        shoes_designer_env = Environment.from_conda_specification(
            name="shoes_designer_env", file_path=shoes_designer_env_file)

        inference_config = InferenceConfig(entry_script="score.py",
                                           environment=shoes_designer_env)

        aciconfig = AciWebservice.deploy_configuration(
            cpu_cores=1,
            memory_gb=2,
            tags={"method": "torch"},
            description='Generate shoes with torch')

        model = self.ws.models[self.MODEL_NAME]

        service = Model.deploy(workspace=self.ws,
                               name=self.DEPLOY_SERVICE_NAME,
                               models=[model],
                               inference_config=inference_config,
                               deployment_config=aciconfig,
                               overwrite=True)
        service.wait_for_deployment(show_output=True)

        print("deployment succeeded")

        return service
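Example No. 18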
def main():
    load_dotenv()
    workspace_name = os.environ.get("WS_NAME")
    resource_group = os.environ.get("RG_NAME")
    subscription_id = os.environ.get("SUBSCRIPTION_ID")
    tenant_id = os.environ.get("TENANT_ID")
    app_id = os.environ.get("SP_APP_ID")
    app_secret = os.environ.get("SP_APP_SECRET")
    model_name = os.environ.get("MODEL_NAME")
    inference_config_file = os.environ.get("INFERENCE_CONFIG")
    deployment_aci_config = os.environ.get("DEPLOYMENT_ACI_CONFIG")
    conda_dep_yml = os.environ.get("CONDA_DEPENDENCIES")
    score_path = os.environ.get("SCORE_PATH")
    score_source_dir = os.environ.get("SCORE_SOURCE_DIR")
    aci_service_name = os.environ.get("SERVICE_NAME")

    # Get Azure machine learning workspace
    aml_workspace = get_workspace(workspace_name, resource_group,
                                  subscription_id, tenant_id, app_id,
                                  app_secret)

    inference_config = InferenceConfig(source_directory=score_source_dir,
                                       runtime="python",
                                       entry_script=score_path,
                                       conda_file=conda_dep_yml)

    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=2,
        memory_gb=4,
        tags={
            "model": "BERT",
            "method": "tensorflow"
        },
        description='Predict StackoverFlow tags with BERT')

    model = aml_workspace.models[model_name]

    aci_service = Model.deploy(aml_workspace,
                               aci_service_name, [model],
                               inference_config,
                               aciconfig,
                               overwrite=True)

    aci_service.wait_for_deployment(True)

    print(aci_service.state)
Example No. 19
def deploy(workspace,
           name,
           model,
           script,
           source_directory,
           environment=None,
           target='local',
           cpu_cores=1,
           memory_gb=1,
           compute_target_name=None):
    inference_config = InferenceConfig(entry_script=script,
                                       source_directory=source_directory,
                                       environment=environment)

    if target == 'local':
        deployment_config = LocalWebservice.deploy_configuration(port=8890)
    elif target == 'aci':
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=cpu_cores, memory_gb=memory_gb)
    elif target == 'aks':
        if compute_target_name is None:
            print("compute_target_name required when target='aks'")
            return None
        deployment_config = AksWebservice.deploy_configuration(
            cpu_cores=cpu_cores,
            memory_gb=memory_gb,
            compute_target_name=compute_target_name,
            auth_enabled=False)
    else:
        raise ValueError("target must be 'local', 'aci', or 'aks'")

    try:
        service = Webservice(workspace, name)
    except WebserviceException:
        service = None

    if service is None:
        service = Model.deploy(workspace, name, [model], inference_config,
                               deployment_config)
    else:
        print(
            "Existing service with that name found, updating InferenceConfig\n"
            "If you meant to redeploy or change the deployment option, first "
            "delete the existing service.")
        service.update(models=[model], inference_config=inference_config)
    return service
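A hedged usage sketch for the deploy helper above; the model, environment, and service names are assumptions:

ws = Workspace.from_config()
model = Model(ws, name="my-model")        # hypothetical registered model
env = Environment.get(ws, name="my-env")  # hypothetical registered environment
service = deploy(ws, "my-service", model,
                 script="score.py",
                 source_directory="./deploy",
                 environment=env,
                 target="aci")

Example No. 20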
def main():

    # Define workspace object
    try:
        ws = Workspace.from_config(path='deploy/.azureml/config.json')
    except Exception:
        # Need to create the workspace
        print('No workspace. Check for deploy/.azureml/config.json file.')
        raise

    inference_config = InferenceConfig(runtime="python",
                                       entry_script="score.py",
                                       conda_file="keras_env.yml",
                                       source_directory="./deploy")

    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        auth_enabled=True,  # this flag generates API keys to secure access
        memory_gb=6,
        location="westus",
        tags={
            'name': 'yolov3_full',
            'framework': 'Keras'
        },
        description='Keras YOLOv3 full size for object detection')

    model = Model(ws, name='mixdata_trained_weights.h5')

    # This deploys AND registers model (if not registered)
    service = Model.deploy(workspace=ws,
                           name='keras-yolov3-service',
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig)

    # This just deploys and does not register
    # service = Webservice.deploy_from_model(ws,
    #                             name='keras-yolov3-service',
    #                             models=[model],
    #                             deployment_config=aciconfig)

    service.wait_for_deployment(True)
    print(service.state)
Example No. 21
def main(name, model):
    workspace = Workspace.from_config()
    model = Model(workspace, name=model)

    root_folder = Path(__file__).parent.parent

    deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                           memory_gb=1)

    inference_config = InferenceConfig(entry_script='customer_churn/score.py',
                                       source_directory=root_folder)

    webservice = Model.deploy(workspace=workspace,
                              name=name,
                              models=[model],
                              deployment_config=deployment_config,
                              inference_config=inference_config)

    webservice.wait_for_deployment(show_output=True)
Example No. 22
def main(model_name="deploy", model_version=None, deployment_name="deploy"):
    """
    Return a AciWebservice deploy config
    """
    environment = get_environment(
        name=deployment_name,
        file_path="nd00333/model/deploy/environment.yml",
    )
    logger.info(msg="main", extra={"environment": environment})

    inference_config = InferenceConfig(
        source_directory="nd00333",
        entry_script="model/deploy/score.py",
        environment=environment,
    )
    logger.info(msg="main", extra={"inference_config": inference_config})

    workspace = package_utils.get_workspace()

    deployment_config = AciWebservice.deploy_configuration(
        cpu_cores=1.0,
        memory_gb=8.0,
        auth_enabled=True,
        enable_app_insights=True,
        collect_model_data=False,
    )
    logger.info(msg="main", extra={"deployment_config": deployment_config})

    model = Model(workspace, name=model_name, version=model_version)
    logger.info(msg="main", extra={"model": model})

    service = Model.deploy(
        workspace,
        deployment_name,
        [model],
        inference_config,
        deployment_config,
        overwrite=True,
    )
    logger.info(msg="main", extra={"service": service})

    return service
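Because the configuration above sets auth_enabled=True, callers must present one of the service keys. A minimal sketch of an authenticated request against the returned service; the {"data": ...} body is an assumption:

import json
import requests

service = main()
key, _ = service.get_keys()  # primary and secondary keys
response = requests.post(service.scoring_uri,
                         data=json.dumps({"data": []}),
                         headers={"Content-Type": "application/json",
                                  "Authorization": "Bearer " + key})
print(response.status_code, response.text)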
Example No. 23
def main():
    ws = Workspace.get(name=WORKSPACE_NAME,
                       subscription_id=SUBSCRIPTION_ID,
                       resource_group=RESOURCE_GROUP)

    print("connect")

    model = Model.register(model_path=os.path.join(
        os.getcwd(), "retailai_recommendation_model.zip"),
                           model_name="retailai_recommendation_model",
                           description="Retail.AI Item-Based Recommender",
                           workspace=ws)
    print("model registered")

    myenv = Environment.from_conda_specification(name='myenv',
                                                 file_path="environment.yml")
    myenv.docker.base_image = "mcr.microsoft.com/mmlspark/release"
    myenv.inferencing_stack_version = 'latest'
    print("Environment Configured")

    inference_config = InferenceConfig(entry_script='score.py',
                                       environment=myenv)
    aks_target_name = AKS_CLUSTER_NAME

    try:
        aks_target = AksCompute(ws, aks_target_name)
        print(aks_target)
    except ComputeTargetException as err:
        aks_target = attachAksComputeToWorkspace(ws, RESOURCE_GROUP,
                                                 AKS_CLUSTER_NAME,
                                                 aks_target_name, True)
        print(aks_target)
    except Exception as err:
        print(err)
        sys.exit()
    try:
        deployToAks(ws, aks_target, "retail-ai-item-recommender", model,
                    inference_config, True)
    except Exception as err:
        print(err)
        sys.exit()
Example No. 24
def main():
    workspace_name = os.environ['AML_WORKSPACE_NAME']
    resource_group = os.environ['RESOURCE_GROUP']
    subscription_id = os.environ['SUBSCRIPTION_ID']

    spn_credentials = {
        'tenant_id': os.environ['TENANT_ID'],
        'service_principal_id': os.environ['SPN_ID'],
        'service_principal_password': os.environ['SPN_PASSWORD'],
    }

    aml_interface = AMLInterface(spn_credentials, subscription_id,
                                 workspace_name, resource_group)

    scoring_script_path = os.path.join(__here__, 'score.py')
    aml_env = Environment.get(workspace=aml_interface.workspace,
                              name=AML_ENV_NAME)
    service_name = 'aml-pipeline-deploy-3'
    inference_config = InferenceConfig(entry_script=scoring_script_path,
                                       environment=aml_env)
    deploy(aml_interface, inference_config, service_name)
Example No. 25
def get_config(entry_script):
    # Create the environment
    env = Environment(name="tensorflow_env")

    conda_dep = CondaDependencies()

    # Define the packages needed by the model and scripts
    conda_dep.add_conda_package("tensorflow")

    # You must list azureml-defaults as a pip dependency
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("keras")
    conda_dep.add_pip_package("pandas")

    # Adds dependencies to PythonSection of myenv
    env.python.conda_dependencies = conda_dep

    inference_config = InferenceConfig(entry_script=entry_script,
                                       environment=env)

    print('Endpoint configuration returned')
    return inference_config
Example No. 26
def main(args, ws):
    script_dir = os.path.dirname(os.path.abspath(__file__))
    ic = InferenceConfig(runtime='python',
                         source_directory=script_dir,
                         entry_script='score.py',
                         conda_file='environment.yml')

    dc = AciWebservice.deploy_configuration(cpu_cores=args.cores,
                                            memory_gb=args.memory)
    m = ws.models[args.model_name]

    # Remove any existing service under the same name.
    try:
        Webservice(ws, args.service_name).delete()
        print(f'Deleted webservice with name {args.service_name}.')
    except WebserviceException:
        pass

    service = Model.deploy(ws, args.service_name, [m], ic, dc)
    service.wait_for_deployment(True)
    print(service.state)
    print(service.get_logs())
Example No. 27
def deploy_models():
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        tags={
            "data": "Earthquake",
            "method": "sklearn"
        },
        description='Predict aftershock situation '
        'using linear models in sklearn')

    # env = Environment('aftershock-env')
    # cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]',
    #                                             'azureml-defaults'],
    #                               conda_packages=['scikit-learn==0.24.2'])
    # env.python.conda_dependencies = cd
    env = Environment.get(workspace=ws, name='aftershock-env')

    inference_config = InferenceConfig(entry_script="predict.py",
                                       environment=env)
    model = Model(ws, "aftershock_model")

    # service_name = 'sklearn-aftershock-svc-' + str(uuid.uuid4())[:4]
    service_name = "sklearn-aftershock-svc-f41b"
    service = Model.deploy(workspace=ws,
                           name=service_name,
                           models=[model],
                           overwrite=True,
                           inference_config=inference_config,
                           deployment_config=aciconfig)

    service.wait_for_deployment(show_output=True)
    print(service.get_logs())
    print(service.scoring_uri)
    print("service keys")
    print(service.get_keys())
    print("service token")
    print(service.get_token())
Example No. 28
def file_to_inference_config(workspace, inference_config_file, description):
    with open(inference_config_file) as inference_file_stream:
        inference_config_obj = file_stream_to_object(inference_file_stream)

        # Retrieve Environment object from the name in the InferenceConfig file
        if 'environment' not in inference_config_obj:
            raise OMLException("need to specify environment in --deploy-config-file")
        environment_name = inference_config_obj.get('environment')["name"]
        environment = Environment.get(workspace, name=environment_name)

        inference_config = InferenceConfig(
            entry_script=inference_config_obj.get('entryScript'),
            runtime=inference_config_obj.get('runtime'),
            conda_file=inference_config_obj.get('condaFile'),
            extra_docker_file_steps=inference_config_obj.get('extraDockerfileSteps'),
            source_directory=inference_config_obj.get('sourceDirectory'),
            enable_gpu=inference_config_obj.get('enableGpu'),
            base_image=inference_config_obj.get('baseImage'),
            base_image_registry=inference_config_obj.get('baseImageRegistry'),
            cuda_version=inference_config_obj.get('cudaVersion'),
            environment=environment,
            description=description)
        return inference_config
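For reference, a hedged sample of the deploy-config file this helper parses; the keys come straight from the getters above, while the values are assumptions:

{
    "environment": {"name": "my-inference-env"},
    "entryScript": "score.py",
    "runtime": "python",
    "condaFile": "conda_dependencies.yml",
    "sourceDirectory": "./deploy",
    "enableGpu": false
}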
Example No. 29
    def deploy_local(self):

        try:
            LocalWebservice(self.ws, "test").delete()
            print("webservice deleted")
        except WebserviceException:
            pass

        shoes_designer_env = Environment('shoes_designer_env')
        shoes_designer_env.python.conda_dependencies.add_pip_package("joblib")
        shoes_designer_env.python.conda_dependencies.add_pip_package("torch")
        shoes_designer_env.python.conda_dependencies.add_pip_package("torchvision")
        shoes_designer_env.python.conda_dependencies.add_pip_package("azure-storage-blob")
        shoes_designer_env.python.conda_dependencies.add_pip_package("azureml-sdk")
        shoes_designer_env.python.conda_dependencies.add_pip_package("PyYAML")
        shoes_designer_env.python.conda_dependencies.add_pip_package("scikit-learn")
        shoes_designer_env.python.conda_dependencies.add_pip_package("matplotlib")
        conda_dep.add_pip_package("opencensus-ext-azure")

        # explicitly set base_image to None when setting base_dockerfile
        shoes_designer_env.docker.base_image = None
        shoes_designer_env.docker.base_dockerfile = "FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04\nRUN echo \"this is test\""
        shoes_designer_env.inferencing_stack_version = "latest"

        inference_config = InferenceConfig(entry_script="score.py",environment=shoes_designer_env)
        
        model = self.ws.models[self.MODEL_NAME]

        # This is optional, if not provided Docker will choose a random unused port.
        deployment_config = LocalWebservice.deploy_configuration(port=6789)

        local_service = Model.deploy(self.ws, "test", [model], inference_config, deployment_config)

        local_service.wait_for_deployment()
        print("deployment succeeded")

        return local_service
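Once the container is up, the local service listens on the configured port, so it can be exercised through local_service.run() or plain HTTP. A sketch, assuming a JSON contract in score.py:

import json
import requests

response = requests.post("http://localhost:6789/score",
                         data=json.dumps({"data": []}),
                         headers={"Content-Type": "application/json"})
print(response.status_code, response.text)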
Example No. 30
def deploy():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", required=True)
    parser.add_argument("--model_path", required=True)
    args = parser.parse_args()

    print(f"model_name : {args.model_name}")
    print(f"model_path: {args.model_path}")

    run = Run.get_context()
    ws = run.experiment.workspace

    model = Model.register(workspace=ws,
                           model_path=args.model_path,
                           model_name=args.model_name)

    print("Registered version {0} of model {1}".format(model.version,
                                                       model.name))

    inference_config = InferenceConfig(
        entry_script='score.py',
        runtime='python',
        conda_file='conda.yml',
        extra_docker_file_steps='extra_docker_steps',
        source_directory='server_files/')
    deployment_config = AciWebservice.deploy_configuration(cpu_cores=0.1,
                                                           memory_gb=0.5,
                                                           auth_enabled=True)

    try:
        service = AciWebservice(ws, "testscorescriptauto")
        service.update(models=[model])
        print("EXISTING ENDPOINT FOUND: MODEL UPDATED")
    except Exception:
        Model.deploy(ws, "testscorescriptauto", [model], inference_config,
                     deployment_config)
        print("NO EXISTING ENDPOINT FOUND: DEPLOYED MODEL TO NEW ENDPOINT")