Example #1
def downloadPickles(ws, modelName, outputPath="./pickles", modelVer=None):
    if modelVer == 'best':
        bestModel = None
        maxAcc = -1
        for model in Model.list(ws, modelName, ["accuracy"]):
            modelAcc = float(model.tags["accuracy"])
            if modelAcc > maxAcc:
                bestModel = model
                maxAcc = modelAcc

        print(f"### Best model with highest accuracy of {maxAcc} found")

        if not bestModel:
            model = Model(ws, modelName)
            print("### WARNING! No best model found, using latest instead")
    elif modelVer is not None:
        model = Model(ws, modelName, version=modelVer)
    else:
        model = Model(ws, modelName)

    print(f"### Using model version {model.version}")
    # Echoing this magic string sets an output variable in the Azure DevOps pipeline
    # Set AZML_MODEL_VER for use by subsequent steps
    print(f"##vso[task.setvariable variable=AZML_MODEL_VER]{model.version}")

    # These special tags let us get back to the run that created the model
    try:
        runId = model.tags['aml-runid']
        experimentName = model.tags['aml-experiment']
    except KeyError:
        print(
            "### ERROR! Model is missing the `aml-runid` and `aml-experiment` tags, can't continue!"
        )
        exit()

    exp = Experiment(workspace=ws, name=experimentName)
    run = Run(exp, runId)
    if run.status != "Completed":
        print(f'### ERROR! Run {runId} did not complete!')
        return

    print(f'### Will download from run {runId} in {experimentName}')

    # Now we can get all the files created with the run, grab all the .pkls
    for f in run.get_file_names():
        if f.endswith('.pkl'):
            output_file_path = os.path.join(outputPath, f.split('/')[-1])
            print('### Downloading from {} to {} ...'.format(
                f, output_file_path))
            run.download_file(name=f, output_file_path=output_file_path)

    # Add some extra metadata, handy to have
    metadata = {
        'name': model.name,
        'version': model.version,
        'tags': model.tags
    }
    with open(f"{outputPath}/metadata.json", 'w') as metadata_file:
        print(f"### Storing metadata in {outputPath}/metadata.json")
        json.dump(metadata, metadata_file)
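A minimal sketch of how a later step in the same Azure DevOps job could pick up the variable set by the `##vso[task.setvariable ...]` line above; the variable name comes from the snippet, everything else is illustrative:

import os

# Variables set via ##vso[task.setvariable] become environment variables
# for subsequent steps in the same job.
model_ver = os.environ.get("AZML_MODEL_VER")
print(f"Packaging pickles for model version {model_ver}")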
Example #2
    def __deploy_model(self):
        service_name = self.__args.service_name

        model = Model(self.__ws, self.__args.model_name)
        explainer_model = Model(self.__ws, self.__args.explainer_model_name)
        myenv = Environment.from_conda_specification(
            name=self.__config.get('DEPLOY', 'ENV_NAME'),
            file_path=self.__config.get('DEPLOY', 'ENV_FILE_PATH'))
        inference_config = InferenceConfig(
            entry_script=self.__config.get('DEPLOY', 'SCORE_PATH'),
            environment=myenv,
            source_directory=self.__config.get('DEPLOY',
                                               'DEPENDENCIES_DIRECTORY'))

        if not self.__args.update_deployment:
            deployment_config = AciWebservice.deploy_configuration(
                cpu_cores=self.__config.getint('DEPLOY', 'ACI_CPU'),
                memory_gb=self.__config.getint('DEPLOY', 'ACI_MEM'),
                collect_model_data=True,
                enable_app_insights=True)
            service = Model.deploy(self.__ws, service_name,
                                   [model, explainer_model], inference_config,
                                   deployment_config)
        else:
            service = AciWebservice(self.__ws, service_name)
            service.update(models=[model, explainer_model],
                           inference_config=inference_config)

        service.wait_for_deployment(show_output=True)
        print(service.state)
        print(service.get_logs())
Example #3
def main(args):

    # Define workspace object
    try:
        ws = Workspace.from_config(path='deploy/.azureml/config.json')
    # Need to create the workspace
    except Exception as err:
        print('No workspace.  Check for deploy/.azureml/config.json file.')
        assert False

    inference_config = InferenceConfig(runtime="python",
                                       entry_script="score.py",
                                       conda_file="keras_env.yml",
                                       source_directory="./deploy")

    deployment_config = LocalWebservice.deploy_configuration()

    model = Model(ws, name=args.model_workspace)

    # This deploys AND registers model (if not registered)
    service = Model.deploy(workspace=ws,
                           name=args.service_name,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config)

    service.wait_for_deployment(True)
    print(service.state)
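Once the local web service above reports a healthy state, it can be exercised in-process; a hedged sketch, assuming the `service` object returned by `Model.deploy` is still in scope and that `score.py` accepts a JSON body with a `data` field (the payload shape is only a placeholder):

import json

sample = json.dumps({"data": [[0.1, 0.2, 0.3]]})  # placeholder payload
# LocalWebservice.run() posts the payload to the locally running container.
print(service.run(input_data=sample))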
Example #4
def main(args):

    # Define workspace object
    try:
        ws = Workspace.from_config(path='deploy/.azureml/config.json')
    # Need to create the workspace and download config.json from Azure Portal
    except Exception as err:
        print('No workspace.  Check for deploy/.azureml/config.json file.')
        assert False

    model = Model(ws, name=args.model_workspace)

    inference_config = InferenceConfig(runtime="python",
                                       entry_script="score.py",
                                       conda_file="keras_env.yml",
                                       source_directory="./deploy")

    package = Model.package(ws, [model],
                            inference_config,
                            generate_dockerfile=True)
    package.wait_for_creation(show_output=True)
    # Download the package.
    package.save("./" + args.out_dir)
    # Get the Azure container registry that the model/Dockerfile uses.
    acr = package.get_container_registry()
    print("Address:", acr.address)
    print("Username:"******"Password:", acr.password)
Example #5
def init():
    global sb_client, queue_client, aml_run, LGBM_MODEL, topic
    aml_run = Run.get_context()
    ws = aml_run.experiment.workspace
    keyvault = ws.get_default_keyvault()

    con_str = keyvault.get_secret('servicebustopic')

    sb_client = ServiceBusClient.from_connection_string(con_str)
    # queue_client = sb_client.get_queue("landing")
    #Loading model from AML Workspace

    model_name = "porto_seguro_safe_driver_model"
    model = Model(ws, model_name)
    model.download("model", exist_ok=True)
    model_path = os.path.join("model", model_name)
    LGBM_MODEL = joblib.load(model_path)
    parser = argparse.ArgumentParser()

    parser.add_argument("--topic",
                        type=str,
                        dest="topic",
                        required=True,
                        help="name of topic")

    args, _ = parser.parse_known_args()
    # Set number of tasks equal to total number of cores in the cluster
    topic = args.topic
Example #6
def _get_model_id(workspace, source_model):
    """
    Helper method to get model id.

    :param workspace:
    :type workspace: azureml.core.workspace.Workspace
    :param source_model:
    :type source_model: azureml.core.model or model id str
    :return: model id str
    :rtype: str
    """

    if type(source_model) is str:
        try:
            registered_model = Model(workspace, id=source_model)
        except WebserviceException:
            raise ModelNotFoundException('model not found')

        return registered_model.id

    if type(source_model) is Model:
        return source_model.id

    raise NotImplementedError(
        'source_model must either be of type azureml.core.Model or a str of model id.'
    )
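Both accepted call forms of `_get_model_id`, shown as a rough sketch; `ws` and the model name/id below are hypothetical:

# By registered model id string (AzureML model ids are "name:version") ...
model_id = _get_model_id(ws, "my-model:3")
# ... or by passing an azureml.core.Model object directly.
model_id = _get_model_id(ws, Model(ws, name="my-model"))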
Example #7
def main():
    print('Loading workspace configuration...')
    ws = Workspace.from_config()

    print('Getting model...')
    model = Model(ws, 'yolov3-tf')

    print("Configurando Objects...")
    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=2,
        memory_gb=2,
        tags={"data": "solo yolov3 tensorflow"},
        description='yolov3 y tensorflow',
        dns_name_label='ceibatest')

    inference_config = InferenceConfig(entry_script="score.py",
                                       source_directory="../azure",
                                       conda_file='conda-cpu.yml',
                                       runtime='python')

    print("Desplegando...")
    service = Model.deploy(workspace=ws,
                           name='yolov3-tf-deploy',
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig,
                           overwrite=True)

    service.wait_for_deployment(show_output=True)
    url = service.scoring_uri
    print(url)
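After deployment, the scoring URI printed above can be called over plain HTTP; a minimal sketch, assuming `url` holds that URI and with a placeholder request body since the real shape depends on score.py:

import json
import requests

payload = json.dumps({"data": "..."})  # placeholder, depends on score.py
response = requests.post(url, data=payload,
                         headers={"Content-Type": "application/json"})
print(response.status_code, response.text)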
Example #8
def get_model(model_name,
              model_version=None,
              workspace=None,
              tag_name=None,
              tag_value=None):
    if workspace is None:
        print('no workspace provided, will use the currently available workspace')
        workspace = get_current_workspace()
    
    tags = None
    if tag_name is not None or tag_value is not None:
        if tag_name is None or tag_value is None:
            raise ValueError('both tag_name and tag_value must be provided with a value')
        tags = [[tag_name,tag_value]]
    
    model = None
    if model_version is not None:
        model = Model(workspace=workspace,
                      name=model_name,
                      tags=tags,
                      version=model_version)
    else:
        models = Model.list(workspace=workspace,
                            name=model_name,
                            tags=tags,
                            latest=True)
        if len(models) == 1:
            model = models[0]
        if len(models) > 1:
            raise Exception('only one model expected!')
        
    return model
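Illustrative calls to `get_model`; the model name, version, and tag below are placeholders:

latest = get_model("my-model")                                       # latest version
pinned = get_model("my-model", model_version=3)                      # specific version
tagged = get_model("my-model", tag_name="stage", tag_value="prod")   # filtered by tag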
Example #9
def run():
    print("entered run")
    variables_received = "sub_id: {}, rg: {}, work_name: {}, state: {}, author: {}, model_name: {}" \
                            .format(resolve_sub_id(),
                                    resolve_rg(),
                                    resolve_workspace_name(),
                                    resolve_state(),
                                    resolve_author(),
                                    resolve_model_name())
    print(variables_received)

    az_ws = Workspace(resolve_sub_id(), resolve_rg(), resolve_workspace_name())
    print("initialized workspace")
    #Get & Download model
    model = Model(az_ws,
                  name=resolve_model_name(),
                  tags={
                      "state": resolve_state(),
                      "created_by": resolve_author()
                  })
    print("initialized model")
    model.download(target_dir="./assets/")
    print("downloaded model assets")
    #TODO: remove workaround for ml sdk dropping assets into /assets/dacrook folder when files dropped to consistent location
    for dir_p, _, f_n in walk("./assets"):
        for f in f_n:
            abs_path = os.path.abspath(os.path.join(dir_p, f))
            shutil.move(abs_path, "./assets/" + f)

    #Configure Image
    my_env = CondaDependencies.create(conda_packages=["numpy", "scikit-learn"])
    with open("myenv.yml", "w") as f:
        f.write(my_env.serialize_to_string())
    image_config = ContainerImage.image_configuration(
        execution_script="score.py",
        runtime="python",
        conda_file="myenv.yml",
        dependencies=["assets", "inference_code"],
        tags={
            "state": resolve_state(),
            "created_by": resolve_author()
        })
    print("configured image")
    #TODO: use this once model is dropped to a consistent location
    #    image = Image.create(workspace = az_ws, name=resolve_image_name(), models=[model], image_config = image_config)
    image = Image.create(workspace=az_ws,
                         name=resolve_image_name(),
                         models=[model],
                         image_config=image_config)
    image.wait_for_creation()
    print("created image")
    if (image.creation_state != "Succeeded"):
        raise Exception("Failed to create image.")
    print("image location: {}".format(image.image_location))
    artifacts = {"image_location": image.image_location}
    if (not os.path.exists("/artifacts/")):
        os.makedirs("/artifacts/")
    with open("/artifacts/artifacts.json", "w") as outjson:
        json.dump(artifacts, outjson)
Example #10
def model_already_registered(model_name, run_id, exp):
    model_list = Model.list(workspace=exp.workspace, run_id=run_id, name=model_name)
    if len(model_list) >= 1:
        error = f'model: {model_name} with run_id: {run_id}  has already been registered at workspace: {exp.workspace}'
        print(error)
        raise Exception(error)
    else:
        print('model is not registered for this workspace')
Example #11
def make_predictions(
    transformed_data_path: str,
    original_data_path: str,
    inference_path: str,
):
    """Loads the datasets already transformed and make the predictions.

    Args:
        transformed_data_path (str): Path to the transformed dataset ready to be
        feed to the model for prediction.
        original_data_path (str): Path to the original dataset.
        inference_path (str): Path to the predictions.
    """
    # helper
    aml_helper = AmlCustomHelper()

    # load dataset
    logger.info("Load the training datasets")
    X = pd.read_parquet(transformed_data_path)
    logger.info(f"X shape:\t{X.shape}")

    # load original dataset
    logger.info("Load the original datasets")
    original_data = pd.read_parquet(original_data_path)
    logger.info(f"original_data shape:\t{original_data.shape}")

    # download registered model
    logger.info("Download Azureml model")
    az_model = Model(aml_helper.ws, MODEL_NAME)

    logger.info(f"aml_helper.ASSETS_DIR:\t{aml_helper.ASSETS_DIR}")
    az_model.download(
        target_dir=f"{'/'.join(aml_helper.ASSETS_DIR.split('/')[0:-1])}",
        exist_ok=True,
    )

    # load model
    logger.info("Load Azureml model")
    model = joblib.load(f"{aml_helper.ASSETS_DIR}/{MODEL_NAME}")
    logger.info(f"Model:\t{model}")

    # batch inference - get predictions
    pred = model.predict(X)

    logger.info(f"type:\t{type(pred)}")
    logger.info(f"pred.shape:\t{pred.shape}")

    # transform the predictions to DataFrame
    pred = pd.DataFrame(pred, columns=["prediction"])
    logger.info(pred.head())

    # concat the predictions with original dataset
    df_pred_and_orig = pd.concat([original_data, pred], axis=1)
    logger.info(f"df_pred_and_orig.head():\t{df_pred_and_orig.head()}")

    # persist predictions
    logger.info("persist results")
    df_pred_and_orig.to_csv(inference_path)
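A hypothetical invocation of `make_predictions`; the file paths below are placeholders:

make_predictions(
    transformed_data_path="data/transformed.parquet",
    original_data_path="data/original.parquet",
    inference_path="outputs/predictions.csv",
)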
Example #12
def register_model_sk(model, foldername, modelname):
    ws = get_ws_from_run()

    mlflow.sklearn.log_model(model,
                             foldername,
                             registered_model_name=modelname)
    model_id = Model(ws, modelname).id

    return model_id
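A rough usage sketch for `register_model_sk`, assuming MLflow tracking is already pointed at the AzureML workspace (as `get_ws_from_run()` suggests is the case inside an AzureML run); the estimator and names are placeholders:

from sklearn.linear_model import LogisticRegression

clf = LogisticRegression().fit([[0.0], [1.0]], [0, 1])  # toy model for illustration
model_id = register_model_sk(clf, "model", "my-sklearn-model")
print(model_id)  # e.g. "my-sklearn-model:4"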
Example #13
def init():
    global model
    azmodel = Model(get_workspace(), name='fashionMNIST')
    model_path = './model'
    model_name = os.path.join(model_path, 'fashionMNIST.h5')
    os.makedirs(model_path, exist_ok=True)
    if os.path.exists(model_name):
        os.remove(model_name)
    azmodel.download(target_dir=model_path, exist_ok=True)
    model = keras.models.load_model(model_name)
Example #14
def downloadPickles(ws, modelName, outputPath="./pickles", modelVer=None):
    if modelVer == 'best':
        bestModel = None
        maxAcc = -1
        for model in Model.list(ws, modelName, ["accuracy"]):
            modelAcc = float(model.tags["accuracy"])
            if modelAcc > maxAcc:
                bestModel = model
                maxAcc = modelAcc

        print(f"### Best model with highest accuracy of {maxAcc} found")

        if not bestModel:
            model = Model(ws, modelName)
            print("### WARNING! No best model found, using latest instead")
    elif modelVer is not None:
        model = Model(ws, modelName, version=modelVer)
    else:
        model = Model(ws, modelName)

    print(f"### Using model version {model.version}")
    # Echoing this magic string sets an output variable in the Azure DevOps pipeline
    # Set AZML_MODEL_VER for use by subsequent steps
    print(f"##vso[task.setvariable variable=AZML_MODEL_VER]{model.version}")

    # The uploaded files will be in a subfolder "outputs", so download files to a temp location
    # Then move to target output folder
    model.download(f"{tempfile.gettempdir()}/aml", exist_ok=True)
    output_files = os.listdir(f"{tempfile.gettempdir()}/aml/outputs")
    os.makedirs(outputPath, exist_ok=True)
    for output_file in output_files:
        os.rename(f"{tempfile.gettempdir()}/aml/outputs/{output_file}",
                  f"{outputPath}/{output_file}")

    # Add some extra metadata, handy to have
    metadata = {
        'name': model.name,
        'version': model.version,
        'tags': model.tags
    }
    with open(f"{outputPath}/metadata.json", 'w') as metadata_file:
        print(f"### Storing metadata in {outputPath}/metadata.json")
        json.dump(metadata, metadata_file)
Example #15
def downloadModel(ws, args, folders):
    file_path = os.path.join(folders.output_folder, args.modelFileName)
    if (os.path.isfile(file_path)):
        print("Model file already exists in: {0}".format(file_path))
    else:
        print("Model file does not exist in: {0}".format(file_path))
        model = Model(ws, name=args.modelName)
        model.download(target_dir=folders.output_folder, exist_ok=False)
    statinfo = os.stat(file_path)
    if (args.verbose):
        print(statinfo)
Example #16
    def __create_datadrift_detector(self):
        model = Model(self.__ws, self.__model_name)

        try:
            self.monitor = DataDriftDetector.create_from_model(
                self.__ws, model.name, model.version, self.__services,
                frequency='Day',
                compute_target=self.__compute_name)
        except KeyError:
            self.monitor = DataDriftDetector.get(self.__ws, model.name, model.version)

        self.monitor.enable_schedule(wait_for_completion=True)
Example #17
def update_deployed_model(ws, aci_service_name, model_name, mlapp_env,
                          entry_script):
    inference_config = InferenceConfig(source_directory=os.getcwd(),
                                       entry_script=entry_script,
                                       environment=mlapp_env)

    model = Model(ws, name=model_name)
    service = Webservice(name=aci_service_name, workspace=ws)
    service.update(models=[model], inference_config=inference_config)

    print(service.state)
    print(service.get_logs())
Example #18
def main():
    # retrieve argument configured through script_params in estimator
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", dest='model_name', type=str,
                        help="Name of the model to retrieve from Workspace")
    args = parser.parse_args()

    # Get the current run
    run = Run.get_context()

    # Get metrics from current model and compare with the metrics
    # of the new model. The metrics of the new model can be retrieved
    # from run.parent.get_metrics, which were created in training_model.py
    metrics = ['Accuracy', 'Precision', 'Recall', 'F1-score']
    current_metrics = {}
    new_metrics = {}

    try:
        workspace = run.experiment.workspace
        # Get latest model
        model = Model(workspace, args.model_name)

        for key in metrics:
            current_metrics[key] = float(model.tags.get(key))
            new_metrics[key] = run.parent.get_metrics(key).get(key)
            run.log(key, 'current(ver '
                         + str(model.version)
                         + ')='
                         + model.tags.get(key)
                         + ' new='
                         + str(run.parent.get_metrics(key).get(key))
                    )

    except WebserviceException as e:
        if('ModelNotFound' in e.message):
            model = None
        else:
            raise

    # Perform comparison. Just do a simple comparison:
    # If Accuracy improves, proceed next step to register model.
    if(model is not None):
        if(new_metrics['Accuracy'] >= current_metrics['Accuracy']):
            run.log("Result", "New Accuracy is as good as current, \
                will proceed to register new model.")
        else:
            run.log("Result", "New Accuracy is worse than current, \
                will not register model. Processing cancelled.")
            run.parent.cancel()
    else:
        run.log("Result", "This is the first model, will proceed \
            to register the model.")
Example #19
def deploy(local, aks, aci, num_cores, mem_gb, compute_name):
    # Get the workspace
    ws = Workspace.from_config()
    # Create inference configuration based on the environment definition and the entry script
    # yolo = Environment.from_conda_specification(name="env", file_path="yolo.yml")
    yolo = Environment.from_pip_requirements(
        name="yolo", file_path="./deployed_requirements.txt")
    # yolo.save_to_directory('')
    yolo.register(workspace=ws)
    inference_config = InferenceConfig(entry_script="azure.py",
                                       environment=yolo,
                                       source_directory="yolov5")
    # Retrieve registered model
    model = Model(ws, id="lpr:1")
    deploy_target = None
    if local:
        # Create a local deployment, using port 8890 for the web service endpoint
        deployment_config = LocalWebservice.deploy_configuration(port=8890)
    elif aks:
        # Create an AKS deployment
        deployment_config = AksWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb,
            compute_target_name=compute_name)
        deploy_target = ComputeTarget(workspace=ws, name=compute_name)
        # if deploy_target.get_status() != "Succeeded":
        #     print(f"Deploy Target: {deploy_target.get_status()}")
        #     deploy_target.wait_for_completion(show_output=True)
    elif aci:
        # Create an ACI deployment
        deployment_config = AciWebservice.deploy_configuration(
            cpu_cores=num_cores,
            memory_gb=mem_gb)
    else:
        raise NotImplementedError("Choose deploy target please")
    # Deploy the service
    print("Deploying:")
    service = Model.deploy(workspace=ws,
                           name="lpr",
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=deployment_config,
                           overwrite=True,
                           deployment_target=deploy_target)
    # Wait for the deployment to complete
    print("Deploying:")
    service.wait_for_deployment(True)
    # Display the port that the web service is available on
    if local:
        print(service.port)
Example #20
def main():
    # get access to workspace
    try:
        ws = Workspace.from_config()
        print(ws.name, ws.location, ws.resource_group, sep='\t')
        print('Library configuration succeeded')
    except Exception:
        print('Workspace not found')
        return

    # get model
    model = Model(ws, 'absa')

    # deploy model

    pip = [
        "azureml-defaults", "azureml-monitoring",
        "git+https://github.com/NervanaSystems/nlp-architect.git@absa",
        "spacy==2.1.4"
    ]

    myenv = CondaDependencies.create(pip_packages=pip)

    with open("absaenv.yml", "w") as f:
        f.write(myenv.serialize_to_string())

    deploy_env = Environment.from_conda_specification('absa_env',
                                                      "absaenv.yml")
    deploy_env.environment_variables = {'NLP_ARCHITECT_BE': 'CPU'}

    inference_config = InferenceConfig(environment=deploy_env,
                                       entry_script="score.py")

    deploy_config = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        description='Aspect-Based Sentiment Analysis - Intel')
    print('Initiating deployment')
    deployment = Model.deploy(ws,
                              'absa-svc',
                              models=[model],
                              inference_config=inference_config,
                              deployment_config=deploy_config,
                              overwrite=True)

    deployment.wait_for_deployment(show_output=True)
    print('Getting Logs')
    print(deployment.get_logs())
    print('Done!')
Example #21
def register_model(**parameters):
    zsh('az ml model list --output json > az_ml_model_list.json')
    model_name = parameters['model_name']
    # model_id = parameters['id']
    # model_version = parameters['version']
    ws = parameters['workspace']
    with open('az_ml_model_list.json') as models_json:
        models = json.load(models_json)
        matched_models = [
            model for model in models if model['name'] == model_name
        ]
        if len(matched_models) == 1:
            print('Found the model\n')
            model = Model(workspace=ws, name=model_name)
        elif len(matched_models) == 0:
            print('Provided model {} has not been found\n'.format(model_name))
            print('registering new model in Azure ...\n')
            model = Model.register(**model_registration_parameters)
        elif len(matched_models) > 1:
            model_name = matched_models[0]['name']
            model = Model(workspace=ws, name=model_name)

    zsh('rm az_ml_model_list.json')
    return model
Example #22
    def __check_models_metrics(self, datasets, metric):
        models = Model.list(self.__ws, self.__args.model_name)
        if len(models) == 0:
            self.__register_model(datasets, metric)
        else:
            model_metric = Model(
                self.__ws,
                self.__args.model_name).tags[self.__default_metric_name]
            last_metric = ast.literal_eval(model_metric)

            if self.__compare_metrics(current_metric=last_metric,
                                      new_metric=metric):
                self.__register_model(datasets, metric)
            else:
                raise Exception(
                    "The new model perfomance is worse than the last model")
Example #23
def deployWebservice(ws, args, folders):
    # this section requires that processing is done in the directory where the execution script and the conda file reside
    os.chdir(folders.script_folder)
    model = Model(ws, args.modelName)
    aciconfig = AciWebservice.deploy_configuration(cpu_cores=args.cpuCores,
                                                   memory_gb=args.memoryGB)
    # configure the image
    image_config = ContainerImage.image_configuration(
        execution_script=args.scoringScript,
        runtime="python",
        conda_file=args.environmentFileName)
    service = Webservice.deploy_from_model(workspace=ws,
                                           name=args.webserviceName,
                                           deployment_config=aciconfig,
                                           models=[model],
                                           image_config=image_config)
    service.wait_for_deployment(show_output=True)
    return service.scoring_uri
Example #24
def main(name, model):
    workspace = Workspace.from_config()
    model = Model(workspace, name=model)

    root_folder = Path(__file__).parent.parent

    deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                           memory_gb=1)

    inference_config = InferenceConfig(entry_script='customer_churn/score.py',
                                       source_directory=root_folder)

    webservice = Model.deploy(workspace=workspace,
                              name=name,
                              models=[model],
                              deployment_config=deployment_config,
                              inference_config=inference_config)

    webservice.wait_for_deployment(show_output=True)
Example #25
def _start_deploy_model(inference_config, deployment_config, model_name,
                        model_version, deployment_name):
    try:
        ws=_establish_connection_to_aml_workspace()
    except Exception as e:
        print("failed to connect to workspce")
        raise e
    try:
        model = Model(workspace=ws, name=model_name, version=model_version)
        service = Model.deploy(workspace=ws,
                               name=deployment_name,
                               models=[model],
                               inference_config=inference_config,
                               deployment_config=deployment_config,
                               overwrite=True)
        service.wait_for_deployment()
        print(service.state)
        print("Deployed at {}".format(service.scoring_uri))
    except Exception as e:
        raise e
Example #26
    def main(self):
        dataset = self.__get_dataset(self.__args.dataset_name)
        model = self.__load_model()
        df = dataset.to_pandas_dataframe()

        X_raw, Y, A, X = self.__transform_df(df)
        X_train, X_test, Y_train, Y_test, A_train, A_test = self.__df_train_split(
            X_raw, Y, A, X)

        Y_pred = model.predict(X_test)

        content = {
            "Y_pred": Y_pred,
            "Y_test": Y_test,
            "A_test": A_test,
            "model_id": Model(self.__ws, self.__args.fitted_model_name).id
        }

        self.__set_fairlearn_dict_as_pipeline_output(content)
Example #27
def main():

    # Define workspace object
    try:
        ws = Workspace.from_config(path='deploy/.azureml/config.json')
    # Need to create the workspace
    except Exception as err:
        print('No workspace.  Check for deploy/.azureml/config.json file.')
        assert False

    inference_config = InferenceConfig(runtime="python",
                                       entry_script="score.py",
                                       conda_file="keras_env.yml",
                                       source_directory="./deploy")

    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=1,
        auth_enabled=True,  # this flag generates API keys to secure access
        memory_gb=6,
        location="westus",
        tags={
            'name': 'yolov3_full',
            'framework': 'Keras'
        },
        description='Keras YOLOv3 full size for object detection')

    model = Model(ws, name='mixdata_trained_weights.h5')

    # This deploys AND registers model (if not registered)
    service = Model.deploy(workspace=ws,
                           name='keras-yolov3-service',
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig)

    # This just deploys and does not register
    # service = Webservice.deploy_from_model(ws,
    #                             name='keras-yolov3-service',
    #                             models=[model],
    #                             deployment_config=aciconfig)

    service.wait_for_deployment(True)
    print(service.state)
Example #28
def main(model_name="deploy", model_version=None, deployment_name="deploy"):
    """
    Return a AciWebservice deploy config
    """
    environment = get_environment(
        name=deployment_name,
        file_path="nd00333/model/deploy/environment.yml",
    )
    logger.info(msg="main", extra={"environment": environment})

    inference_config = InferenceConfig(
        source_directory="nd00333",
        entry_script="model/deploy/score.py",
        environment=environment,
    )
    logger.info(msg="main", extra={"inference_config": inference_config})

    workspace = package_utils.get_workspace()

    deployment_config = AciWebservice.deploy_configuration(
        cpu_cores=1.0,
        memory_gb=8.0,
        auth_enabled=True,
        enable_app_insights=True,
        collect_model_data=False,
    )
    logger.info(msg="main", extra={"deployment_config": deployment_config})

    model = Model(workspace, name=model_name, version=model_version)
    logger.info(msg="main", extra={"model": model})

    service = Model.deploy(
        workspace,
        deployment_name,
        [model],
        inference_config,
        deployment_config,
        overwrite=True,
    )
    logger.info(msg="main", extra={"service": service})

    return service
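Since the ACI configuration above enables key authentication, a caller has to fetch a key before posting to the endpoint. A hedged sketch of exercising the returned service, with the request body left as a placeholder:

import json
import requests

service = main()
service.wait_for_deployment(show_output=True)

key, _ = service.get_keys()  # primary/secondary keys of the ACI webservice
response = requests.post(service.scoring_uri,
                         data=json.dumps({"data": "..."}),  # placeholder payload
                         headers={"Content-Type": "application/json",
                                  "Authorization": f"Bearer {key}"})
print(response.status_code)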
Example #29
def init():
    global X, output, sess
    tf.reset_default_graph()
    model_root = os.getenv('AZUREML_MODEL_DIR')
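    # NOTE: `ws` below is assumed to be a Workspace object defined elsewhere in
    # this script; a deployed scoring script would normally resolve model files
    # from AZUREML_MODEL_DIR (model_root above) rather than query the workspace.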

    model = Model(ws, 'tf-dnn-mnist')

    # the name of the folder in which to look for tensorflow model files
    tf_model_folder = 'model'
    #saver = tf.train.import_meta_graph(
    #    os.path.join(model_root, tf_model_folder, 'mnist-tf.model.meta'))
    saver = tf.train.import_meta_graph(
        Model.get_model_path('tf-dnn-mnist', 5, ws))
    X = tf.get_default_graph().get_tensor_by_name("network/X:0")
    output = tf.get_default_graph().get_tensor_by_name(
        "network/output/MatMul:0")

    sess = tf.Session()
    saver.restore(
        sess, os.path.join(model_root, tf_model_folder, 'tf-dnn-mnist.model'))
Example #30
def main():
    load_dotenv()
    workspace_name = os.environ.get("BASE_NAME") + "-AML-WS"
    resource_group = os.environ.get("BASE_NAME") + "-AML-RG"
    subscription_id = os.environ.get("SUBSCRIPTION_ID")
    tenant_id = os.environ.get("TENANT_ID")
    app_id = os.environ.get("SP_APP_ID")
    app_secret = os.environ.get("SP_APP_SECRET")
    MODEL_NAME = os.environ.get('MODEL_NAME')
    model_data_path = os.environ.get("MODEL_DATA_PATH_DATASTORE")

    ws = get_workspace(workspace_name, resource_group, subscription_id,
                       tenant_id, app_id, app_secret)
    modelName = MODEL_NAME.rstrip('h5') + 'onnx'
    model = Model(workspace=ws, name=modelName)
    print(model)
    model.download()
    ds = ws.get_default_datastore()
    print(ds)
    ds.download(target_path='.', prefix=model_data_path, show_progress=True)