def get_model(ws: Workspace,
              env: Env,
              tagname: Optional[str] = None,
              tagvalue: Optional[str] = None) -> Model:
    """
    Gets a model from the models registered with the AML workspace.
    If a tag/value pair is supplied, uses it to filter.

    :param ws: Current AML workspace
    :param env: Environment variables
    :param tagname: Optional tag name, default is None
    :param tagvalue: Optional tag value, default is None

    :returns: Model

    :raises: ValueError
    """
    if tagname is not None and tagvalue is not None:
        model = Model(ws, name=env.model_name, tags=[[tagname, tagvalue]])
    elif tagname is not None or tagvalue is not None:
        raise ValueError(
            "model_tag_name and model_tag_value should both be supplied "
            "or excluded")
    else:
        model = Model(ws, name=env.model_name)
    return model
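A minimal usage sketch (not part of the original): Workspace.from_config() and the Env instance are placeholders standing in for however your project supplies them.

ws = Workspace.from_config()
env = Env()  # assumed to expose env.model_name
model = get_model(ws, env, tagname="stage", tagvalue="production")
print(model.name, model.version)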
Example #2
    def download_file(self, bucket_name, object_name, file_path, *args,
                      **kwargs):
        """
        Downloads a file from the AML datastore to the local machine.
        :param bucket_name: name of the bucket/container (unused here)
        :param object_name: name of the object/file
        :param file_path: path to the local file (unused here)
        :param args: other arguments containing additional information
        :param kwargs: other keyword arguments containing additional information
        :return: None
        """
        # get run_id from object_name
        current_id = object_name.split('_')[0]

        if not self.downloaded_files.get(current_id):
            # load registered models
            model_name = get_model_register_name(current_id)
            model = Model(self.ws, name=model_name)
            model.download(target_dir=current_id, exist_ok=True)

            # mark current_id as registered
            self.downloaded_files[current_id] = {"path": current_id}

        # move the file from the current_id/AML_MLAPP_FOLDER folder to temporary storage
        if os.path.exists(
                os.path.join(current_id, AML_MLAPP_FOLDER, object_name)):
            shutil.move(
                os.path.join(current_id, AML_MLAPP_FOLDER, object_name),
                self.temporary_storage)
Example #3
def run(raw_data):
    Inputs = pd.DataFrame(ast.literal_eval(json.loads(raw_data)['Inputs']))

    timestamp_column = 'WeekStarting'
    Inputs[timestamp_column] = pd.to_datetime(Inputs[timestamp_column])

    timeseries_id_columns = ['Store', 'Brand']
    data = Inputs \
            .set_index(timestamp_column) \
            .sort_index(ascending=True)
    # Load the model from Azure ML; the latest version is fetched by default.
    model_name = f"prs_{data['Store'].iloc[0]}_{data['Brand'].iloc[0]}"
    model = Model(ws, model_name)
    model.download(exist_ok=True)
    forecaster = joblib.load(model_name)

    # Get predictions; append the store and brand columns to the result.
    ts_id_dict = {
        id_col: str(data[id_col].iloc[0])
        for id_col in timeseries_id_columns
    }
    forecasts = forecaster.forecast(data)
    prediction_df = forecasts.to_frame(name='Prediction')
    prediction_df = prediction_df.reset_index().assign(**ts_id_dict)

    return prediction_df.to_json()
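A sketch of the payload run() expects, inferred from the parsing above; the rows are invented placeholders. Note that 'Inputs' is itself a string, which ast.literal_eval turns back into a list of records:

payload = json.dumps({
    "Inputs": str([
        {"WeekStarting": "1992-07-02", "Store": 1001,
         "Brand": "dominicks", "Quantity": 100},
    ])
})
result_json = run(payload)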
Example #4
    def list_files(self, bucket_name, prefix="", *args, **kwargs):
        """
        Lists files in file storage
        :param bucket_name: name of the bucket/container
        :param prefix: prefix string to search by
        :param args: other arguments containing additional information
        :param kwargs: other keyword arguments containing additional information
        """
        try:
            # get run_id from prefix
            current_id = prefix

            if current_id not in self.downloaded_files:
                # load registered models
                model_name = get_model_register_name(current_id)
                model = Model(self.ws, name=model_name)
                model.download(target_dir=current_id, exist_ok=True)

                # mark current_id as registered
                self.downloaded_files[current_id] = {"path": current_id}

            # get files from current_id folder and temporary_storage folder
            list_files = os.listdir(os.path.join(current_id, AML_MLAPP_FOLDER))
            list_files_extra = [
                os.path.basename(f) for f in glob.glob(
                    os.path.join(self.temporary_storage, current_id + "_*"))
            ]
            list_files.extend(list_files_extra)
            return list_files

        except Exception as e:
            logging.error(e)
            raise e
Example #5
def run(mini_batch):
    print(f'run method start: {__file__}, run({mini_batch})')

    timestamp_column = 'WeekStarting'

    timeseries_id_columns = ['Store', 'Brand']
    data = mini_batch \
            .set_index(timestamp_column) \
            .sort_index(ascending=True)
    # Load the model from Azure ML; the latest version is fetched by default.
    model_name = f"prs_{data['Store'].iloc[0]}_{data['Brand'].iloc[0]}"
    model = Model(ws, model_name)
    model.download(exist_ok=True)
    forecaster = joblib.load(model_name)

    # Get predictions; append the store and brand columns to the result.
    ts_id_dict = {
        id_col: str(data[id_col].iloc[0])
        for id_col in timeseries_id_columns
    }
    forecasts = forecaster.forecast(data)
    prediction_df = forecasts.to_frame(name='Prediction')
    prediction_df = prediction_df.reset_index().assign(**ts_id_dict)

    return prediction_df
Example #6
def load_model_from_registry(model_name, model_version):
    logging.info(
        f' >> load_model_from_registry({model_name},{model_version}) ...')
    try:
        aml_model = Model(workspace, name=model_name, version=model_version)
        model_path = aml_model.download(target_dir='.', exist_ok=True)
        return joblib.load(model_path)
    except Exception as e:
        logging.error(e)
        raise
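A hypothetical call; the model name and version are placeholders, and the returned object is whatever joblib deserializes (e.g. a scikit-learn estimator):

clf = load_model_from_registry("my-sklearn-model", 3)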
Example #7
    def get_checkpoints_from_model(model_id: str, workspace: Workspace, download_path: Path) -> List[Path]:
        if len(model_id.split(":")) != 2:
            raise ValueError(
                f"model_id should be in the form 'model_name:version', got {model_id}")

        model_name, model_version = model_id.split(":")
        model = Model(workspace=workspace, name=model_name, version=int(model_version))
        model_path = Path(model.download(str(download_path), exist_ok=True))
        model_inference_config = read_model_inference_config(model_path / MODEL_INFERENCE_JSON_FILE_NAME)
        checkpoint_paths = [model_path / x for x in model_inference_config.checkpoint_paths]
        return checkpoint_paths
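A usage sketch, assuming the model was registered with the model-inference config JSON at its root, as the code above requires; the model id and download path are placeholders:

checkpoints = get_checkpoints_from_model("MyModel:4", workspace, Path("./downloads"))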
Example #8
def main(event: func.EventGridEvent):
    result = json.dumps({
        'id': event.id,
        'data': event.get_json(),
        'topic': event.topic,
        'subject': event.subject,
        'event_type': event.event_type,
    })

    logging.info('Python EventGrid trigger processed an event: %s', result)

    # get service principal from env variables
    sp_auth = ServicePrincipalAuthentication(
        tenant_id=os.getenv('TENANT_ID', ''),
        service_principal_id=os.getenv('SP_ID', ''),
        service_principal_password=os.getenv('SP_PASSWORD', ''))

    # parse the Azure subscription ID, resource group name, and ML workspace name from the Event Grid topic
    sub_tag = "subscriptions"
    rg_tag = "resourceGroups"
    ws_provider_tag = "providers/Microsoft.MachineLearningServices/workspaces"

    subscription_id = event.topic.split("{}/".format(sub_tag),
                                        1)[1].split("/{}".format(rg_tag), 1)[0]
    resource_group_name = event.topic.split("{}/".format(rg_tag), 1)[1].split(
        "/{}".format(ws_provider_tag), 1)[0]
    workspace_name = event.topic.split("{}/".format(ws_provider_tag),
                                       1)[1].split("/", 1)[0]

    # get workspace
    ws = Workspace.get(name=workspace_name,
                       auth=sp_auth,
                       subscription_id=subscription_id,
                       resource_group=resource_group_name)

    logging.info(
        'SubscriptionID = %s; ResourceGroup = %s; WorkSpace = %s; Location = %s',
        ws.subscription_id, ws.resource_group, ws.name, ws.location)

    # get model from event data
    event_data = event.get_json()
    model_id = '{}:{}'.format(event_data['modelName'],
                              event_data['modelVersion'])
    model = Model(ws, id=model_id)
    logging.info('Model name = %s', model.name)

    # Perform a no-code deploy in a fire-and-forget way: we don't need to hold the Functions
    # App resource, and we respond to the Event Grid request promptly so that Event Grid
    # won't time out and retry.
    service_name = 'acitest-{}-{}'.format(event_data['modelName'],
                                          event_data['modelVersion'])
    service = Model.deploy(ws, service_name, [model])
    logging.info('Start deploying service %s to ACI', service.name)
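For reference, the Event Grid topic parsed above is an Azure resource ID of this shape (angle brackets mark placeholders):

/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.MachineLearningServices/workspaces/<workspace-name>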
Example #9
            def register_model(model_name, model_path):
                model_config = next(
                    iter(
                        filter(lambda x: x["name"] == model_name,
                               self.output_reg_models)))

                tags = model_config.get("tags")
                description = model_config.get("description")

                Model.register(workspace=ws,
                               model_path=model_path,
                               model_name=model_name,
                               tags=tags,
                               description=description)
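Each entry of self.output_reg_models consumed above presumably looks like this (inferred from the lookups in the code, not from the original source):

{"name": "my-model", "tags": {"stage": "dev"}, "description": "short description"}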
Example #10
def run(project_root: Optional[Path] = None) -> None:
    """
    Runs inference on an image. This can be invoked in one of two ways:
    (1) when there is already a model in the project_root directory; this is the case when
    we arrive here from python_wrapper.py
    (2) when we need to download a model, which must be specified by the --model-id switch.
    This is the case when this script is invoked by submit_for_inference.py.
    :param project_root: the directory in which the model (including code) is located.
    Must be None if and only if the --model-id switch is provided.
    """
    parser = argparse.ArgumentParser(
        description='Execute code baked into a Docker Container from AzureML ScriptRunConfig')
    parser.add_argument('--spawnprocess',
                        dest='spawnprocess',
                        action='store',
                        type=str)
    parser.add_argument('--data-folder',
                        dest='data_folder',
                        action='store',
                        type=str)
    parser.add_argument('--model-id',
                        dest='model_id',
                        action='store',
                        type=str)
    known_args, unknown_args = parser.parse_known_args()
    if known_args.model_id:
        if project_root:
            raise ValueError(
                "--model-id should not be provided when project_root is specified"
            )
        workspace = Run.get_context().experiment.workspace
        model = Model(workspace=workspace, id=known_args.model_id)
        current_dir = Path(".")
        project_root = Path(model.download(str(current_dir))).absolute()
    elif not project_root:
        raise ValueError(
            "--model-id must be provided when project_root is unspecified")
    script_path = Path('run_score.sh')
    write_script(parser, script_path, project_root)
    print(f"Running {script_path} ...")
    env = dict(os.environ.items())
    # Work around https://github.com/pytorch/pytorch/issues/37377
    env['MKL_SERVICE_FORCE_INTEL'] = '1'
    code = spawn_and_monitor_subprocess(process='bash',
                                        args=[str(script_path)],
                                        env=env)
    sys.exit(code)
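A hypothetical invocation of this wrapper; the script name and all argument values are placeholders:

python run_wrapper.py --spawnprocess=python --data-folder=. --model-id=MyModel:1 score.py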
Example #11
def init():
    global model
    global username
    global git_repo_url
    global gitAPI
    global jenkins_key
    global jenkins_url
    global sql_server
    global sql_database
    global sql_username
    global sql_password
    global used_file_extensions

    # TODO: Change model name
    model_name = 'TestAutoModel'
    # TODO: Update list of file extensions used in your project
    used_file_extensions = ['swift', 'pdf']

    model_path = Model.get_model_path(model_name=model_name)
    model = joblib.load(model_path)

    load_dotenv(verbose=True, dotenv_path=Path('.') / 'server_files' / 'env')
    username = os.getenv("USERNAME")
    git_repo_url = os.getenv("GIT_REPO_URL")
    gitAPI = os.getenv("GIT_KEY")
    jenkins_key = os.getenv("JENKINS_KEY")
    jenkins_url = os.getenv("JENKINS_URL")
    sql_server = os.getenv("SQL_SERVER")
    sql_database = os.getenv("SQL_DATABASE")
    sql_username = os.getenv("SQL_USERNAME")
    sql_password = os.getenv("SQL_PASSWORD")
Example #12
def init():
    # Runs when the pipeline step is initialized
    global model

    # load the model
    model_path = Model.get_model_path('classification_model')
    model = joblib.load(model_path)
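In an AzureML scoring script, init() is paired with a run() entry point. A minimal matching run() sketch, assuming a scikit-learn classifier and a JSON body with a "data" field (neither detail comes from the original):

import json
import numpy as np

def run(raw_data):
    # parse the request body and score it with the model loaded in init()
    data = np.array(json.loads(raw_data)["data"])
    predictions = model.predict(data)
    return predictions.tolist()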
Example #13
def init():
    # Runs when the pipeline step is initialized
    global model

    # load the model
    model_path = Model.get_model_path('diabetes_model')
    model = joblib.load(model_path)
Example #14
def init():
    global model_p
    print("Executed")
    model_path = Model.get_model_path('optimal_model.joblib')
    #model_path = Model.get_model_path('model_automl')
    print(model_path)
    model_p = joblib.load(model_path)
Example #15
def download_conda_dependency_files(model: Model, dir_path: Path) -> List[Path]:
    """
    Identifies all the files with basename "environment.yml" in the model and downloads them
    to tmp_environment_001.yml, tmp_environment_002.yml, etc. Normally there will be one of these
    if the model was built directly from a clone of the InnerEye-DeepLearning repo, or two if
    it was built from the user's own directory which had InnerEye-DeepLearning as a submodule.
    :param model: model to search in
    :param dir_path: folder to write the tmp...yml files into
    :return: a list of the tmp...yml files created
    """
    url_dict = model.get_sas_urls()
    downloaded: List[Path] = []
    for path, url in url_dict.items():
        if Path(path).name == ENVIRONMENT_YAML_FILE_NAME:
            target_path = dir_path / f"tmp_environment_{len(downloaded) + 1:03d}.yml"
            target_path.write_bytes(requests.get(url, allow_redirects=True).content)
            # Remove additional information from the URL to make it more legible
            index = url.find("?")
            if index > 0:
                url = url[:index]
            logging.info(f"Downloaded {target_path} from {url}")
            downloaded.append(target_path)
    if not downloaded:
        logging.warning(f"No {ENVIRONMENT_YAML_FILE_NAME} files found in the model!")
    return downloaded
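A usage sketch, assuming ENVIRONMENT_YAML_FILE_NAME is "environment.yml" as in InnerEye; the model id and the temporary target directory are placeholders:

model = Model(workspace, id="MyModel:2")
yml_files = download_conda_dependency_files(model, Path(tempfile.mkdtemp()))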
Example #16
def upload_model(ws: Workspace, config: MoveModelConfig) -> Model:
    """
    Uploads an InnerEye model to an AzureML workspace
    :param ws: The AzureML workspace
    :param config: move config
    :return: imported Model
    """
    model_path, environment_path = config.get_paths()
    with open(model_path / MODEL_JSON, 'r') as f:
        model_dict = json.load(f)

    # Find the folder containing the final model.
    final_model_path = model_path / FINAL_MODEL_FOLDER
    full_model_path = (final_model_path if final_model_path.exists()
                       else model_path / FINAL_ENSEMBLE_MODEL_FOLDER)

    new_model = Model.register(ws,
                               model_path=str(full_model_path),
                               model_name=model_dict['name'],
                               tags=model_dict['tags'],
                               properties=model_dict['properties'],
                               description=model_dict['description'])
    env = Environment.load_from_directory(str(environment_path))
    env.register(workspace=ws)
    print(f"Environment {env.name} registered")
    return new_model
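The model.json read above pairs with download_model (Example #18 below), which writes model.serialize() to the same file; the keys consumed here are:

{"name": "...", "tags": {...}, "properties": {...}, "description": "..."}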
Example #17
def init():
    global g_tf_sess

    svc_pr_password = "******"

    svc_pr = ServicePrincipalAuthentication(
        tenant_id="72f988bf-86f1-41af-91ab-2d7cd011db47",
        service_principal_id="8a3ddafe-6dd6-48af-867e-d745232a1833",
        service_principal_password=svc_pr_password)

    ws = Workspace(
        subscription_id="c46a9435-c957-4e6c-a0f4-b9a597984773",
        resource_group="mlops",
        workspace_name="gputraining",
        auth=svc_pr
        )
    model_root = os.getenv('AZUREML_MODEL_DIR')
    # Pull down the model from the workspace
    model_path = Model.get_model_path("tf-dnn-mnist")
    tf_model_folder = 'model'
    # Create a model folder in the current directory
    os.makedirs('./outputs', exist_ok=True)
    os.makedirs('./outputs/model', exist_ok=True)

    # Construct a graph to execute
    tf.reset_default_graph()
    saver = tf.train.import_meta_graph(os.path.join(model_path, 'tf-dnn-mnist.meta'))
    g_tf_sess = tf.Session()
    #saver.restore(g_tf_sess, os.path.join(model_path, tf_model_folder, 'tf-dnn-mnist.model'))
    saver.restore(g_tf_sess, os.path.join(model_path, 'tf-dnn-mnist'))
Example #18
def download_model(ws: Workspace, config: MoveModelConfig) -> Model:
    """
    Downloads an InnerEye model from an AzureML workspace
    :param ws: The AzureML workspace
    :param config: move config
    :return: the exported Model
    """
    model = Model(ws, id=config.model_id)
    model_path, environment_path = config.get_paths()
    with open(model_path / MODEL_JSON, 'w') as f:
        json.dump(model.serialize(), f)
    model.download(target_dir=str(model_path))
    env_name = model.tags.get(PYTHON_ENVIRONMENT_NAME)
    environment = ws.environments.get(env_name)
    environment.save_to_directory(str(environment_path), overwrite=True)
    return model
Example #19
def evaluate_model():
    all_runs = exp.get_runs(properties={
        "release_id": release_id,
        "run_type": "train"
    },
                            include_children=True)
    # print(f'Search parameters: properties="release_id": {release_id}, "run_type": "train"')
    # print(f'Experiment :{exp}')

    # li_test = list(all_runs)
    # print(f'li_test: {li_test}')

    # all_runs contains references to every run that satisfied the property values in the query.
    # The list of runs is returned in descending order, so the first value is the most recent run
    new_model_run = next(all_runs)
    new_model_run_id = new_model_run.id
    print(f'New Run found with Run ID of: {new_model_run_id}')

    new_model_run = Run(exp, run_id=new_model_run_id)
    new_model_acc = new_model_run.get_metrics().get("final-accuracy")

    try:
        # Get the most recently registered model; we assume that
        # is the model in production.
        # Compare its logged run metrics with those of the newly
        # trained model.
        model_list = Model.list(ws)
        production_model = max(model_list, key=lambda m: m.created_time)
        production_model_run_id = production_model.tags.get("run_id")
        run_list = exp.get_runs()

        # Get the run history for both production model and
        # newly trained model and compare final-accuracy
        production_model_run = Run(exp, run_id=production_model_run_id)

        production_model_acc = production_model_run.get_metrics().get(
            "final-accuracy")

        print(
            "Current Production model accuracy: {}, New trained model accuracy: {}"
            .format(production_model_acc, new_model_acc))

        promote_new_model = False
        if new_model_acc > production_model_acc:
            promote_new_model = True
            print(
                "New trained model performs better, thus it will be registered"
            )
    except Exception:
        promote_new_model = True
        print("This is the first model to be trained, \
            thus nothing to evaluate for now")

    return promote_new_model, new_model_run, new_model_acc
Example #20
def submit_for_inference(args: SubmitForInferenceConfig,
                         azure_config: AzureConfig) -> Optional[Path]:
    """
    Create and submit an inference to AzureML, and optionally download the resulting segmentation.
    :param azure_config: An object with all necessary information for accessing Azure.
    :param args: configuration, see SubmitForInferenceConfig
    :return: path to downloaded segmentation on local disc, or None if none.
    """
    logging.info(f"Building Azure configuration from {args.settings}")
    logging.info("Getting workspace")
    workspace = azure_config.get_workspace()
    logging.info("Identifying model")
    model = Model(workspace=workspace, id=args.model_id)
    model_id = model.id
    logging.info(f"Identified model {model_id}")
    source_directory = tempfile.TemporaryDirectory()
    source_directory_name = source_directory.name
    logging.info(
        f"Building inference run submission in {source_directory_name}")
    source_directory_path = Path(source_directory_name)
    copy_image_file(args.image_file,
                    source_directory_path / DEFAULT_DATA_FOLDER)
    # We copy over run_scoring.py, and score.py as well in case the model we're using
    # does not have sufficiently recent versions of those files.
    for base in ["run_scoring.py", "score.py"]:
        shutil.copyfile(base, str(source_directory_path / base))
    source_config = SourceConfig(
        root_folder=source_directory_name,
        entry_script=str(source_directory_path / "run_scoring.py"),
        script_params={
            "--data-folder": ".",
            "--spawnprocess": "python",
            "--model-id": model_id,
            "score.py": ""
        },
        conda_dependencies_files=download_conda_dependency_files(
            model, source_directory_path))
    estimator = create_estimator_from_configs(workspace, azure_config,
                                              source_config, [])
    exp = Experiment(workspace=workspace, name=args.experiment_name)
    run = exp.submit(estimator)
    logging.info(f"Submitted run {run.id} in experiment {run.experiment.name}")
    logging.info(f"Run URL: {run.get_portal_url()}")
    if not args.keep_upload_folder:
        source_directory.cleanup()
        logging.info(f"Deleted submission directory {source_directory_name}")
    if args.download_folder is None:
        return None
    logging.info("Awaiting run completion")
    run.wait_for_completion()
    logging.info(f"Run has completed with status {run.get_status()}")
    download_path = choose_download_path(args.download_folder)
    logging.info(f"Attempting to download segmentation to {download_path}")
    run.download_file(DEFAULT_RESULT_IMAGE_NAME, str(download_path))
    if download_path.exists():
        logging.info(f"Downloaded segmentation to {download_path}")
    else:
        logging.warning("Segmentation NOT downloaded")
    return download_path
Example #21
def run() -> None:
    """
    Downloads a model from AzureML, and starts the score script (usually score.py) in the root folder of the model.
    Downloading the model is only supported if the present code is running inside of AzureML. When running outside
    of AzureML, the model must have been downloaded beforehand into the folder given by the model-folder argument.
    The script is executed with the current Python interpreter.
    If the model requires a specific Conda environment to run in, the caller of this script needs to ensure
    that this has been set up correctly (taking the environment.yml file stored in the model).
    All arguments that are not recognized by the present code will be passed through to `score.py` unmodified.
    Example arguments:
        download_model_and_run_scoring.py --model-id=Foo:1 score.py --foo=1 --bar
    This would attempt to download version 1 of model Foo, and then start the script score.py in the model's root
    folder. Arguments --foo and --bar are passed through to score.py
    """
    parser = argparse.ArgumentParser(description='Execute code inside of an AzureML model')
    # Use argument names with dashes here. The rest of the codebase uses _ as the separator, meaning that there
    # can't be a clash of names with arguments that are passed through to score.py
    parser.add_argument('--model-folder', dest='model_folder', action='store', type=str)
    parser.add_argument('--model-id', dest='model_id', action='store', type=str)
    known_args, unknown_args = parser.parse_known_args()
    model_folder = known_args.model_folder or "."
    if known_args.model_id:
        current_run = Run.get_context()
        if not hasattr(current_run, 'experiment'):
            raise ValueError("The model-id argument can only be used inside AzureML. Please drop the argument, and "
                             "supply the downloaded model in the model-folder.")
        workspace = current_run.experiment.workspace
        model = Model(workspace=workspace, id=known_args.model_id)
        # Download the model from AzureML into a sub-folder of model_folder
        model_folder = str(Path(model.download(model_folder)).absolute())
    env = dict(os.environ.items())
    # Work around https://github.com/pytorch/pytorch/issues/37377
    env['MKL_SERVICE_FORCE_INTEL'] = '1'
    # The model should include all necessary code, hence point the Python path to its root folder.
    env['PYTHONPATH'] = model_folder
    if not unknown_args:
        raise ValueError("No arguments specified for starting the scoring script.")
    score_script = Path(model_folder) / unknown_args[0]
    score_args = [str(score_script), *unknown_args[1:]]
    if not score_script.exists():
        raise ValueError(f"The specified entry script {score_args[0]} does not exist in {model_folder}")
    print(f"Starting Python with these arguments: {' '.join(score_args)}")
    code, stdout = spawn_and_monitor_subprocess(process=sys.executable, args=score_args, env=env)
    if code != 0:
        print(f"Python terminated with exit code {code}. Stdout: {os.linesep.join(stdout)}")
    sys.exit(code)
Example #22
def get_most_recent_model(fallback_run_id_for_local_execution: str = FALLBACK_SINGLE_RUN) -> Model:
    """
    Gets the string name of the most recently executed AzureML run, extracts which model that run had registered,
    and return the instantiated model object.
    :param fallback_run_id_for_local_execution: A hardcoded AzureML run ID that is used when executing this code
    on a local box, outside of Azure build agents.
    """
    model_id = get_most_recent_model_id(fallback_run_id_for_local_execution=fallback_run_id_for_local_execution)
    return Model(workspace=get_default_workspace(), id=model_id)
Example #23
def init():
    global model
    # AZUREML_MODEL_DIR is an environment variable created during deployment. Join this path with the filename of the model file.
    # It holds the path to the directory that contains the deployed model (./azureml-models/$MODEL_NAME/$VERSION).
    # If there are multiple models, this value is the path to the directory containing all deployed models (./azureml-models).
    model_path = Model.get_model_path('new_model')
    #model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'trained_model.pkl')
    # Deserialize the model file back into a sklearn model
    model = joblib.load(model_path)
Example #24
def init():

    # retrieve the path to the model file using the model name
    model_name = 'automl_best_model'

    global model

    model_path = Model.get_model_path(model_name)
    model = joblib.load(model_path)
Example #25
def init():
    global model
    global inputs_dc, prediction_dc
    # The AZUREML_MODEL_DIR environment variable indicates
    # a directory containing the model file you registered.
    model_path = Model.get_model_path(model_name="model")
    model = joblib.load(model_path)
    inputs_dc = ModelDataCollector("sample-model", designation="inputs", feature_names=["feat1", "feat2", "feat3", "feat4"])
    prediction_dc = ModelDataCollector("sample-model", designation="predictions", feature_names=["prediction"])
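A sketch of how the two collectors are typically used in the matching run() entry point; the request shape and return handling are assumptions, not from the original:

import json

def run(raw_data):
    data = json.loads(raw_data)["data"]
    inputs_dc.collect(data)          # log the model inputs
    result = model.predict(data)
    prediction_dc.collect(result)    # log the predictions
    return result.tolist()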
Example #26
def get_model_filename(run, model_name, model_path):
    model = Model(run.experiment.workspace, model_name)
    if "model_file_name" in model.tags:
        return model.tags["model_file_name"]
    is_pkl = True
    if model.tags.get("algorithm") == "TCNForecaster" or os.path.exists(
            os.path.join(model_path, "model.pt")):
        is_pkl = False
    return "model.pkl" if is_pkl else "model.pt"
Example #27
def registerModel(workspace, experiment, model_name, model_file):
    '''
        Search an existing AMLS workspace for models. If one is found, return it;
        otherwise create and register a new model.

        If the parameter model_file points to a file on disk (existence is checked), then that
        file is used to register a new model. If not, a new dummy pkl file is generated.

        PARAMS:
            workspace        : azureml.core.Workspace   : Existing AMLS Workspace
            experiment       : azureml.core.Experiment  : Existing AMLS Experiment
            model_name       : String                   : The name of the model to register
            model_file       : String                   : This is one of two values
                                                            1. Name of a pkl file to create (dummy for RTS)
                                                            2. Full path to a pkl model file that is in the same
                                                               directory as the running script.

        RETURNS:
            azureml.core.Model
    '''

    return_model = None
    # If the model already exists, just return it.
    models = Model.list(workspace)
    if models:
        for model in models:
            if model.name == model_name:
                print("Returning existing model....", model_name)
                return_model = model
                break

    if not return_model:
        # Create it.
        print("Creating new  model....")
        run = experiment.start_logging()
        run.log("Just simply dumping somethign in", True)

        # If the file does not exist, create a dummy model file.
        if not os.path.exists(model_file):
            createPickle(model_file)

        run.upload_file(name='outputs/' + model_file,
                        path_or_stream='./' + model_file)

        # Complete tracking
        run.complete()

        return_model = run.register_model(model_name=model_name,
                                          model_path="outputs/" + model_file)

    return return_model
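A hypothetical call; the workspace, experiment, and names are placeholders:

model = registerModel(ws, experiment, "rts-dummy-model", "dummy_model.pkl")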
Example #28
    def _register(self, run_id, asset_name, asset_label=None):
        """
        This method registers a new model as an AML reference.
        :param run_id: run's identifier
        :param asset_name: name of asset of the current run
        :param asset_label: name of label of the current run
        :return: None
        """
        tags = {"run_id": run_id, "asset_name": asset_name}

        if asset_label is not None:
            tags["asset_label"] = asset_label

        register_path = os.path.join(OUTPUTS_FOLDER, AML_MLAPP_FOLDER)
        Model.register(self.ws,
                       model_path=register_path,
                       model_name=get_model_register_name(run_id),
                       tags=tags,
                       description=asset_name)
Example #29
    def register(self, validated_model_folder, registered_model_folder,
                 azure_ml_logs_provider, web_service_deployer):

        IGNORE_TRAIN_STEP = azure_ml_logs_provider.get_tag_from_brother_run(
            "prep_data.py", "IGNORE_TRAIN_STEP")
        if IGNORE_TRAIN_STEP == True:
            print("Ignore register step")
            self._execute_sampling_pipeline()
            print("launch sampling state")
            return

        _, classifier = disc_network()
        classifier_name = "classifier.hdf5"
        validated_model_file = os.path.join(validated_model_folder,
                                            classifier_name)
        classifier.load_weights(validated_model_file)

        self.run.upload_file(name=self.config.MODEL_NAME,
                             path_or_stream=validated_model_file)

        #_ = self.run.register_model(model_name=self.config.MODEL_NAME,
        #                        tags={'Training context':'Pipeline'},
        #                        model_path=validated_model_file)

        Model.register(workspace=self.run.experiment.workspace,
                       model_path=validated_model_file,
                       model_name=self.config.MODEL_NAME,
                       tags={'Training context': 'Pipeline'})

        acc = azure_ml_logs_provider.get_log_from_brother_run(
            "eval_model.py", "acc")
        print("acc :", acc)
        # deploy the model
        if web_service_deployer.to_deploy(acc):
            print("deploying...")
            web_service_deployer.deploy()
            print("model deployed")

        # not that important, apart from the test
        registered_model_file = os.path.join(registered_model_folder,
                                             classifier_name)
        os.makedirs(registered_model_folder)
        _ = shutil.copy(validated_model_file, registered_model_file)
Example #30
def init():
    global g_tf_sess

    # pull down model from workspace
    model_path = Model.get_model_path("mnist")

    # construct the graph to execute
    tf.reset_default_graph()
    saver = tf.train.import_meta_graph(os.path.join(model_path, 'mnist-tf.model.meta'))
    g_tf_sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
    saver.restore(g_tf_sess, os.path.join(model_path, 'mnist-tf.model'))