def _run_flow(self, job_id, config):
    """
    Execute the Flow described by a configuration.

    :param job_id: the job identifier used for monitoring via the MLCP
        (Machine Learning Control Panel).
    :param config: the configuration as Dictionary.
    :return: Dictionary containing the status and response of the flow run.
    """
    # flag the job as running before the flow starts
    database_instance.update_job_running(job_id)

    # hand execution over to the FlowManager; third tuple element is unused here
    flow_result = FlowManager(job_id, config).run()
    status_code, response = flow_result[0], flow_result[1]
    return dict(status_code=status_code, response=response)
def run_flow(self, asset_name, config_path, config_name=None, **kwargs):
    """
    Run a local configuration file on your local computer (ML App Library
    local-environment endpoint).

    :param asset_name: name of the asset to be run.
    :param config_path: path to the configuration file (JSON or python).
    :param config_name: when the configuration file is a python module,
        the variable name inside it holding the configuration.
    :param kwargs: extra keyword arguments forwarded to the FlowManager run.
    :return: None
    """
    job_id = str(uuid.uuid4())
    try:
        config = read_json_file(config_path)
    except Exception:
        # not readable as JSON — fall back to loading the file as a
        # python module and extracting the named configuration variable
        config = self._read_py_file(asset_name, config_path, config_name)
    self._insert_latest_id_in_config(config)
    # only the run ids are needed here; status code and outputs are discarded
    _, run_ids, _ = FlowManager(job_id, config, **kwargs).run()
    self._update_latest_model_id(config, run_ids)
import argparse

from azureml.core import Run

from config import settings
from mlapp.main import MLApp
from mlapp.handlers.wrappers.file_storage_wrapper import file_storage_instance
from mlapp.integrations.aml.utils.run_class import load_config_from_string, tag_and_log_run, tag_and_log_outputs
from mlapp.managers.flow_manager import FlowManager

# command-line interface: the serialized configuration is passed as a string
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, dest='config', help='configuration')
args = parser.parse_args()

# current AzureML run context (fetched once and reused below)
run = Run.get_context()

# pre-processing
config = load_config_from_string(args.config)
tag_and_log_run(config)

# init mlapp
MLApp(settings)

# run config — reuse the run context fetched above instead of calling
# Run.get_context() a second time
_, output_ids, output_data = FlowManager(run.id, config).run()

# post-processing
tag_and_log_outputs(output_ids)
file_storage_instance.postprocessing()
def run_config(configuration):
    """
    Run a configuration as a deployment flow.

    :param configuration: the configuration as Dictionary.
    :return: the run ids produced by the flow.
    """
    MLApp(settings)
    flow_result = FlowManager("deployment", configuration).run()
    # flow_result is (status, ids, outputs); only the ids are returned
    return flow_result[1]
def run_flow_from_config(config):
    """
    Execute the given configuration as a deployment flow.

    :param config: the configuration as Dictionary.
    :return: the full result tuple of the FlowManager run.
    """
    manager = FlowManager("deployment", config)
    return manager.run()
# NOTE(review): `Run` and `MLApp` were used below but never imported in this
# script — both would raise NameError at runtime; imports added.
from azureml.core import Run

from config import settings
from mlapp.main import MLApp
from mlapp.handlers.wrappers.file_storage_wrapper import file_storage_instance
from mlapp.integrations.aml.utils.flow import parse_args, flow_setup, flow_postprocess
from mlapp.integrations.aml.utils.constants import PARSED_ARG_CONFIG, PARSED_ARG_INPUT_DIR, PARSED_ARG_OUTPUT_DIR
from mlapp.integrations.aml.utils.run_class import load_config_from_string, tag_and_log_run, tag_and_log_outputs
from mlapp.managers.flow_manager import FlowManager

# parsing arguments
parsed_args = parse_args()

# pre-processing
config = load_config_from_string(parsed_args[PARSED_ARG_CONFIG])
tag_and_log_run(config)

# current run identification
current_run = Run.get_context()

# init mlapp
MLApp(settings)

# flow setup: stage any upstream jobs' outputs from the input directory
jobs_outputs = flow_setup(current_run.id, config, parsed_args[PARSED_ARG_INPUT_DIR])

# run config
_, output_ids, output_data = FlowManager(current_run.id, config, **jobs_outputs).run()

# post-processing
tag_and_log_outputs(output_ids)
file_storage_instance.postprocessing()
flow_postprocess(config, output_data, parsed_args[PARSED_ARG_OUTPUT_DIR])