def run_model(hyper_parameter_map):
    """Run one model instance end-to-end.

    Changes into the instance directory, runs the pre-hook, runs the model,
    persists the result and the JSON-encoded history, then runs the post-hook.

    Parameters
    ----------
    hyper_parameter_map : dict
        Must contain key 'instance_directory'; the process chdirs there
        before anything else runs.

    Returns
    -------
    tuple
        ``(result, history)`` from ``run()``, or
        ``("SKIP", "HISTORY_EMPTY")`` when ``run_pre()`` requests a skip.
    """
    instance_directory = hyper_parameter_map['instance_directory']
    os.chdir(instance_directory)
    global logger
    logger = log_tools.get_logger(logger, "MODEL RUNNER")
    obj_return = get_obj_return()
    result = run_pre(hyper_parameter_map)
    if result == ModelResult.ERROR:
        print("run_pre() returned ERROR!")
        # sys.exit(), not the interactive-only builtin exit():
        # exit() comes from the site module and may be absent (python -S);
        # sys.exit(1) reliably propagates a nonzero status to the workflow.
        sys.exit(1)
    elif result == ModelResult.SKIP:
        log("run_pre() returned SKIP ...")
        sys.stdout.flush()
        return ("SKIP", "HISTORY_EMPTY")
    else:
        assert (result == ModelResult.SUCCESS)
        # proceed...
    result, history = run(hyper_parameter_map, obj_return)
    runner_utils.write_output(result, instance_directory)
    runner_utils.write_output(
        json.dumps(history, cls=runner_utils.FromNPEncoder),
        instance_directory, 'history.txt')
    run_post(hyper_parameter_map, {})
    log("RUN STOP")
    return (result, history)
# Usage: see how sys.argv is unpacked below:
if __name__ == '__main__':
    logger = log_tools.get_logger(logger, __name__)
    logger.debug("RUN START")
    (
        _,  # The Python program name (unused)
        param_string,
        instance_directory,
        framework,
        runid,
        obj_param,
        benchmark_timeout) = sys.argv
    hyper_parameter_map = runner_utils.init(param_string,
                                            instance_directory,
                                            framework, 'save')
    hyper_parameter_map['model_name'] = os.getenv("MODEL_NAME")
    hyper_parameter_map['experiment_id'] = os.getenv("EXPID")
    hyper_parameter_map['run_id'] = runid
    hyper_parameter_map['timeout'] = benchmark_timeout
    # clear sys.argv so that argparse doesn't object
    sys.argv = ['nt3_tc1_runner']
    # Call to Benchmark!
    # run() returns a (result, history) pair (see run_model above);
    # unpack it so write_output() receives only the result value,
    # not the whole tuple.
    result, history = run(hyper_parameter_map, obj_param)
    runner_utils.write_output(result, instance_directory)
    logger.debug("RUN STOP")