def main():
    """
    Entry point for the PET test-bench runner.

    Sets up file logging under log/, parses command-line options, imports
    the requested driver module by name and runs it.  Optionally runs a
    surrogate-validation driver afterwards.  On success the analysis status
    is marked OK; on TestBenchExecutionError the process exits with code 1
    via _exit_on_failure.
    """
    # Setup the logger.
    if not os.path.isdir('log'):
        os.mkdir('log')
    log = pym.instantiate_logger(os.path.join('log', 'PET_debug.log'))
    # Read and log options from command line.
    (options, args) = parser.parse_args()
    driver_name = options.driver
    validation_driver = options.validation_driver
    log.info('driver : {}, validation_driver[optional] : {}'.format(driver_name, validation_driver))
    # Import driver and exception class.
    driver = __import__(driver_name)
    # Run the driver.
    try:
        driver.main()
        if validation_driver:
            log.info(' ---====== Starting Surrogate Validation ======---')
            __import__(validation_driver).main()
        pym.check_for_invalid_numbers()
        pym.update_analysis_status('OK')
    except TestBenchExecutionError as err:
        # str(err) instead of err.message: BaseException.message was
        # deprecated in Python 2.6 and removed in Python 3.
        _exit_on_failure(str(err))
def execute_test_bench(modelica_tool, report_file):
    """
    Reads in parameters from summary.testresults.json and simulates the test bench.
    When done, writes metrics to summary.testresults.json
    Can raise ModelicaError

    :param modelica_tool: tool wrapper exposing change_parameter(),
        simulate_model() and a result_mat attribute.
    :param report_file: path to the report JSON read for parameters and
        updated with the final analysis status.
    :return: (t_mat, t_post) elapsed-time measurements in seconds.
    """
    log = logging.getLogger()
    # Read in parameters
    parameters = pym.get_parameters_from_report_json(report_file)
    log.info('Parameters : {0}'.format(parameters))
    if parameters:
        modelica_tool.change_parameter(parameters)
    # Simulate model
    modelica_tool.simulate_model()
    t_1 = time.time()
    # TODO: uncomment the line once META-1226 is complete
    ## pym.update_metrics_and_check_limits(modelica_tool.result_mat, 'model_config.json')
    t_mat = time.time() - t_1
    # Run PostProcessing scripts; a failing script is surfaced as a
    # ModelicaError carrying the script's return code.
    try:
        t_post = pym.run_post_scripts(modelica_tool.result_mat, log)
    except subprocess.CalledProcessError as err:
        raise pym.ModelicaError('Post-script execution returned with error.', err.returncode)
    pym.update_analysis_status(new_status="OK", report_file=report_file)
    return t_mat, t_post
def main():
    """
    Entry point for the CAD PET runner.

    Sets up file logging under log/, imports the driver module named on the
    command line and runs it.  On CADException the process exits with code 1
    via _exit_on_failure; otherwise the analysis status is marked OK.
    """
    if not os.path.isdir('log'):
        os.mkdir('log')
    pym.instantiate_logger(os.path.join('log', 'CAD_PET.log'))
    (options, args) = parser.parse_args()
    driver_name = options.driver
    driver = __import__(driver_name)
    try:
        driver.main()
    except CADException as e:
        # str(e) instead of e.message: BaseException.message was
        # deprecated in Python 2.6 and removed in Python 3.
        _exit_on_failure(str(e))
    pym.check_for_invalid_numbers()
    pym.update_analysis_status('OK')
def main():
    """
    Entry point for the CAD PET runner.

    Sets up file logging under log/, imports the driver module named on the
    command line and runs it.  On CADException the process exits with code 1
    via _exit_on_failure; otherwise the analysis status is marked OK.
    """
    if not os.path.isdir("log"):
        os.mkdir("log")
    pym.instantiate_logger(os.path.join("log", "CAD_PET.log"))
    (options, args) = parser.parse_args()
    driver_name = options.driver
    driver = __import__(driver_name)
    try:
        driver.main()
    except CADException as e:
        # str(e) instead of e.message: BaseException.message was
        # deprecated in Python 2.6 and removed in Python 3.
        _exit_on_failure(str(e))
    pym.check_for_invalid_numbers()
    pym.update_analysis_status("OK")
def execute_test_bench(modelica_tool, report_file):
    """
    Reads in parameters from testbench_manifest.json and simulates the test bench.
    When done, writes metrics to testbench_manifest.json
    Can raise ModelicaError

    :param modelica_tool: tool wrapper exposing change_parameter(),
        simulate_model() and a result_mat attribute.
    :param report_file: path to the report JSON read for parameters and
        updated with the final analysis status.
    :return: (t_mat, t_post) elapsed-time measurements in seconds.
    """
    cur_dir = os.getcwd()
    log = logging.getLogger()
    log.debug('inside simulate.py : execute_test_bench, cwd = {0}'.format(cur_dir))
    # Read in parameters
    parameters = pym.get_parameters_from_report_json(report_file)
    log.info('Parameters : {0}'.format(parameters))
    if parameters:
        modelica_tool.change_parameter(parameters)
    # Simulate model
    modelica_tool.simulate_model()
    t_1 = time.time()
    # TODO: uncomment the line once META-1226 is complete
    ## pym.update_metrics_and_check_limits(modelica_tool.result_mat, 'model_config.json')
    t_mat = time.time() - t_1
    # Run PostProcessing scripts
    # Look for this file after post-processing script ran and delete it when starting a new run.
    # META-2909 and META-2784
    pp_failed = os.path.abspath(os.path.join(cur_dir, '..', '_POST_PROCESSING_FAILED.txt'))
    if os.path.isfile(pp_failed):
        log.info('Found {0} from previous run, deleting it...'.format(pp_failed))
        os.remove(pp_failed)
    exception = None
    try:
        t_post = pym.run_post_scripts(modelica_tool.result_mat, log)
    except subprocess.CalledProcessError as err:
        exception = err
    finally:
        pp_output = ''
        if exception:
            # BUG FIX: the original formatted 'err.returncode' here, but
            # 'except ... as err' unbinds 'err' once the handler exits in
            # Python 3 (and relying on the leaked name is fragile in
            # Python 2) — use the explicitly saved 'exception' instead.
            pp_output = 'Post-script execution returned with error. : {0}'.format(exception.returncode)
        if os.path.isfile(pp_failed):
            with open(pp_failed, 'r') as f_in:
                pp_output = '{0}\n POST_PROCESSING_FAILED :\n {1}'.format(pp_output, f_in.read())
        if pp_output:
            raise pym.ModelicaError(pp_output)
    pym.update_analysis_status(new_status="OK", report_file=report_file)
    return t_mat, t_post
def _exit_on_failure(error_message):
    """
    Function for exiting after a failure.

    Writes the message and traceback to _FAILED.txt in ROOT_DIR, marks the
    analysis status FAILED, logs the error and exits with code 1.
    """
    os.chdir(ROOT_DIR)
    # print() is equivalent for a single argument in Python 2 and required
    # syntax in Python 3 (original used the Python-2-only print statement).
    print(error_message)
    import traceback
    # NOTE(review): file opened in binary mode but str text is written —
    # fine on Python 2, would need encoding on Python 3; kept as-is.
    with open('_FAILED.txt', 'wb') as f_out:
        # write() instead of writelines(): passing a single string to
        # writelines() only worked by iterating it character-by-character.
        f_out.write(error_message)
        f_out.write(traceback.format_exc())
    pym.update_analysis_status('FAILED', error_message)
    log = logging.getLogger()
    log.error(error_message)
    log.error("Exception was raised : {0}".format(traceback.format_exc()))
    sys.exit(1)
def _exit_on_failure(error_message):
    """
    Function for exiting after a failure.

    Writes the message, traceback and a pointer to the debug log into
    _FAILED.txt in ROOT_DIR, marks the analysis status FAILED, logs the
    error and exits with code 1.
    """
    os.chdir(ROOT_DIR)
    import traceback
    # NOTE(review): file opened in binary mode but str text is written —
    # fine on Python 2, would need encoding on Python 3; kept as-is.
    with open('_FAILED.txt', 'wb') as f_out:
        # write() instead of writelines(): passing a single string to
        # writelines() only worked by iterating it character-by-character.
        f_out.write(error_message)
        f_out.write(traceback.format_exc())
        f_out.write('See log/PET_debug.log for more info.')
    pym.update_analysis_status('FAILED', error_message)
    log = logging.getLogger()
    log.error(error_message)
    log.error("Exception was raised : {0}".format(traceback.format_exc()))
    sys.exit(1)
def _exit_on_failure(error_message):
    """
    Function for exiting after a failure.

    Writes the message and traceback to _FAILED.txt in ROOT_DIR, marks the
    analysis status FAILED, logs the error and exits with code 1.
    """
    os.chdir(ROOT_DIR)
    # print() is equivalent for a single argument in Python 2 and required
    # syntax in Python 3 (original used the Python-2-only print statement).
    print(error_message)
    import traceback
    # NOTE(review): file opened in binary mode but str text is written —
    # fine on Python 2, would need encoding on Python 3; kept as-is.
    with open("_FAILED.txt", "wb") as f_out:
        # write() instead of writelines(): passing a single string to
        # writelines() only worked by iterating it character-by-character.
        f_out.write(error_message)
        f_out.write(traceback.format_exc())
    pym.update_analysis_status("FAILED", error_message)
    log = logging.getLogger()
    log.error(error_message)
    log.error("Exception was raised : {0}".format(traceback.format_exc()))
    sys.exit(1)
def _exit_on_failure(error_message):
    """
    Function for exiting after a failure.

    Writes the message and traceback to _FAILED.txt in ROOT_DIR, marks the
    analysis status FAILED in testbench_manifest.json, logs the error and
    exits with code 1.
    """
    os.chdir(ROOT_DIR)
    # print() is equivalent for a single argument in Python 2 and required
    # syntax in Python 3 (original used the Python-2-only print statement).
    print(error_message)
    import traceback
    # Capture the traceback once and reuse it (the original assigned
    # the_trace but never used it, re-calling format_exc() twice).
    the_trace = traceback.format_exc()
    # NOTE(review): file opened in binary mode but str text is written —
    # fine on Python 2, would need encoding on Python 3; kept as-is.
    with open('_FAILED.txt', 'wb') as f_out:
        # write() instead of writelines(): passing a single string to
        # writelines() only worked by iterating it character-by-character.
        f_out.write(error_message)
        f_out.write(the_trace)
    pym.update_analysis_status('FAILED', error_message, 'testbench_manifest.json')
    log = logging.getLogger()
    log.error(error_message)
    log.error("Exception was raised : {0}".format(the_trace))
    sys.exit(1)