def test_get_latest_model():
    """Check if get_latest_model works."""
    basename = "model"
    model_folder = "/etc"
    # /etc contains no model files, so the lookup must come up empty.
    assert utils.get_latest_model(model_folder, basename) is None
    # Smoke-test the lookup on a folder that does hold models.
    small_folder = os.path.join(utils.get_project_root(),
                                "models/small-baseline")
    utils.get_latest_model(small_folder, basename)
def get_latest_model_test():
    """Check if get_latest_model works."""
    basename = "model"
    model_folder = "/etc"
    # No models live in /etc, so the helper should report none.
    result = utils.get_latest_model(model_folder, basename)
    nose.tools.assert_equal(result, None)
    # Smoke-test the lookup on a folder that does hold models.
    small = os.path.join(utils.get_project_root(),
                         "models/small-baseline")
    utils.get_latest_model(small, basename)
def get_test_results(model_folder, basename, test_file):
    """Evaluate the latest model from ``model_folder`` on ``test_file``.

    Parameters
    ----------
    model_folder : str
        Folder that contains trained models.
    basename : str
        Basename of the model files to look for.
    test_file : str
        Path to the file with the test data.

    Returns
    -------
    str or None
        Path to the evaluation log file, or ``None`` when no model with
        ``basename`` exists in ``model_folder``.
    """
    model_src = utils.get_latest_model(model_folder, basename)
    if model_src is None:
        logging.error("No model with basename '%s' found in '%s'.",
                      basename,
                      model_folder)
        return None  # explicit: callers get None when no model exists
    _, model_use = tempfile.mkstemp(suffix='.json', text=True)
    try:
        utils.create_adjusted_model_for_percentages(model_src, model_use)
        # Start evaluation
        project_root = utils.get_project_root()
        time_prefix = time.strftime("%Y-%m-%d-%H-%M")
        logging.info("Evaluate '%s' with '%s'...", model_src, test_file)
        logfile = os.path.join(project_root,
                               "logs/%s-error-evaluation.log" % time_prefix)
        logging.info('Write log to %s...', logfile)
        with open(logfile, "w") as log, open(model_use, "r") as model_src_p:
            p = subprocess.Popen([utils.get_nntoolkit(), 'run',
                                  '--batch-size', '1', '-f%0.4f', test_file],
                                 stdin=model_src_p,
                                 stdout=log)
            ret = p.wait()
            if ret != 0:
                logging.error("nntoolkit finished with ret code %s", str(ret))
                sys.exit(-1)
        return logfile
    finally:
        # Always remove the temporary model file — the original code
        # leaked it whenever the subprocess failed, because sys.exit()
        # raises SystemExit before reaching os.remove().
        os.remove(model_use)
def get_test_results(model_folder, basename, test_file):
    """Evaluate the latest model from ``model_folder`` on ``test_file``.

    Parameters
    ----------
    model_folder : str
        Folder that contains trained models.
    basename : str
        Basename of the model files to look for.
    test_file : str
        Path to the file with the test data.

    Returns
    -------
    str or None
        Path to the evaluation log file, or ``None`` when no model with
        ``basename`` exists in ``model_folder``.
    """
    model_src = utils.get_latest_model(model_folder, basename)
    if model_src is None:
        logging.error("No model with basename '%s' found in '%s'.",
                      basename,
                      model_folder)
        return None  # explicit: callers get None when no model exists
    _, model_use = tempfile.mkstemp(suffix='.json', text=True)
    try:
        utils.create_adjusted_model_for_percentages(model_src, model_use)
        # Start evaluation
        project_root = utils.get_project_root()
        time_prefix = time.strftime("%Y-%m-%d-%H-%M")
        logging.info("Evaluate '%s' with '%s'...", model_src, test_file)
        logfile = os.path.join(project_root,
                               "logs/%s-error-evaluation.log" % time_prefix)
        logging.info('Write log to %s...', logfile)
        with open(logfile, "w") as log, open(model_use, "r") as model_src_p:
            p = subprocess.Popen([
                utils.get_nntoolkit(), 'run',
                '--batch-size', '1', '-f%0.4f', test_file
            ], stdin=model_src_p, stdout=log)
            ret = p.wait()
            if ret != 0:
                logging.error("nntoolkit finished with ret code %s", str(ret))
                sys.exit(-1)
        return logfile
    finally:
        # Always remove the temporary model file — previously it leaked
        # whenever the subprocess failed, because sys.exit() raises
        # SystemExit before os.remove() was reached.
        os.remove(model_use)
def get_test_results(model_folder, basename, test_file):
    """Evaluate the latest model from ``model_folder`` on ``test_file``.

    Parameters
    ----------
    model_folder : str
        Folder that contains trained models.
    basename : str
        Basename of the model files to look for.
    test_file : str
        Path to the file with the test data.

    Returns
    -------
    str or None
        Path to the evaluation log file, or ``None`` when no model with
        ``basename`` exists in ``model_folder``.
    """
    model_src = utils.get_latest_model(model_folder, basename)
    if model_src is None:
        logger.error(
            f"No model with basename '{basename}' found in '{model_folder}'.")
        return None  # explicit: callers get None when no model exists
    _, model_use = tempfile.mkstemp(suffix=".json", text=True)
    try:
        utils.create_adjusted_model_for_percentages(model_src, model_use)
        # Start evaluation
        project_root = utils.get_project_root()
        time_prefix = time.strftime("%Y-%m-%d-%H-%M")
        logger.info(f"Evaluate '{model_src}' with '{test_file}'...")
        logfile = os.path.join(project_root,
                               "logs/%s-error-evaluation.log" % time_prefix)
        logger.info(f"Write log to {logfile}...")
        with open(logfile, "w") as log, open(model_use) as model_src_p:
            p = subprocess.Popen(
                [
                    utils.get_nntoolkit(),
                    "run",
                    "--batch-size",
                    "1",
                    "-f%0.4f",
                    test_file,
                ],
                stdin=model_src_p,
                stdout=log,
            )
            ret = p.wait()
            if ret != 0:
                logger.error(f"nntoolkit finished with ret code {ret}")
                sys.exit(-1)
        return logfile
    finally:
        # Always remove the temporary model file — previously it leaked
        # whenever the subprocess failed, because sys.exit() raises
        # SystemExit before os.remove() was reached.
        os.remove(model_use)
def test_execution():
    """Test if the functions execute at all."""
    utils.get_project_root()
    utils.get_latest_model(".", "model")
    utils.get_latest_working_model(".")
    utils.get_latest_successful_run(".")
    # (milliseconds, expected human-readable rendering)
    readable_time_cases = [
        (123, "123ms"),
        (1000 * 30, "30s 0ms"),
        (1000 * 60, "1 minutes 0s 0ms"),
        (1000 * 60 * 60, "1h, 0 minutes 0s 0ms"),
        (2 * 1000 * 60 * 60, "2h, 0 minutes 0s 0ms"),
        (25 * 1000 * 60 * 60 + 3, "25h, 0 minutes 0s 3ms"),
    ]
    for millis, rendered in readable_time_cases:
        assert utils.get_readable_time(millis) == rendered
    utils.print_status(3, 1, 123)
    utils.get_nntoolkit()
    utils.get_database_config_file()
    utils.get_database_configuration()
    assert utils.sizeof_fmt(1) == "1.0 bytes"
    assert utils.sizeof_fmt(1111) == "1.1 KB"
def execution_test():
    """Test if the functions execute at all."""
    utils.get_project_root()
    utils.get_latest_model(".", "model")
    utils.get_latest_working_model(".")
    utils.get_latest_successful_run(".")
    # (milliseconds, expected human-readable rendering)
    readable_time_cases = [
        (123, "123ms"),
        (1000 * 30, "30s 0ms"),
        (1000 * 60, "1 minutes 0s 0ms"),
        (1000 * 60 * 60, "1h, 0 minutes 0s 0ms"),
        (2 * 1000 * 60 * 60, "2h, 0 minutes 0s 0ms"),
        (25 * 1000 * 60 * 60 + 3, "25h, 0 minutes 0s 3ms"),
    ]
    for millis, rendered in readable_time_cases:
        nose.tools.assert_equal(utils.get_readable_time(millis), rendered)
    utils.print_status(3, 1, 123)
    utils.get_nntoolkit()
    utils.get_database_config_file()
    utils.get_database_configuration()
    nose.tools.assert_equal(utils.sizeof_fmt(1), "1.0 bytes")
    nose.tools.assert_equal(utils.sizeof_fmt(1111), "1.1 KB")