def get_test_results(model_folder, basename, test_file):
    """Evaluate the latest model from ``model_folder`` on ``test_file``.

    Runs the configured nntoolkit as a subprocess, feeding it a
    percentage-adjusted copy of the model on stdin and writing its
    output to a timestamped logfile under the project's ``logs/``
    directory.

    Parameters
    ----------
    model_folder : str
        Folder that contains the trained models.
    basename : str
        Common prefix of the model files (e.g. ``"model"``).
    test_file : str
        Path of the test data file to evaluate against.

    Returns
    -------
    str or None
        Path of the evaluation logfile, or ``None`` when no model with
        ``basename`` exists in ``model_folder``.
    """
    model_src = utils.get_latest_model(model_folder, basename)
    if model_src is None:
        logging.error("No model with basename '%s' found in '%s'.",
                      basename, model_folder)
        return None  # was an implicit fall-through; make it explicit

    # mkstemp returns an OPEN file descriptor; close it immediately,
    # otherwise the descriptor leaks (the original discarded it with `_`).
    fd, model_use = tempfile.mkstemp(suffix='.json', text=True)
    os.close(fd)
    utils.create_adjusted_model_for_percentages(model_src, model_use)

    # Start evaluation
    project_root = utils.get_project_root()
    time_prefix = time.strftime("%Y-%m-%d-%H-%M")
    logging.info("Evaluate '%s' with '%s'...", model_src, test_file)
    logfile = os.path.join(project_root,
                           "logs/%s-error-evaluation.log" % time_prefix)
    logging.info('Write log to %s...', logfile)
    with open(logfile, "w") as log, open(model_use, "r") as model_src_p:
        p = subprocess.Popen([utils.get_nntoolkit(), 'run',
                              '--batch-size', '1', '-f%0.4f', test_file],
                             stdin=model_src_p,
                             stdout=log)
        ret = p.wait()
        if ret != 0:
            logging.error("nntoolkit finished with ret code %s", str(ret))
            sys.exit(-1)
    os.remove(model_use)
    return logfile
def get_test_results(model_folder, basename, test_file):
    """Evaluate the latest model from ``model_folder`` on ``test_file``.

    A percentage-adjusted copy of the model is piped into the nntoolkit
    runner on stdin; stdout goes to a timestamped logfile in the
    project's ``logs/`` directory.

    Parameters
    ----------
    model_folder : str
        Folder that contains the trained models.
    basename : str
        Common prefix of the model files (e.g. ``"model"``).
    test_file : str
        Path of the test data file to evaluate against.

    Returns
    -------
    str or None
        Path of the evaluation logfile, or ``None`` when no model with
        ``basename`` exists in ``model_folder``.
    """
    model_src = utils.get_latest_model(model_folder, basename)
    if model_src is None:
        logging.error("No model with basename '%s' found in '%s'.",
                      basename, model_folder)
        return None  # was an implicit fall-through; make it explicit

    # Fix: mkstemp hands back an open file descriptor; the original threw
    # it away (`_, model_use = ...`), leaking one fd per call. Close it.
    fd, model_use = tempfile.mkstemp(suffix='.json', text=True)
    os.close(fd)
    utils.create_adjusted_model_for_percentages(model_src, model_use)

    # Start evaluation
    project_root = utils.get_project_root()
    time_prefix = time.strftime("%Y-%m-%d-%H-%M")
    logging.info("Evaluate '%s' with '%s'...", model_src, test_file)
    logfile = os.path.join(project_root,
                           "logs/%s-error-evaluation.log" % time_prefix)
    logging.info('Write log to %s...', logfile)
    with open(logfile, "w") as log, open(model_use, "r") as model_src_p:
        p = subprocess.Popen([utils.get_nntoolkit(), 'run',
                              '--batch-size', '1', '-f%0.4f', test_file],
                             stdin=model_src_p, stdout=log)
        ret = p.wait()
        if ret != 0:
            logging.error("nntoolkit finished with ret code %s", str(ret))
            sys.exit(-1)
    os.remove(model_use)
    return logfile
def get_test_results(model_folder, basename, test_file):
    """Evaluate the latest model from ``model_folder`` on ``test_file``.

    Feeds a percentage-adjusted copy of the model to the nntoolkit
    runner on stdin and captures its stdout in a timestamped logfile
    under the project's ``logs/`` directory.

    Parameters
    ----------
    model_folder : str
        Folder that contains the trained models.
    basename : str
        Common prefix of the model files (e.g. ``"model"``).
    test_file : str
        Path of the test data file to evaluate against.

    Returns
    -------
    str or None
        Path of the evaluation logfile, or ``None`` when no model with
        ``basename`` exists in ``model_folder``.
    """
    model_src = utils.get_latest_model(model_folder, basename)
    if model_src is None:
        logger.error(
            f"No model with basename '{basename}' found in '{model_folder}'.")
        return None  # was an implicit fall-through; make it explicit

    # mkstemp returns an OPEN file descriptor; the original discarded it
    # (`_, model_use = ...`), leaking one fd per call. Close it right away.
    fd, model_use = tempfile.mkstemp(suffix=".json", text=True)
    os.close(fd)
    utils.create_adjusted_model_for_percentages(model_src, model_use)

    # Start evaluation
    project_root = utils.get_project_root()
    time_prefix = time.strftime("%Y-%m-%d-%H-%M")
    logger.info(f"Evaluate '{model_src}' with '{test_file}'...")
    logfile = os.path.join(project_root,
                           "logs/%s-error-evaluation.log" % time_prefix)
    logger.info(f"Write log to {logfile}...")
    with open(logfile, "w") as log, open(model_use) as model_src_p:
        p = subprocess.Popen(
            [
                utils.get_nntoolkit(),
                "run",
                "--batch-size",
                "1",
                "-f%0.4f",
                test_file,
            ],
            stdin=model_src_p,
            stdout=log,
        )
        ret = p.wait()
        if ret != 0:
            logger.error(f"nntoolkit finished with ret code {ret}")
            sys.exit(-1)
    os.remove(model_use)
    return logfile
def test_execution():
    """Test if the functions execute at all."""
    # Smoke-call helpers whose return value we do not pin down.
    utils.get_project_root()
    utils.get_latest_model(".", "model")
    utils.get_latest_working_model(".")
    utils.get_latest_successful_run(".")

    # Table of (milliseconds, expected human-readable rendering).
    readable_time_cases = [
        (123, "123ms"),
        (1000 * 30, "30s 0ms"),
        (1000 * 60, "1 minutes 0s 0ms"),
        (1000 * 60 * 60, "1h, 0 minutes 0s 0ms"),
        (2 * 1000 * 60 * 60, "2h, 0 minutes 0s 0ms"),
        (25 * 1000 * 60 * 60 + 3, "25h, 0 minutes 0s 3ms"),
    ]
    for millis, expected in readable_time_cases:
        assert utils.get_readable_time(millis) == expected

    utils.print_status(3, 1, 123)
    utils.get_nntoolkit()
    utils.get_database_config_file()
    utils.get_database_configuration()

    assert utils.sizeof_fmt(1) == "1.0 bytes"
    assert utils.sizeof_fmt(1111) == "1.1 KB"
def execution_test():
    """Test if the functions execute at all."""
    # Smoke-call helpers whose return value is not checked.
    utils.get_project_root()
    utils.get_latest_model(".", "model")
    utils.get_latest_working_model(".")
    utils.get_latest_successful_run(".")
    # Plain asserts instead of nose.tools.assert_equal: nose is
    # unmaintained, and the sibling test_execution already uses bare
    # asserts — keep both variants consistent.
    assert utils.get_readable_time(123) == "123ms"
    assert utils.get_readable_time(1000 * 30) == "30s 0ms"
    assert utils.get_readable_time(1000 * 60) == "1 minutes 0s 0ms"
    assert utils.get_readable_time(1000 * 60 * 60) == "1h, 0 minutes 0s 0ms"
    assert utils.get_readable_time(2 * 1000 * 60 * 60) == "2h, 0 minutes 0s 0ms"
    assert utils.get_readable_time(25 * 1000 * 60 * 60 + 3) == "25h, 0 minutes 0s 3ms"
    utils.print_status(3, 1, 123)
    utils.get_nntoolkit()
    utils.get_database_config_file()
    utils.get_database_configuration()
    assert utils.sizeof_fmt(1) == "1.0 bytes"
    assert utils.sizeof_fmt(1111) == "1.1 KB"
def generate_training_command(model_folder):
    """Generate a string that contains a command with all necessary
    parameters to train the model.

    Reads ``info.yml`` from ``model_folder``, resolves the hdf5 data
    paths and the latest model file, and substitutes them into the
    ``training`` command template from the model description.

    Parameters
    ----------
    model_folder : str
        Folder that contains ``info.yml`` and the model files.

    Returns
    -------
    str or None
        The fully substituted training command, or ``None`` when no
        working model exists in ``model_folder``.
    """
    update_if_outdated(model_folder)
    model_description_file = os.path.join(model_folder, "info.yml")

    # Read the model description file.
    # safe_load: yaml.load without an explicit Loader is deprecated and
    # can construct arbitrary Python objects from the YAML file.
    with open(model_description_file, 'r') as ymlfile:
        model_description = yaml.safe_load(ymlfile)

    # Get the data paths (hdf5 files)
    project_root = utils.get_project_root()
    data = {}
    data['training'] = os.path.join(project_root,
                                    model_description["data-source"],
                                    "traindata.hdf5")
    data['testing'] = os.path.join(project_root,
                                   model_description["data-source"],
                                   "testdata.hdf5")
    data['validating'] = os.path.join(project_root,
                                      model_description["data-source"],
                                      "validdata.hdf5")

    # Get latest model file
    basename = "model"
    latest_model = utils.get_latest_working_model(model_folder)
    if latest_model == "":
        logging.error("There is no model with basename '%s'.", basename)
        return None

    logging.info("Model '%s' found.", latest_model)
    # Model files are named "<basename>-<i>.json"; train i -> i+1.
    i = int(latest_model.split("-")[-1].split(".")[0])
    model_src = os.path.join(model_folder, "%s-%i.json" % (basename, i))
    model_target = os.path.join(model_folder, "%s-%i.json" % (basename, i + 1))

    # generate the training command
    training = model_description['training']
    training = training.replace("{{testing}}", data['testing'])
    training = training.replace("{{training}}", data['training'])
    training = training.replace("{{validation}}", data['validating'])
    training = training.replace("{{src_model}}", model_src)
    training = training.replace("{{target_model}}", model_target)
    training = training.replace("{{nntoolkit}}", utils.get_nntoolkit())
    return training
def generate_training_command(model_folder):
    """Generate a string that contains a command with all necessary
    parameters to train the model.

    Reads ``info.yml`` from ``model_folder``, resolves the hdf5 data
    paths and the latest model file, and substitutes them into the
    ``training`` command template from the model description.

    Parameters
    ----------
    model_folder : str
        Folder that contains ``info.yml`` and the model files.

    Returns
    -------
    str or None
        The fully substituted training command, or ``None`` when no
        working model exists in ``model_folder``.
    """
    update_if_outdated(model_folder)
    model_description_file = os.path.join(model_folder, "info.yml")

    # Read the model description file.
    # Fix: yaml.load without a Loader is deprecated and unsafe (it can
    # construct arbitrary Python objects); safe_load parses plain data only.
    with open(model_description_file, 'r') as ymlfile:
        model_description = yaml.safe_load(ymlfile)

    # Get the data paths (hdf5 files)
    project_root = utils.get_project_root()
    data = {}
    data['training'] = os.path.join(project_root,
                                    model_description["data-source"],
                                    "traindata.hdf5")
    data['testing'] = os.path.join(project_root,
                                   model_description["data-source"],
                                   "testdata.hdf5")
    data['validating'] = os.path.join(project_root,
                                      model_description["data-source"],
                                      "validdata.hdf5")

    # Get latest model file
    basename = "model"
    latest_model = utils.get_latest_working_model(model_folder)
    if latest_model == "":
        logging.error("There is no model with basename '%s'.", basename)
        return None

    logging.info("Model '%s' found.", latest_model)
    # Model files are named "<basename>-<i>.json"; train i -> i+1.
    i = int(latest_model.split("-")[-1].split(".")[0])
    model_src = os.path.join(model_folder, "%s-%i.json" % (basename, i))
    model_target = os.path.join(model_folder, "%s-%i.json" % (basename, i + 1))

    # generate the training command
    training = model_description['training']
    training = training.replace("{{testing}}", data['testing'])
    training = training.replace("{{training}}", data['training'])
    training = training.replace("{{validation}}", data['validating'])
    training = training.replace("{{src_model}}", model_src)
    training = training.replace("{{target_model}}", model_target)
    training = training.replace("{{nntoolkit}}", utils.get_nntoolkit())
    return training
def generate_training_command(model_folder):
    """Generate a string that contains a command with all necessary
    parameters to train the model.

    Reads ``info.yml`` from ``model_folder``, resolves the hdf5 data
    paths and the latest model file, and fills them into the
    ``training`` command template of the model description. Returns
    ``None`` when no working model exists in ``model_folder``.
    """
    update_if_outdated(model_folder)

    # Read the model description file
    description_path = os.path.join(model_folder, "info.yml")
    with open(description_path) as ymlfile:
        model_description = yaml.safe_load(ymlfile)

    # Resolve the data paths (hdf5 files) relative to the project root.
    project_root = utils.get_project_root()
    source_dir = model_description["data-source"]
    data = {
        "training": os.path.join(project_root, source_dir, "traindata.hdf5"),
        "testing": os.path.join(project_root, source_dir, "testdata.hdf5"),
        "validating": os.path.join(project_root, source_dir, "validdata.hdf5"),
    }

    # Locate the newest working model file.
    basename = "model"
    latest_model = utils.get_latest_working_model(model_folder)
    if latest_model == "":
        logger.error(
            f"There is no model with basename '{basename}' in {model_folder}")
        return None
    logger.info(f"Model '{latest_model}' found.")

    # Model files are named "<basename>-<index>.json"; training produces
    # the next index.
    index = int(latest_model.split("-")[-1].split(".")[0])
    model_src = os.path.join(model_folder, f"{basename}-{index}.json")
    model_target = os.path.join(model_folder, f"{basename}-{index + 1}.json")

    # Fill every placeholder of the command template.
    substitutions = {
        "{{testing}}": data["testing"],
        "{{training}}": data["training"],
        "{{validation}}": data["validating"],
        "{{src_model}}": model_src,
        "{{target_model}}": model_target,
        "{{nntoolkit}}": utils.get_nntoolkit(),
    }
    training = model_description["training"]
    for placeholder, value in substitutions.items():
        training = training.replace(placeholder, value)
    return training