Example No. 1
import os


def run_experiment(config):
    """Validate inputs, train the model described by `config`, plot its history, and run the test step."""

    if not os.path.exists(config["dataset_path"]):
        raise FileNotFoundError("Dataset not found at {}".format(
            config["dataset_path"]))

    if checkpoint_dir_exists(config):
        should_overwrite = ask_yes_no_question(
            "[PROMPT] Overwrite existing checkpoint? {}".format(
                config['checkpoint_path']))
        if should_overwrite:
            make_or_get_checkpoint_dir(config)
        else:
            print("Skipping experiment...")
            return

    print("===================================")
    print("Starting experiment for model {}".format(config["model"]))
    print("===================================")

    # train, then store visuals and files
    model, hist = train(config)
    history_dict = hist.history
    plot_history(config, history_dict)
    # save_history(config, history_dict)
    # save_config(config)

    # test data and write results
    test(model, config, store_output=True, evaluate_splits=True)
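
A minimal usage sketch, assuming `config` is a plain dict carrying the keys run_experiment reads; all values below are placeholders, not from the source repository:

# Hypothetical invocation; keys mirror those accessed in run_experiment above.
example_config = {
    "dataset_path": "data/train",            # placeholder dataset location
    "checkpoint_path": "checkpoints/exp1",   # placeholder checkpoint directory
    "model": "baseline",                     # placeholder model name
}
run_experiment(example_config)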
Example No. 2
import datetime


def run_train_validation_test_final(config):
    # store visuals and files
    print("Start time:")
    starttime = datetime.datetime.now()
    print(str(starttime))
    model, hist, score = trainfforvalidationandtest(config)
    history_dict = hist.history
    history_dict['validation_score'] = score
    plot_history(config, history_dict)
    save_history(config, history_dict)
    save_config(config)
    print("Meine final score für Validation set: \n")
    print(score)
    # test data and write results
    test(model, config, store_output=True, evaluate_splits=True)
    print("Endzeit:")
    endtime = datetime.datetime.now()
    print(str(endtime))
    totaltime = endtime - starttime
    print("Zeitinsgesamt:")
    print(str(totaltime))
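
A minimal usage sketch, assuming the same dict-style config as above; a real config would also carry whatever keys trainfforvalidationandtest expects, and all values below are placeholders:

example_config = {
    "dataset_path": "data/train",            # placeholder
    "checkpoint_path": "checkpoints/exp2",   # placeholder
    "model": "baseline",                     # placeholder
    "batch_size": 32,                        # placeholder
}
run_train_validation_test_final(example_config)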
Example No. 3
import os


def test_checkpoint(checkpoint_path, evaluate_splits=True, store_output=False):
    config = load_config(checkpoint_path)
    metrics.BATCH_SIZE = config["batch_size"]  # set before the metric is compiled
    model = load_model(os.path.join(checkpoint_path, "weights.hdf5"),
                       custom_objects={'motion_metric': motion_metric})
    test(model, config, evaluate_splits=evaluate_splits, store_output=store_output)
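
A minimal usage sketch, assuming the checkpoint directory contains the config saved by save_config and the weights.hdf5 file written during training; the path below is a placeholder:

test_checkpoint("checkpoints/exp1", evaluate_splits=True, store_output=False)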