# Standard-library and third-party imports used by these snippets; the
# project-specific helpers (Config, DataLoader, write_json,
# load_best_checkpoint, ...) come from the project's own modules.
from math import inf
from pathlib import Path
from time import time

import numpy as np
from sklearn.model_selection import ParameterGrid


def param_search(configs, output_dir: Path, param_grid: dict, train, test):

    start_time = time()
    param_grid = ParameterGrid(param_grid)
    total_combinations = len(param_grid)
    results = np.zeros(total_combinations)
    times = np.zeros((total_combinations, 2))  # min, sec
    print("Total combinations: ", total_combinations)

    # The data never changes between combinations, so build the DataLoader once:
    cfg = Config(config_override=configs)
    data_loader = DataLoader(cfg)

    for i in range(total_combinations):
        val = param_grid[i]
        # Flatten [(key1, val1), (key2, val2), ...] into
        # [key1, val1, key2, val2, ...], the flat format Config expects.
        param = list(sum(val.items(), ()))

        name = "".join(str(v) for v in val.values()).replace(".", "")
        print("Checking: ", param, " name: ", name)
        new_output_dir = output_dir.joinpath(name)

        # Point OUTPUT_DIR at the per-combination subdirectory.
        ind_of_output = configs.index("OUTPUT_DIR")
        configs[ind_of_output + 1] = str(new_output_dir)

        new_cfg = Config(config_override=configs + param)

        train(new_cfg, data_loader, new_output_dir)
        best_data_dict = get_best_dict(new_output_dir)
        results[i] = best_data_dict["Test_eval"]
        times[i] = best_data_dict["Proccess_time"]

        mins, secs = divmod(time() - start_time, 60)
        print(f"\n{i + 1}/{total_combinations}. "
              f"Time passed: {mins:.0f} min {secs:.0f} s\n")

    # Evaluate: lower is better for MSE, higher for accuracy.
    new_cfg = Config(config_override=configs)
    if new_cfg.MODEL.EVAL_FUNC == "mse":
        best_eval_i = int(np.argmin(results))
    else:
        best_eval_i = int(np.argmax(results))

    # Convert numpy values to plain Python types so the dict is JSON
    # serialisable.
    results_dict = {
        "best_index": best_eval_i,
        "best_eval": float(results[best_eval_i]),
        "best_param": param_grid[best_eval_i],
        "best_time": times[best_eval_i].tolist(),
        "param": list(param_grid),
        "results": results.tolist(),
        "times": times.tolist()
    }
    write_json(results_dict, output_dir.joinpath("param_search_results.json"))

    data_loader_test = DataLoader(cfg, one_hot_encode=False)
    test(cfg, data_loader_test, {"Test_eval": results[best_eval_i]})

    print("Best eval: ", results[best_eval_i], " with param: ",
          param_grid[best_eval_i], ", time: ", times[best_eval_i])
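
# For reference: sklearn's ParameterGrid supports len() and integer indexing
# over the cartesian product of the grid, which is what the loop above relies
# on. For example:
# list(ParameterGrid({'OPTIM.ALPHA': [0.1, 0.5], 'OPTIM.LR': [1e-3]}))
# -> [{'OPTIM.ALPHA': 0.1, 'OPTIM.LR': 1e-3},
#     {'OPTIM.ALPHA': 0.5, 'OPTIM.LR': 1e-3}]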


def plot_l2():
    path_to_results = Path("Results", "d)MNISTClass_Regularisation")
    all_dir = [x for x in path_to_results.iterdir() if x.is_dir()]
    values_to_plot = {}
    steps_to_plot = {}
    for d in all_dir:
        cfg = Config(
            config_file=d.joinpath("classification_mnist_model.yaml"))

        last = get_previous_checkpoint_as_dict(d)

        new_key = f'Alpha: {cfg.OPTIM.ALPHA}'
        values_to_plot[new_key] = list(last["Val_eval"].values())
        steps_to_plot[new_key] = list(map(int, last["Val_eval"].keys()))

    info_to_add = {}
    ylimit = None  # e.g. (0.01, 0.04)
    xlimit = None  # e.g. (0, 50000)
    save_fig = True
    plot_values_with_steps_and_info(steps_to_plot,
                                    values_to_plot,
                                    title="L2 Regularisation on MNIST",
                                    xlimit=xlimit,
                                    ylabel="Accuracy",
                                    info_to_add=info_to_add,
                                    ylimit=ylimit,
                                    save_fig=save_fig)
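
# Commented-out call, matching the pattern used elsewhere in this file:
# plot_l2()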


def analyse_nodes_func():
    path_to_results = Path("Results", "d)MNIST_Num_Of_Nodes")
    all_dir = [x for x in path_to_results.iterdir() if x.is_dir()]
    values_to_plot = {}
    steps_to_plot = {}
    for d in all_dir:
        cfg = Config(
            config_file=d.joinpath("classification_mnist_model.yaml"))
        last = get_previous_checkpoint_as_dict(d)
        hidden_layers = cfg.MODEL.HIDDEN_LAYERS
        act = cfg.MODEL.ACTIVATION_FUNCTIONS[:-1]

        new_key = f"{hidden_layers}_{act}"
        values_to_plot[new_key] = list(last["Val_eval"].values())
        steps_to_plot[new_key] = list(map(int, last["Val_eval"].keys()))

    info_to_add = {}
    ylimit = None  # e.g. (0.01, 0.04)
    xlimit = (0, 10000)
    save_fig = False
    plot_values_with_steps_and_info(steps_to_plot,
                                    values_to_plot,
                                    title="MNIST Leaky ReLU",
                                    xlimit=xlimit,
                                    ylabel="Accuracy",
                                    info_to_add=info_to_add,
                                    ylimit=ylimit,
                                    save_fig=save_fig)


#analyse_nodes_func()
def analyse_weight_init_activ(leaky=False):
    path_to_results = Path("Results", "d)MNISTClass_Weigth_Act")
    all_dir = [x for x in path_to_results.iterdir() if x.is_dir()]
    values_to_plot = {}
    steps_to_plot = {}
    for d in all_dir:
        cfg = Config(
            config_file=d.joinpath("classification_mnist_model.yaml"))
        # Keep only the leaky-ReLU runs when leaky=True, otherwise the rest.
        if (cfg.MODEL.ACTIVATION_FUNCTIONS[0] == "leaky_relu") == leaky:
            last = get_previous_checkpoint_as_dict(d)

            weight_init = cfg.MODEL.WEIGHT_INIT
            act = cfg.MODEL.ACTIVATION_FUNCTIONS[0]

            new_key = f"{weight_init}_{act}"
            values_to_plot[new_key] = list(last["Val_eval"].values())
            steps_to_plot[new_key] = list(map(int, last["Val_eval"].keys()))

    info_to_add = {}
    ylimit = None  # e.g. (0.01, 0.04)
    xlimit = None  # e.g. (0, 50000)
    save_fig = True
    plot_values_with_steps_and_info(steps_to_plot,
                                    values_to_plot,
                                    title="MNIST Weight Init and Activations",
                                    xlimit=xlimit,
                                    ylabel="Accuracy",
                                    info_to_add=info_to_add,
                                    ylimit=ylimit,
                                    save_fig=save_fig)
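
# Hypothetical calls: plot the leaky-ReLU runs and the remaining runs separately.
# analyse_weight_init_activ(leaky=True)
# analyse_weight_init_activ(leaky=False)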


# Example 5
def get_all_results_for_weight_init(path: Path, leaky=False):
    all_dir = [x for x in path.iterdir() if x.is_dir()]
    results = []

    for d in all_dir:
        cfg = Config(config_file=d.joinpath("multilayer_model.yaml"))
        # Keep only the leaky-ReLU runs when leaky=True, otherwise the rest.
        if (cfg.MODEL.ACTIVATION_FUNCTIONS[0] == "leaky_relu") == leaky:
            best = load_best_checkpoint(d)
            last_ckp = get_previous_checkpoints(d)[0]
            last = load_data_as_dict(d.joinpath(last_ckp))
            new_val = list(last["Val_eval"].values())
            new_steps = list(map(int, last["Val_eval"].keys()))
            results.append({
                "WEIGHT_INIT": cfg.MODEL.WEIGHT_INIT,
                "ACTIVATION": cfg.MODEL.ACTIVATION_FUNCTIONS[0],
                "LEAKY_SLOPE": cfg.MODEL.LEAKY_SLOPE,
                "Eval": best["Test_eval"],
                "Time": best["Proccess_time"],
                "Step": best["Step"],
                "Val_eval": new_val,
                "Val_steps": new_steps,
                "Name": d
            })

    return results


# A second variant: groups the logistic-regression results by weight init.
def get_all_results_for_weight_init(path: Path):
    weight_inits = ['random', 'he', 'xavier']
    all_dir = [x for x in path.iterdir() if x.is_dir()]
    results = {}

    for w in weight_inits:
        results[w] = []

    for d in all_dir:
        cfg = Config(config_file=d.joinpath("logistic_reg_mnist.yaml"))
        best = load_best_checkpoint(d)
        last_ckp = get_previous_checkpoints(d)[0]
        last = load_data_as_dict(d.joinpath(last_ckp))
        new_val = list(last["Val_eval"].values())
        new_steps = list(map(int, last["Val_eval"].keys()))
        results[cfg.MODEL.WEIGHT_INIT].append({
            "WEIGHT_INIT": cfg.MODEL.WEIGHT_INIT,
            "LR": cfg.OPTIM.LR,
            "ALPHA": cfg.OPTIM.ALPHA,
            "Eval": best["Test_eval"],
            "Time": best["Proccess_time"],
            "Step": best["Step"],
            "Name": d
        })
        # "Val_eval": new_val, "Val_steps": new_steps,

    return results
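
# Hypothetical usage sketch: collect the runs grouped by weight init, then
# pick the best (highest-accuracy) run per group with the project's
# get_max_value helper:
# all_results = get_all_results_for_weight_init(Path("Results", "e)LogisticReg"))
# for w, runs in all_results.items():
#     print(w, get_max_value(runs, "Eval"))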


# Example 7
def analyse_results(results, round_up_to: float = 1, save_fig=False):
    min_val = get_min_value(results, "Eval")  # MIN WHEN MSE, MAX WHEN ACC
    print("Best val: ", min_val)
    best_checkpoint = load_best_checkpoint(min_val["Name"])

    cfg = Config(config_file=Path(min_val["Name"], "sgd.yaml"))
    p = str(cfg.DATA.FRANKIE.P)

    time_for_best_run = f'{min_val["Time"][0]:.0f} min {min_val["Time"][1]:.0f} s'
    best_test_eval = f'{min_val["Eval"]:.5f}'

    # HEAT_MAP
    info_to_add = {}
    s_results = unpack(results, replace_val_bigger=inf)
    position_index = s_results.index.get_loc(min_val["batch_size"])
    position_column = s_results.columns.get_loc(min_val["LR"])

    show_heatmap(s_results,
                 info_to_add=info_to_add,
                 patch_placement=(position_column, position_index),
                 title=f"SGD on Franke p={p}",
                 xlabel='Learning rate',
                 ylabel='Batch size',
                 show_bar=False,
                 save_fig=save_fig)

    print(f'Best eval {min_val["Eval"]}; replacing values above {round_up_to}')
    s_results = unpack(results, replace_val_bigger=round_up_to)
    show_heatmap(s_results,
                 info_to_add=info_to_add,
                 patch_placement=(position_column, position_index),
                 title=f"SGD on Franke p={p} (Filtered)",
                 xlabel='Learning rate',
                 ylabel='Batch size',
                 show_bar=True,
                 save_fig=save_fig)

    new_info = f'={p}, test score={best_test_eval}, time: {time_for_best_run}'
    # PLOTS
    info_to_add = {
        "p": new_info,
        "File name: ": str(min_val["Name"]).replace("\\", "_"),
    }
    print(info_to_add)
    plot_lr_tran_val(best_checkpoint,
                     ylimit=(0.0, 0.1),
                     title=f'Best Run Zoomed In p={p}',
                     info_to_add=info_to_add,
                     save_fig=save_fig)
    plot_lr_tran_val(best_checkpoint,
                     ylimit=(0.0, 1.0),
                     title=f'Best Run p={p}',
                     info_to_add=info_to_add,
                     save_fig=save_fig)


# Example 8
def analyse_ridge_results(results,
                          values_to_unpack_on=("LR_DECAY", "ALPHA"),
                          round_up_to: float = 1,
                          save_fig=False):
    min_val = get_min_value(results, "Eval")
    print("Best val: ", min_val)
    best_checkpoint = load_best_checkpoint(min_val["Name"])

    cfg = Config(config_file=Path(min_val["Name"], "sgd.yaml"))
    p = str(cfg.DATA.FRANKIE.P)

    time_for_best_run = f'{min_val["Time"][0]:.0f} min {min_val["Time"][1]:.0f} s'
    best_test_eval = f'{min_val["Eval"]:.5f}'

    # HEAT_MAP
    info_to_add = {}
    s_results = unpack(results,
                       values_to_unpack_on=values_to_unpack_on,
                       replace_val_bigger=inf)
    position_index = s_results.index.get_loc(min_val[values_to_unpack_on[0]])
    position_column = s_results.columns.get_loc(
        min_val[values_to_unpack_on[1]])

    show_heatmap(s_results,
                 info_to_add=info_to_add,
                 patch_placement=(position_column, position_index),
                 title=f"Ridge SGD on Franke p={p}",
                 xlabel='ALPHA',
                 ylabel='LR_DECAY',
                 show_bar=False,
                 save_fig=save_fig)

    new_info = f'test score={best_test_eval}, time: {time_for_best_run}'
    # PLOTS
    info_to_add = {
        "Info: ": new_info,
        "File name: ": str(min_val["Name"]).replace("\\", "_"),
    }
    print(info_to_add)
    #plot_lr_tran_val(best_checkpoint, ylimit = (0.0, 0.1), title = f'Best Run Zoomed In p={p}', info_to_add = info_to_add, save_fig = save_fig)
    #plot_lr_tran_val(best_checkpoint,  ylimit = (0.0, 1.0), title = f'Best Run p={p}', info_to_add = info_to_add, save_fig = save_fig)


#ridge_results = get_results_where(get_ridge_results(path_to_results), "LR", 1e-3)
#analyse_ridge_results(ridge_results, save_fig = True)


# Example 9
def get_ridge_results(path: Path):
    all_dir = [x for x in path.iterdir() if x.is_dir()]
    results = []

    for d in all_dir:
        cfg = Config(config_file=d.joinpath("sgd.yaml"))
        best = load_best_checkpoint(d)

        results.append({
            "LR": cfg.OPTIM.LR,
            "LR_DECAY": cfg.OPTIM.LR_DECAY,
            "ALPHA": cfg.OPTIM.ALPHA,
            "Eval": best["Test_eval"],
            "Time": best["Proccess_time"],
            "Step": best["Step"],
            "Name": d
        })
    return results
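
# Hypothetical sketch of the downstream pipeline: unpack the flat result list
# into an (LR_DECAY x ALPHA) grid for the heatmap, as analyse_ridge_results
# does above:
# s_results = unpack(get_ridge_results(path_to_results),
#                    values_to_unpack_on=("LR_DECAY", "ALPHA"),
#                    replace_val_bigger=inf)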


# Example 10
def get_all_results_for_p(path: Path):
    polynomials = [2, 5, 10, 15]
    all_dir = [x for x in path.iterdir() if x.is_dir()]
    results = {}
    for pol in polynomials:
        results[pol] = []

    for d in all_dir:
        cfg = Config(config_file=d.joinpath("sgd.yaml"))
        best = load_best_checkpoint(d)

        results[cfg.DATA.FRANKIE.P].append({
            "LR": cfg.OPTIM.LR,
            "batch_size": cfg.OPTIM.BATCH_SIZE,
            "Eval": best["Test_eval"],
            "Time": best["Proccess_time"],
            "Step": best["Step"],
            "Name": d
        })

    return results
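
# Hypothetical usage sketch: the returned dict is keyed by polynomial degree,
# so each degree can be analysed on its own:
# all_results = get_all_results_for_p(path_to_results)
# for p, runs in all_results.items():
#     analyse_results(runs, save_fig=False)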


# Example 11
def analyse_results(results,
                    values_to_analyse=("LR_DECAY", "LR"),
                    round_up_to: float = 1,
                    save_fig=False):
    min_val = get_min_value(results, "Eval")  # MAX WHEN ACC AND MIN WHEN MSE
    print("Best val: ", min_val)
    best_checkpoint = load_best_checkpoint(min_val["Name"])

    cfg = Config(config_file=Path(min_val["Name"], "multilayer_model.yaml"))
    p = str(cfg.MODEL.WEIGHT_INIT)

    time_for_best_run = f'{min_val["Time"][0]:.0f} min {min_val["Time"][1]:.0f} s'
    best_test_eval = f'{min_val["Eval"]:.5f}'

    # HEAT_MAP
    info_to_add = {}
    s_results = unpack(results,
                       values_to_unpack_on=values_to_analyse,
                       replace_val_bigger=inf)
    position_index = s_results.index.get_loc(min_val[values_to_analyse[0]])
    position_column = s_results.columns.get_loc(min_val[values_to_analyse[1]])

    show_heatmap(s_results,
                 info_to_add=info_to_add,
                 patch_placement=(position_column, position_index),
                 title=f"Franke NN",
                 xlabel=values_to_analyse[1],
                 ylabel=values_to_analyse[0],
                 show_bar=True,
                 save_fig=save_fig)

    new_info = f'test score={best_test_eval}, time: {time_for_best_run}'
    # PLOTS
    info_to_add = {
        "Results: ": new_info,
        "File name: ": str(min_val["Name"]).replace("\\", "_"),
    }
    print(info_to_add)


# Example 12
# NOTE: the opening of this override list was cut off in the source; the
# "MODEL.WEIGHT_INIT" key for the 'xavier' value below is an assumed
# reconstruction.
config_override = [
    "MODEL.WEIGHT_INIT",
    'xavier',
    'MODEL.LEAKY_SLOPE',
    0.1,
    "MODEL.EVAL_FUNC",
    "acc",
    "MODEL.COST_FUNCTION",
    "ce",
    "DATA.NAME",
    "mnist",
    "DATA.MNIST.BINARY",
    [],  # all classes
    "OUTPUT_DIR",
    "d)MNIST_Num_Of_Nodes"
]

cfg = Config(config_override=config_override)
output_dir = PROJECT_ROOT_DIR.joinpath(cfg.OUTPUT_DIR)

data_loader = DataLoader(cfg)
train_save_configs(cfg, data_loader, output_dir)
best_data_dict = load_best_checkpoint(output_dir)
test(cfg, data_loader, best_data_dict)
plot_lr_tran_val(best_data_dict)

# ------------------------Parameter search-----------------------------------

param_grid = {
    #'OPTIM.LR_DECAY': [0.0, 0.6, 0.9],
    'OPTIM.REGULARISATION': ["l2"],
    'OPTIM.ALPHA': [0.1, 0.5, 0.9],
}
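
# A hedged sketch of how this grid could feed param_search from the top of
# this file (assuming config_override and the train_save_configs/test
# helpers used above):
# param_search(config_override, output_dir.joinpath("param_search"),
#              param_grid, train_save_configs, test)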


def analyse_results(results,
                    values_to_analyse=("LR", "ALPHA"),
                    round_up_to: float = 1,
                    save_fig=False):
    min_val = get_max_value(results, "Eval")  # MAX WHEN ACC AND MIN WHEN MSE
    print("Best val: ", min_val)
    best_checkpoint = load_best_checkpoint(min_val["Name"])

    cfg = Config(config_file=Path(min_val["Name"], "logistic_reg_mnist.yaml"))
    p = str(cfg.MODEL.WEIGHT_INIT)

    time_for_best_run = f'{min_val["Time"][0]:.0f} min {min_val["Time"][1]:.0f} s'
    best_test_eval = f'{min_val["Eval"]:.5f}'

    # HEAT_MAP
    new_info = f'test score={best_test_eval}, time: {time_for_best_run}'
    info_to_add = {"Best result: ": new_info}
    s_results = unpack(results,
                       values_to_unpack_on=values_to_analyse,
                       replace_val_bigger=inf)
    position_index = s_results.index.get_loc(min_val[values_to_analyse[0]])
    position_column = s_results.columns.get_loc(min_val[values_to_analyse[1]])

    show_heatmap(s_results,
                 info_to_add=info_to_add,
                 patch_placement=(position_column, position_index),
                 title=f"Logistic Regression {p}",
                 xlabel=values_to_analyse[1],
                 ylabel=values_to_analyse[0],
                 show_bar=True,
                 save_fig=save_fig)


# path_to_results = Path("Results", "e)LogisticReg")
# all_results = get_all_results_for_weight_init(path_to_results)

# plt.rcParams['font.size'] = 16 # To set the size of all plots to be bigger
# for res in all_results:
#     analyse_results(all_results[res], save_fig=True)

# values_to_plot = {}
# steps_to_plot = {}
# for res in all_results:
#     min_val = get_max_value(all_results[res], "Eval") # MAX WHEN ACC AND MIN WHEN MSE
#     print("Best val: ", min_val)

#     cfg = Config(config_file = Path(min_val["Name"], "logistic_reg_mnist.yaml"))

#     last = get_previous_checkpoint_as_dict(min_val["Name"])

#     new_key = f"{cfg.MODEL.WEIGHT_INIT}"#_{cfg.OPTIM.LR}_{cfg.OPTIM.ALPHA}"
#     values_to_plot[new_key] = list(last["Val_eval"].values())
#     steps_to_plot[new_key] = list(map(int, last["Val_eval"].keys()))

# plt.rcParams['font.size'] = 16 # To set the size of all plots to be bigger

# info_to_add = {}
# ylimit = (0.7, 1.0) #
# xlimit = None #(0, 50000) #
# save_fig = True
# plot_values_with_steps_and_info(steps_to_plot, values_to_plot, title = "Logistic Regression Best Runs (Zoomed)", xlimit = xlimit, ylabel = "Error",  info_to_add = info_to_add, ylimit = ylimit, save_fig = save_fig)

# best_runs_for_class = [Path('Results/e)LogisticReg/00105he'), ]