Example #1
    def run_nested():
        input_s, target_s, test_input_s, inner_dim, activ_fun, data_frame = MicheliDataset.init()

        # plot_mds(target_s, "target")

        exp = ExperimentSettings(
            performance_function=LOSS_DICT["mse"].f,
            select_function=min
        )

        optimizer = [OPTIMIZER_DICT['SGD'](), OPTIMIZER_DICT['ADAMAX'](), OPTIMIZER_DICT['ADAM']()]

        hyp = HyperParameters(
            inner_dimension=inner_dim,
            activation_function=activ_fun,
            epochs=[250],
            batch_size=[1],
            optimizer=optimizer,
            task_type=[TaskType.classification]
        )

        fold_s = KFolds.double_cross_validation_folds(data_indexes=list(range(len(target_s))), external_folds_dim=3, internal_folds_dim=4)
        task = Task(input_s, target_s, fold_s)
        mod_sel = ModelSelection(task, exp, hyp)

        res = mod_sel.double_cross_validation(n_workers=50)
        pickle.dump(res, open("cup_nested_res.p", "wb"))
Example #2
    def run():

        input_s, target_s, test_input_s, test_target_s, inner_dim, activ_fun = MonkTest.init(3)

        exp = ExperimentSettings(
            # performance_function=accuracy,
            # select_function=max,
        )

        optimizer = OPTIMIZER_DICT['ADAMAX']()

        hyp = HyperParameters(inner_dimension=inner_dim,
                              activation_function=activ_fun,
                              epochs=[500],
                              batch_size=[1],
                              optimizer=[optimizer],
                              task_type=[TaskType.regression])

        # print(NeuralNet.train_without_ms(input_s, target_s, test_input_s, test_target_s, hyp, exp))

        fold_s = KFolds.cross_validation_folds(data_indexes=list(range(len(target_s))), folds_number=6)

        task = Task(input_s, target_s, fold_s)
        mod_sel = ModelSelection(task, exp, hyp)

        res = mod_sel.run_grid_search(n_workers=80)
        pickle.dump(res, open("monk3_validation_res.p", "wb"))

        best_nets = []
        for r in res["results_list"]:
            if r["avg_ts_score"] == res["best_score"][1]:
                best_nets.append(r["params"])
Example #3
    def run():

        input_s, target_s, inner_dim, activ_fun = Test1.init()

        exp = ExperimentSettings()
        hyp = HyperParameters(inner_dimension=inner_dim, activation_function=activ_fun)
        fold_s = KFolds.cross_validation_folds(data_indexes=list(range(len(target_s))), folds_number=2)
        # fold_s = KFolds.double_cross_validation_folds(data_indexes=list(range(len(target_s))), external_folds_dim=1, internal_folds_dim=2)

        task = Task(input_s, target_s, fold_s)
        mod_sel = ModelSelection(task, exp, hyp)

        res = mod_sel.run_grid_search(n_workers=1)
        # res = mod_sel.double_cross_validation(n_workers=2)

        print(res)
        plt.show()
Example #4
    def run():

        input_s, target_s, test_input_s, test_target_s, inner_dim, activ_fun = MonkTest.init(2)

        exp = ExperimentSettings(
            performance_function=accuracy,
            select_function=max,
        )

        optimizer1 = OPTIMIZER_DICT['SGD'](lr=0.001,
                                           momentum=0.1,
                                           nesterov=True)
        optimizer2 = OPTIMIZER_DICT['ADAM']()
        optimizer3 = OPTIMIZER_DICT['ADAMAX']()

        hyp = HyperParameters(inner_dimension=inner_dim,
                              activation_function=activ_fun,
                              epochs=[500],
                              batch_size=[1],
                              optimizer=[optimizer1],
                              task_type=[TaskType.classification])

        # print(NeuralNet.train_without_ms(input_s, target_s, test_input_s, test_target_s, hyp, exp))

        fold_s = KFolds.cross_validation_folds(data_indexes=list(range(len(target_s))), folds_number=2)

        task = Task(input_s, target_s, fold_s)
        mod_sel = ModelSelection(task, exp, hyp)

        res = mod_sel.run_grid_search(n_workers=1)
        pickle.dump(res, open("monk2_validation_res.p", "wb"))

        best_nets = []
        for r in res["results_list"]:
            if r["avg_ts_score"] == res["best_score"][1]:
                best_nets.append(r["params"])

        print("Evaluating best net:")
        evaluate_and_plot(input_s, target_s, test_input_s, test_target_s,
                          best_nets)
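
This example scores models with an accuracy function selected by max. A minimal sketch of a compatible metric, assuming the same (prediction, target) call signature seen in Example #6 and 0/1 targets with real-valued network outputs; this is an illustration, not the project's own accuracy implementation:

    def accuracy(predictions, targets):
        """Fraction of samples whose element-wise rounded prediction matches the target."""
        correct = 0
        for pred, target in zip(predictions, targets):
            if [round(p) for p in pred] == list(target):
                correct += 1
        return correct / len(targets)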
Example #5
    def run():
        tr_dim = 160
        ts_dim = 500

        input_s = [[x / tr_dim] for x in range(tr_dim)]
        target_s = [[np.sin(inp[0] * 15) + np.cos(inp[0] * 2)] for inp in input_s]

        test_input_s = [[x / ts_dim] for x in range(ts_dim)]
        test_target_s = [[np.sin(inp[0] * 15) + np.cos(inp[0] * 2)] for inp in test_input_s]

        plot_sorted(input_s, target_s, "train")
        plot_sorted(test_input_s, test_target_s, "test")

        inner_dim = [[30, 20, 30, 1]]
        activ_fun = [[ACTIVATION_DICT["tanh"],
                      ACTIVATION_DICT["tanh"],
                      ACTIVATION_DICT["tanh"],
                      ACTIVATION_DICT["linear"]]]

        exp = ExperimentSettings()

        hyp = HyperParameters(
            inner_dimension=inner_dim,
            activation_function=activ_fun,
            epochs=[300],
            batch_size=[1])

        fold_s = KFolds.cross_validation_folds(data_indexes=list(range(len(target_s))), folds_number=2)

        task = Task(input_s, target_s, fold_s)
        mod_sel = ModelSelection(task, exp, hyp)

        res = mod_sel.run_grid_search(n_workers=2)

        # nets = [net["score"]["trained_net"] for net in res["results_list"][res["best_score"][0]]["single_result"]]
        nets = [net["single_result"][0]["score"]["trained_net"] for net in res["results_list"]]

        for net in nets:
            plot_sorted(test_input_s, net.predict(test_input_s), net.name)

        plt.show()
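
The toy targets in this example follow y = sin(15x) + cos(2x), sampled on a uniform grid over [0, 1). A vectorized NumPy sketch of the same data generation, shown only for clarity; the example itself builds plain Python lists so they can be passed to Task directly:

    import numpy as np

    tr_dim = 160
    x = np.arange(tr_dim) / tr_dim        # uniform grid on [0, 1)
    y = np.sin(15 * x) + np.cos(2 * x)    # same target function as above

    # Reshape to the list-of-single-feature-rows layout used by the example.
    input_s = x.reshape(-1, 1).tolist()
    target_s = y.reshape(-1, 1).tolist()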
Example #6
    def run():

        input_s, target_s, test_input_s, test_target_s, inner_dim, activ_fun, \
            data_frame, test_data_frame = MicheliModelSelection.init()

        exp = ExperimentSettings(
            performance_function=LOSS_DICT["mee"].f,
            select_function=min
        )

        optimizer = [OPTIMIZER_DICT['SGD'](), OPTIMIZER_DICT['ADAMAX'](), OPTIMIZER_DICT['ADAM']()]

        hyp = HyperParameters(
            inner_dimension=inner_dim,
            activation_function=activ_fun,
            epochs=[250],
            batch_size=[1],
            optimizer=optimizer,
            task_type=[TaskType.classification]
        )

        fold_s = KFolds.cross_validation_folds(data_indexes=list(range(len(target_s))), folds_number=2)

        task = Task(input_s, target_s, fold_s)
        mod_sel = ModelSelection(task, exp, hyp)

        res = mod_sel.run_grid_search(n_workers=1)
        pickle.dump(res, open("cup_validation_res.p", "wb"))

        best_nets = []
        for r in res["results_list"]:
            if r["avg_ts_score"] == res["best_score"][1]:
                best_nets.append(r["params"])

        evaluate_and_plot(input_s, target_s, test_input_s, test_target_s, best_nets)

        score = exp.params["performance_function"](prediction, target_s)
        print("score: ", score)

        plt.show()