def model_complexity_exp_max_attempt_ga(self):
    """Model-complexity sweep over ``max_attempts`` for a GA-trained NN.

    Builds a genetic-algorithm mlrose NeuralNetwork (4x50 relu, pop 2000)
    and delegates the sweep and plotting to ExperimentHelper under the
    'GA' tag.  Removed the unused ``scoring`` local.
    """
    nn_model = mlrose.NeuralNetwork(
        hidden_nodes=[50, 50, 50, 50], activation='relu',
        algorithm='genetic_alg', max_iters=1000,
        bias=True, is_classifier=True, learning_rate=0.0001,
        early_stopping=True, clip_max=5, max_attempts=100,
        pop_size=2000, random_state=3)

    exp_helper = ExperimentHelper(self.splitter, nn_model, 'GA')
    param_range = [20, 50, 100, 200, 300]
    exp_helper.model_complexity_exp('max_attempts', param_range)
    def model_complexity_exp(self):
        """Model-complexity sweep over ``learning_rate`` for an RHC-trained NN.

        Builds a random-hill-climb mlrose NeuralNetwork (4x50 relu) and
        delegates the sweep/plot to ExperimentHelper under 'RHC'.
        Removed the unused ``scoring`` local.
        """
        nn_model = mlrose.NeuralNetwork(
            hidden_nodes=[50, 50, 50, 50], activation='relu',
            algorithm='random_hill_climb', max_iters=1000,
            bias=True, is_classifier=True, learning_rate=0.0001,
            early_stopping=True, clip_max=5, max_attempts=100,
            random_state=3)

        exp_helper = ExperimentHelper(self.splitter, nn_model, 'RHC')
        param_range = np.array([0.0001, 0.001, 0.002, 0.003, 0.005, 0.008])
        exp_helper.model_complexity_exp('learning_rate', param_range)
 def experiment_run_test_bank(self):
     """Final train/test run for the bank dataset with the tuned ANN
     (3x50 relu, alpha=0.01, adam, no early stopping)."""
     self.learner = ANNLearner(activation='relu',
                               alpha=0.01,
                               hidden_layer_sizes=(50, 50, 50),
                               learning_rate='constant',
                               solver='adam',
                               early_stopping=False)
     self.expHelper = ExperimentHelper(self.splitter, self.learner)
     self.expHelper.experiment_run_test()
Exemple #4
0
    def model_complexity_exp2(self):
        """Two ``gamma`` sweeps for a linear-kernel SVM at C=0.2
        (output tags 'linear4' and 'linear5').

        NOTE(review): gamma normally has no effect on a linear kernel —
        presumably carried over from an rbf experiment; confirm intent.
        """
        sweeps = (('linear4', np.array([1, 2, 4, 5, 6, 7, 8])),
                  ('linear5', np.array([0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8])))
        for tag, grid in sweeps:
            self.learner = SVMLearner(kernel='linear', C=0.2)
            self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                              tag, 'Linear Kernel')
            self.expHelper.model_complexity_exp('gamma', grid)
    def model_complexity_exp_epoch_no_stop_sa(self):
        """Sweep ``max_iters`` (training epochs) for an SA-trained NN with
        early stopping disabled; plot via ExperimentHelper under 'SA'.
        Removed the unused ``scoring`` local.
        """
        nn_model = mlrose.NeuralNetwork(
            hidden_nodes=[50, 50, 50, 50], activation='relu',
            algorithm='simulated_annealing', max_iters=1000,
            bias=True, is_classifier=True, learning_rate=0.0001,
            early_stopping=False, clip_max=5, max_attempts=100,
            random_state=3)

        exp_helper = ExperimentHelper(self.splitter, nn_model, 'SA')
        param_range = [1, 10, 50, 100, 200, 500]
        exp_helper.model_complexity_exp('max_iters', param_range)
 def learning_curve_iter2_bank(self):
     """Learning curve for the tuned bank ANN (output tag '-iter-3')."""
     ann = ANNLearner(activation='relu',
                      alpha=0.01,
                      hidden_layer_sizes=(50, 50, 50),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
     self.learner = ann
     self.expHelper = ExperimentHelper(self.splitter, ann, '-iter-3')
     self.expHelper.learning_curve_exp()
 def experiment_run_test_bank_iter2(self):
     """Second-iteration bank run with a larger ANN (7 layers of 200,
     alpha=0.06, momentum=0.4, early stopping on).

     Bug fix: the old log line reported a stale configuration
     ('100, 100, 100, 100, 100, 100,50 alpha 0.3') that does not match
     the model actually built; log the real settings instead.
     """
     self.learner = ANNLearner(activation='relu',
                               alpha=0.06,
                               hidden_layer_sizes=(200,) * 7,
                               learning_rate='constant',
                               solver='adam',
                               early_stopping=True,
                               max_iter=600,
                               momentum=0.4)
     print('200 x 7 hidden layers, alpha 0.06, momentum 0.4')
     self.expHelper = ExperimentHelper(self.splitter, self.learner)
     self.expHelper.experiment_run_test()
     """ self.learner = ANNLearner(
    def model_complexity_exp_max_attempts_sa(self):
        """Sweep ``max_attempts`` for an SA-trained NN using an exponential
        decay schedule; plot via ExperimentHelper under 'SA'.
        Removed the unused ``scoring`` local.
        """
        decay = mlrose.decay.ExpDecay()
        nn_model = mlrose.NeuralNetwork(
            hidden_nodes=[50, 50, 50, 50], activation='relu',
            algorithm='simulated_annealing', max_iters=1000,
            bias=True, is_classifier=True, learning_rate=0.0001,
            early_stopping=True, clip_max=5, max_attempts=100,
            schedule=decay, random_state=3)

        exp_helper = ExperimentHelper(self.splitter, nn_model, 'SA')
        param_range = [20, 50, 100, 200, 300]
        exp_helper.model_complexity_exp('max_attempts', param_range)
        print('completed max attempt sa nn')
    def model_complexity_exp_alpha1(self):
        """Two ``alpha`` sweeps for single-hidden-layer ANNs:
        300 units over small alphas (tag '1'), 390 units over larger
        alphas (tag '2')."""
        sweeps = (
            ((300, ), '1',
             np.array([0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1])),
            ((390, ), '2',
             np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])),
        )
        for layers, tag, grid in sweeps:
            self.learner = ANNLearner(hidden_layer_sizes=layers)
            self.expHelper = ExperimentHelper(self.splitter, self.learner, tag)
            self.expHelper.model_complexity_exp('alpha', grid)
 def model_complexity_exp_epoch(self):
     """Sweep ``max_iter`` (training epochs) for the tuned 3x50 ANN
     with early stopping enabled (output tag '2')."""
     self.learner = ANNLearner(activation='relu',
                               alpha=0.01,
                               hidden_layer_sizes=(50, 50, 50),
                               learning_rate='constant',
                               solver='adam',
                               early_stopping=True)
     self.expHelper = ExperimentHelper(self.splitter, self.learner, '2')
     epochs = np.array([1, 10, 50, 100, 200, 500])
     self.expHelper.model_complexity_exp('max_iter', epochs)
Exemple #11
0
    def model_complexity_exp6(self):
        """Fine- and coarse-grained ``gamma`` sweeps for an rbf-kernel SVM
        at C=0.5 (output tags 'rbf8' and 'rbf9')."""
        sweeps = (('rbf8', np.array([0.01, 0.03, 0.05, 0.07, 0.09])),
                  ('rbf9', np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])))
        for tag, grid in sweeps:
            self.learner = SVMLearner(kernel='rbf', C=0.5)
            self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                              tag, 'rbf Kernel')
            self.expHelper.model_complexity_exp('gamma', grid)
        """self.learner = SVMLearner(kernel = 'rbf', C =0.2)
Exemple #12
0
    def model_complexity_exp5(self):
        """Three ``C`` sweeps (small / medium / large ranges) for an
        rbf-kernel SVM (output tags 'rbf1'..'rbf3')."""
        c_grids = (('rbf1', np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.5])),
                   ('rbf2', np.array([0.5, 1, 1.5, 2])),
                   ('rbf3', np.array([2, 3, 4, 5, 6, 9, 12])))
        for tag, grid in c_grids:
            self.learner = SVMLearner(kernel='rbf')
            self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                              tag, 'rbf Kernel')
            self.expHelper.model_complexity_exp('C', grid)
Exemple #13
0
 def experiment_run_test_bank(self):
     """Final train/test run on the bank dataset with the tuned rbf SVM
     (gamma=0.03, C=0.2)."""
     tuned = SVMLearner(gamma=0.03, C=0.2, kernel='rbf')
     self.learner = tuned
     self.expHelper = ExperimentHelper(self.splitter, tuned)
     self.expHelper.experiment_run_test()
Exemple #14
0
 def experiment_run_test_wine_linear(self):
     """Final train/test run on the wine dataset with a linear SVM
     (C=0.05, intercept fitted)."""
     self.learner = LinearSVMLearner(fit_intercept=True, C=0.05)
     self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                       'Linear Kernel')
     self.expHelper.experiment_run_test()
Exemple #15
0
 def learning_curve_iter2_bank(self):
     """Learning curve on the bank dataset with the tuned rbf SVM
     (gamma=0.03, C=0.2), output tag '-iter-2'."""
     tuned = SVMLearner(gamma=0.03, C=0.2, kernel='rbf')
     self.learner = tuned
     self.expHelper = ExperimentHelper(self.splitter, tuned, '-iter-2')
     self.expHelper.learning_curve_exp()
Exemple #16
0
 def learning_curve_iter2_wine_linear(self):
     """Learning curve on the wine dataset with a linear SVM (C=0.05)."""
     self.learner = LinearSVMLearner(fit_intercept=True, C=0.05)
     self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                       'Linear Kernel')
     self.expHelper.learning_curve_exp()
class BoostingExp:
    """Boosting experiment driver: model-complexity sweeps, learning
    curves, and final train/test runs for the 'Bank' and wine datasets.

    Bug fix: ``learning_curve_iter2_wine`` and ``learning_curve_iter2_bank``
    each constructed an untagged ExperimentHelper and immediately
    overwrote it with the tagged one; the dead construction is removed.
    """

    def __init__(self, reader, helper, splitter):
        self.reader = reader
        self.helper = helper
        self.learner = BoostLearner()
        self.splitter = splitter
        self.expHelper = ExperimentHelper(self.splitter, self.learner)

    def experiment(self):
        """Run the boosting experiment suite for the loaded dataset."""
        # Perform learning curve
        #self.expHelper.learning_curve_exp()
        self.model_complexity_exp()
        self.model_complexity_exp_2()
        if self.splitter.reader.dataset == 'Bank':
            print('bank')
            self.learning_curve_iter2_bank()
            self.experiment_run_test_bank()
        else:
            self.experiment_run_test_wine()
            self.learning_curve_iter2_wine()

    def model_complexity_exp(self):
        """Sweep ``max_depth`` for a fresh BoostLearner."""
        self.learner = BoostLearner()
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        param_range = np.array([2, 4, 6, 8, 9, 10])
        self.expHelper.model_complexity_exp('max_depth', param_range)

    def model_complexity_exp_2(self):
        """Sweep ``learning_rate`` at fixed max_depth=2."""
        self.learner = BoostLearner(max_depth=2)
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        param_range = np.array([0.1, 0.2, 0.4, 0.6, 0.8, 0.9])
        self.expHelper.model_complexity_exp('learning_rate', param_range)

    def learning_curve_iter2_wine(self):
        """Learning curve for the tuned wine booster ('-iter-2' tag)."""
        self.learner = BoostLearner(max_depth=9, learning_rate=0.4)
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          '-iter-2')
        self.expHelper.learning_curve_exp()

    def learning_curve_iter2_bank(self):
        """Learning curve for the tuned bank booster ('-iter-2' tag)."""
        self.learner = BoostLearner(max_depth=2, learning_rate=0.1)
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          '-iter-2')
        self.expHelper.learning_curve_exp()

    def experiment_run_test_wine(self):
        """Final train/test run with the tuned wine booster."""
        self.learner = BoostLearner(max_depth=9, learning_rate=0.4)
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

    def experiment_run_test_bank(self):
        """Final train/test run with the tuned bank booster."""
        self.learner = BoostLearner(max_depth=2, learning_rate=0.1)
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()
 def learning_curve_iter2_wine(self):
     """Learning curve on wine with the tuned decision tree
     (max_depth=4, max_leaf_nodes=5), output tag '-iter-2'."""
     tree = DTLearner(max_depth=4, max_leaf_nodes=5)
     self.learner = tree
     self.expHelper = ExperimentHelper(self.splitter, tree, '-iter-2')
     self.expHelper.learning_curve_exp()
Exemple #19
0
 def experiment_run_test_bank(self):
     """Final train/test run on the bank dataset with the tuned KNN
     (euclidean distance, k=38, uniform weights)."""
     knn = KNNLearner(metric='euclidean',
                      n_neighbors=38,
                      weights='uniform')
     self.learner = knn
     self.expHelper = ExperimentHelper(self.splitter, knn)
     self.expHelper.experiment_run_test()
Exemple #20
0
 def __init__(self, problem_type):
     """Record the problem type and draw the shared random seeds."""
     self.problem_type = problem_type
     self.expHelper = ExperimentHelper()
     self.rand_seeds = self.expHelper.create_random_seeds()
Exemple #21
0
class ExperimentRunnerKnapsack:
    def __init__(self, problem_type):
        """Record the problem type and draw the shared random seeds."""
        self.problem_type = problem_type
        self.expHelper = ExperimentHelper()
        self.rand_seeds = self.expHelper.create_random_seeds()

    def experiment(self):
        """Run the full knapsack experiment battery: RHC and SA parameter
        sweeps (SA across all three decay schedules), GA and MIMIC sweeps,
        then the baseline and 'optimal' runs for each algorithm."""

        # Random hill climbing: max-attempts and max-iterations sweeps.
        self.experiment_rhc_2()
        self.experiment_rhc_3()

        # Simulated annealing: attempts/iterations sweeps for the
        # exponential, geometric, and arithmetic schedules.
        self.experiment_sa_2()
        self.experiment_sa_3()
        self.experiment_sa_4()
        self.experiment_sa_5()
        self.experiment_sa_6()
        self.experiment_sa_7()

        # Genetic algorithm sweeps (input-size sweep _ga_1 disabled).
        #self.experiment_ga_1()
        self.experiment_ga_2()
        self.experiment_ga_3()
        self.experiment_ga_4()
        self.experiment_ga_5()

        # MIMIC sweeps (input-size sweep _mimic_1 disabled).
        #self.experiment_mimic_1()
        self.experiment_mimic_2()
        self.experiment_mimic_3()
        self.experiment_mimic_4()
        self.experiment_mimic_5()

        # Baseline runs, then tuned/"optimal" runs per algorithm.
        # NOTE(review): 'experiment_mimc' (sic) — confirm the method name
        # really is spelled without the second 'i'.
        self.experiment_sa()
        self.experiment_ga()
        self.experiment_mimc()
        self.experiment_optimal_rhc()
        self.experiment_optimal_sa()
        self.experiment_optimal_ga()
        self.experiment_optimal_mimic()

    def experiment_rhc_1(self):
        """RHC: vary the number of restarts on a fixed-size problem and
        plot the mean best fitness across random seeds."""
        print('restart vary')
        init_state = None
        restart_lengths = np.arange(10, 800, 100)
        result = np.zeros((len(self.rand_seeds), len(restart_lengths)))
        print(self.problem_type)
        for row, rand_state in enumerate(self.rand_seeds):
            prob_length = 20
            for col, restart_length in enumerate(restart_lengths):
                # max_attempts chosen by trial and error
                problem = CustomProblem(prob_length,
                                        self.problem_type).create_problem()
                alg = RHC(problem, init_state, rand_state, 100, np.inf,
                          restart_length.item())
                best_state, best_fitness = alg.optimize()
                result[row][col] = best_fitness

        print('best fitness')
        print(str(result))
        print('best state')
        print(best_state)
        avg_result = np.mean(result, axis=0)
        print('avg result for varying input size' + str(avg_result))
        title = self.problem_type + ' with RHC - # of Restarts Variation'
        plot_curve(restart_lengths, avg_result, title, '# of Restarts',
                   'Best Score')

    def experiment_rhc_11(self):
        """RHC: vary the problem input size and plot mean best fitness.

        Bug fix: ``alg.optimize()`` returns a (best_state, best_fitness)
        pair — every sibling experiment unpacks it — but this method stored
        the whole tuple into a numpy float cell, which raises at runtime.
        The fitness is now unpacked before being recorded.
        """
        init_state = None
        prob_lengths = np.arange(7, 30)
        result = np.zeros((len(self.rand_seeds), len(prob_lengths)))
        print(self.problem_type)
        for i, rand_state in enumerate(self.rand_seeds):
            for j, prob_length in enumerate(prob_lengths):
                fl = CustomProblem(prob_length.item(), self.problem_type)
                problem = fl.create_problem()
                alg = RHC(problem, init_state, rand_state, 10, 1000)
                _, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        print(str(result))
        avg_result = np.mean(result, axis=0)
        print('avg result for varying input size' + str(avg_result))
        title = self.problem_type + ' with RHC - Input Size Variation'
        plot_curve(prob_lengths, avg_result, title, 'Input Size', 'Best Score')

    def experiment_rhc_2(self):
        """RHC: vary ``max_attempts`` with restarts fixed at 300; plot the
        mean best fitness across seeds."""
        init_state = None
        max_attempts = np.array(
            [5, 10, 15, 30, 40, 50, 60, 80, 100, 200, 300, 350])
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        best_score = None
        for i, rand_state in enumerate(self.rand_seeds):
            restarts = 300
            for j, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = RHC(problem, init_state, rand_state, attempts.item(),
                          np.inf, restarts)
                best_score, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        print('best score')
        print(best_score)
        title = self.problem_type + ' with RHC - Max Attempts Variation'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_rhc_22(self):
        """RHC: vary ``max_attempts`` with zero restarts; plot the mean
        best fitness across seeds."""
        init_state = None
        max_attempts = np.array(
            [5, 10, 15, 30, 40, 50, 60, 80, 100, 200, 300, 350])
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        best_score = None
        for i, rand_state in enumerate(self.rand_seeds):
            restarts = 0
            for j, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = RHC(problem, init_state, rand_state, attempts.item(),
                          np.inf, restarts)
                best_score, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        print('best score')
        print(best_score)
        title = self.problem_type + ' with RHC - Max Attempts Variation - 0 restart'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_rhc_3(self):
        """RHC: vary ``max_iters``; plot mean best fitness across seeds.

        Robustness/consistency fix: initialize ``best_score`` before the
        loops (as the sibling experiments do) so the post-loop print
        cannot raise NameError if the seed list is empty.
        """
        init_state = None
        max_iters = np.arange(100, 5000, 100)
        result = np.zeros((len(self.rand_seeds), len(max_iters)))
        best_score = None
        for i, rand_state in enumerate(self.rand_seeds):
            for j, iters in enumerate(max_iters):
                prob_length = 20
                fl = CustomProblem(prob_length, self.problem_type)
                problem = fl.create_problem()
                max_attempt = 200
                restarts = 150
                alg = RHC(problem, init_state, rand_state, max_attempt,
                          iters.item(), restarts)
                best_score, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = np.mean(result, axis=0)
        print('avg result ' + str(avg_result))
        print('best score')
        print(best_score)
        title = self.problem_type + ' with RHC - Max Iterations Variation'
        plot_curve(max_iters, avg_result, title, 'Max Iterations',
                   'Best Score')

    def experiment_rhc_4(self):
        """RHC on SixPeaks: vary the threshold percentage ``t_pct``; plot
        the mean best fitness across seeds."""
        init_state = None
        t_pcts = np.arange(0.1, 1, 0.1)
        result = np.zeros((len(self.rand_seeds), len(t_pcts)))
        best_score = None
        max_iter = np.inf
        for i, rand_state in enumerate(self.rand_seeds):
            for j, t in enumerate(t_pcts):
                # fixed settings: problem size 20, 400 restarts, 50 attempts
                problem = SixPeaks(20, t.item()).create_problem()
                alg = RHC(problem, init_state, rand_state, 50, max_iter, 400)
                best_score, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = np.mean(result, axis=0)
        print('avg result ' + str(avg_result))
        print('best score')
        print(best_score)
        title = self.problem_type + ' with RHC - Threshold Variation'
        plot_curve(t_pcts, avg_result, title, 'Threshold', 'Best Score')

##################################################################
########### SA ###################################
#SA(problem, 42, 0, 10, 1000)

    def experiment_sa_11(self):
        """SA: vary the problem input size; plot mean best fitness.

        Cleanup: ``best_state = None`` was assigned twice in a row; one
        assignment is kept.
        """
        init_state = None
        prob_lengths = np.arange(7, 30)
        # 0 selects the first decay schedule (exponential, judging by the
        # '-Exp' titles of sibling experiments using the same value).
        schedule_var = 0
        best_state = None
        result = np.zeros((len(self.rand_seeds), len(prob_lengths)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, prob_length in enumerate(prob_lengths):
                fl = CustomProblem(prob_length.item(), self.problem_type)
                problem = fl.create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var, 10,
                         1000)
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        print(str(result))
        print('best_state')
        print(best_state)
        avg_result = np.mean(result, axis=0)
        print('avg result for varying input size' + str(avg_result))
        title = self.problem_type + ' with SA - Input Size Variation'
        plot_curve(prob_lengths, avg_result, title, 'Input Size', 'Best Score')

    def experiment_sa_22(self):
        """SA (schedule 0): narrow ``max_attempts`` sweep (50..55 step 5);
        plot the mean best fitness across seeds."""
        init_state = None
        schedule_var = 0
        best_state = None
        max_attempts = np.arange(50, 60, 5)
        print(max_attempts)
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var,
                         attempts.item(), np.inf)
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        print('best_state')
        print(best_state)
        title = self.problem_type + ' with SA - Max Attempts Variation'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_sa_2(self):
        """SA (schedule 0, exponential per the title): vary ``max_attempts``;
        plot the mean best fitness across seeds."""
        init_state = None
        schedule_var = 0
        best_state = None
        max_attempts = np.array([5, 10, 15, 40, 60, 80, 100, 150, 200])
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var,
                         attempts.item(), np.inf)
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        print('best_state')
        print(best_state)
        title = self.problem_type + ' with SA - Max Attempts Variation-Exp'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_sa_3(self):
        """SA (schedule 0, exponential decay): vary ``max_iters``; plot the
        mean best fitness across seeds."""
        init_state = None
        schedule_var = 0
        best_state = None
        max_iters = np.arange(100, 5000, 100)
        result = np.zeros((len(self.rand_seeds), len(max_iters)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, iters in enumerate(max_iters):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var, 200,
                         iters.item())
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('best_state')
        print(best_state)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with SA - Max Iterations Variation-Exp Decay'
        plot_curve(max_iters, avg_result, title, 'Max Iterations',
                   'Best Score')

    def experiment_sa_33(self):
        """SA (schedule 2, arithmetic per the title): vary ``max_iters``;
        plot the mean best fitness across seeds."""
        init_state = None
        schedule_var = 2
        best_state = None
        max_iters = np.arange(100, 5000, 100)
        result = np.zeros((len(self.rand_seeds), len(max_iters)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, iters in enumerate(max_iters):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var, 200,
                         iters.item())
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('best_state')
        print(best_state)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with SA - Max Iterations Variation-Arith'
        plot_curve(max_iters, avg_result, title, 'Max Iterations',
                   'Best Score')

    def experiment_sa_4(self):
        """SA (schedule 1, geometric per the title): vary ``max_attempts``;
        plot the mean best fitness across seeds."""
        init_state = None
        schedule_var = 1
        best_state = None
        max_attempts = np.array([5, 10, 15, 40, 60, 80, 100, 150, 200])
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var,
                         attempts.item(), np.inf)
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        print('best_state')
        print(best_state)
        title = self.problem_type + ' with SA - Max Attempts Variation -Geom'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_sa_5(self):
        """SA (schedule 1, geometric per the title): vary ``max_iters``;
        plot the mean best fitness across seeds."""
        init_state = None
        schedule_var = 1
        best_state = None
        max_iters = np.arange(100, 5000, 100)
        result = np.zeros((len(self.rand_seeds), len(max_iters)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, iters in enumerate(max_iters):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var, 200,
                         iters.item())
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('best_state')
        print(best_state)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with SA - Max Iter Variation - Geom'
        plot_curve(max_iters, avg_result, title, 'Max Iterations',
                   'Best Score')

    def experiment_sa_6(self):
        """SA (schedule 2, arithmetic per the title): vary ``max_attempts``;
        plot the mean best fitness across seeds."""
        init_state = None
        schedule_var = 2
        best_state = None
        max_attempts = np.array([5, 10, 15, 40, 60, 80, 100, 150, 200])
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var,
                         attempts.item(), np.inf)
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        print('best_state')
        print(best_state)
        title = self.problem_type + ' with SA - Max Attempts Variation -Arith'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_sa_7(self):
        """SA (schedule 2, arithmetic per the title): vary ``max_iters``;
        plot the mean best fitness across seeds."""
        init_state = None
        schedule_var = 2
        best_state = None
        max_iters = np.arange(100, 5000, 100)
        result = np.zeros((len(self.rand_seeds), len(max_iters)))
        for i, rand_state in enumerate(self.rand_seeds):
            for j, iters in enumerate(max_iters):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = SA(problem, init_state, rand_state, schedule_var, 200,
                         iters.item())
                best_state, best_fitness = alg.optimize()
                result[i][j] = best_fitness

        avg_result = result.mean(axis=0)
        print('best_state')
        print(best_state)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with SA - Max Iter Variation - Arith'
        plot_curve(max_iters, avg_result, title, 'Max Iterations',
                   'Best Score')

#####################################################################
# ##  GA ############################
# GA(problem, 42, 10, 1000, 200, 0.1)

    def experiment_ga_1(self):
        """Sweep problem size for GA; plot mean best fitness across seeds."""
        prob_lengths = np.arange(7, 30)
        result = np.zeros((len(self.rand_seeds), len(prob_lengths)))
        pop_size = 200
        mutation_prob = 0.1

        for row, rand_state in enumerate(self.rand_seeds):
            for col, length in enumerate(prob_lengths):
                problem = CustomProblem(length.item(),
                                        self.problem_type).create_problem()
                # max_attempts=10, max_iters=1000 fixed for this sweep.
                alg = GA(problem, rand_state, 10, 1000, pop_size,
                         mutation_prob)
                result[row, col] = alg.optimize()

        print(str(result))
        avg_result = result.mean(axis=0)
        print('avg result for varying input size' + str(avg_result))
        title = self.problem_type + ' with GA - Input Size Variation'
        plot_curve(prob_lengths, avg_result, title, 'Input Size', 'Best Score')

    def experiment_ga_2(self):
        """Sweep GA max_attempts; plot mean best fitness across seeds."""
        max_attempts = np.array([5, 10, 15, 30, 40, 50, 60, 80, 100])
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        pop_size = 200
        mutation_prob = 0.1

        for row, rand_state in enumerate(self.rand_seeds):
            for col, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                # No iteration cap: only max_attempts limits the search.
                alg = GA(problem, rand_state, attempts.item(), np.inf,
                         pop_size, mutation_prob)
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with GA - Max Attempts Variation'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_ga_3(self):
        """Sweep GA max_iters; plot mean best fitness across seeds."""
        max_iters = np.arange(500, 3000, 400)
        result = np.zeros((len(self.rand_seeds), len(max_iters)))
        pop_size = 200
        mutation_prob = 0.1

        for row, rand_state in enumerate(self.rand_seeds):
            for col, iters in enumerate(max_iters):
                problem = CustomProblem(20, self.problem_type).create_problem()
                # max_attempts fixed at 20 while iterations vary.
                alg = GA(problem, rand_state, 20, iters.item(), pop_size,
                         mutation_prob)
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with GA - Max Iterations Variation'
        plot_curve(max_iters, avg_result, title, 'Max Iterations',
                   'Best Score')

    def experiment_ga_4(self):
        """Sweep GA population size; plot mean best fitness across seeds."""
        pop_sizes = np.arange(50, 1000, 100)
        result = np.zeros((len(self.rand_seeds), len(pop_sizes)))
        mutation_prob = 0.1
        max_iter = np.inf

        for row, rand_state in enumerate(self.rand_seeds):
            for col, pop in enumerate(pop_sizes):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = GA(problem, rand_state, 20, max_iter, pop.item(),
                         mutation_prob)
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with GA - Population Size Variation'
        plot_curve(pop_sizes, avg_result, title, 'Population Size',
                   'Best Score')

    def experiment_ga_5(self):
        """Sweep GA mutation probability; plot mean best fitness across seeds."""
        mutation_probs = np.arange(0.1, 1, 0.1)
        result = np.zeros((len(self.rand_seeds), len(mutation_probs)))
        pop_size = 1000
        max_iter = np.inf

        for row, rand_state in enumerate(self.rand_seeds):
            for col, mut in enumerate(mutation_probs):
                problem = CustomProblem(20, self.problem_type).create_problem()
                # max_attempts fixed at 60 while mutation prob varies.
                alg = GA(problem, rand_state, 60, max_iter, pop_size,
                         mut.item())
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with GA - Mutation Prob Variation'
        plot_curve(mutation_probs, avg_result, title, 'Mutation Prob',
                   'Best Score')

#####################################################################
# ##  mimic ############################
# Mimic(problem, 42, 10, 1000, 200, 0.1)

    def experiment_mimic_1(self):
        """Sweep problem size for MIMIC; plot mean best fitness across seeds."""
        prob_lengths = np.arange(7, 30)
        result = np.zeros((len(self.rand_seeds), len(prob_lengths)))
        pop_size = 200
        keep_pct = 0.1

        for row, rand_state in enumerate(self.rand_seeds):
            for col, length in enumerate(prob_lengths):
                problem = CustomProblem(length.item(),
                                        self.problem_type).create_problem()
                # max_attempts=10, max_iters=1000 fixed for this sweep.
                alg = Mimic(problem, rand_state, 10, 1000, pop_size, keep_pct)
                result[row, col] = alg.optimize()

        print(str(result))
        avg_result = result.mean(axis=0)
        print('avg result for varying input size' + str(avg_result))
        title = self.problem_type + ' with mimic - Input Size Variation'
        plot_curve(prob_lengths, avg_result, title, 'Input Size', 'Best Score')

    def experiment_mimic_2(self):
        """Sweep MIMIC max_attempts; plot mean best fitness across seeds."""
        max_attempts = np.array([5, 10, 30, 50, 60, 80, 100, 150, 200])
        result = np.zeros((len(self.rand_seeds), len(max_attempts)))
        pop_size = 200
        keep_pct = 0.1
        max_iter = np.inf

        for row, rand_state in enumerate(self.rand_seeds):
            for col, attempts in enumerate(max_attempts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = Mimic(problem, rand_state, attempts.item(), max_iter,
                            pop_size, keep_pct)
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with Mimic - Max Attempts Variation'
        plot_curve(max_attempts, avg_result, title, 'Max Attempts',
                   'Best Score')

    def experiment_mimic_3(self):
        """Sweep MIMIC max_iters; plot mean best fitness across seeds."""
        max_iters = np.arange(1000, 5000, 100)
        result = np.zeros((len(self.rand_seeds), len(max_iters)))
        pop_size = 800
        keep_pct = 0.6

        for row, rand_state in enumerate(self.rand_seeds):
            for col, iters in enumerate(max_iters):
                problem = CustomProblem(20, self.problem_type).create_problem()
                # max_attempts fixed at 200 while iterations vary.
                alg = Mimic(problem, rand_state, 200, iters.item(),
                            pop_size, keep_pct)
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with Mimic - Max Iterations Variation'
        plot_curve(max_iters, avg_result, title, 'Max Iterations',
                   'Best Score')

    def experiment_mimic_4(self):
        """Sweep MIMIC population size; plot mean best fitness across seeds."""
        pop_sizes = np.arange(200, 1000, 200)
        result = np.zeros((len(self.rand_seeds), len(pop_sizes)))
        max_iter = np.inf
        keep_pct = 0.1

        for row, rand_state in enumerate(self.rand_seeds):
            for col, pop in enumerate(pop_sizes):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = Mimic(problem, rand_state, 200, max_iter,
                            pop.item(), keep_pct)
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with Mimic - Population Size Variation'
        plot_curve(pop_sizes, avg_result, title, 'Population Size',
                   'Best Score')

    def experiment_mimic_5(self):
        """Sweep MIMIC keep percentage; plot mean best fitness across seeds."""
        keep_pcts = np.arange(0.1, 1, 0.1)
        result = np.zeros((len(self.rand_seeds), len(keep_pcts)))
        pop_size = 800
        max_iter = np.inf

        for row, rand_state in enumerate(self.rand_seeds):
            for col, pct in enumerate(keep_pcts):
                problem = CustomProblem(20, self.problem_type).create_problem()
                alg = Mimic(problem, rand_state, 200, max_iter,
                            pop_size, pct.item())
                result[row, col] = alg.optimize()

        avg_result = result.mean(axis=0)
        print('avg result ' + str(avg_result))
        title = self.problem_type + ' with Mimic - Keep PCT Variation'
        plot_curve(keep_pcts, avg_result, title, 'Keep PCT', 'Best Score')

    def experiment_sa(self):
        """Smoke-run SA on a small FlipFlop instance; result is discarded."""
        problem = FlipFlop(7).create_problem()
        SA(problem, 42, 0, 10, 1000).optimize()

    def experiment_ga(self):
        """Smoke-run GA on a small FlipFlop instance; result is discarded."""
        problem = FlipFlop(7).create_problem()
        GA(problem, 42, 10, 1000, 200, 0.1).optimize()

    def experiment_mimc(self):
        """Smoke-run MIMIC on a small FlipFlop instance; result is discarded.

        Name keeps the historical 'mimc' spelling so existing callers work.
        """
        problem = FlipFlop(7).create_problem()
        Mimic(problem, 42, 10, 1000, 200, 0.1).optimize()

    def experiment_optimal_rhc(self):
        """Time one RHC run with its tuned hyper-parameters (length-20 problem)."""
        rand_state = 42
        max_attempt = 100
        max_iter = 1000
        restarts = 100
        problem = CustomProblem(20, self.problem_type).create_problem()
        start = time.time()
        alg = RHC(problem, None, rand_state, max_attempt, max_iter,
                  restarts)
        best_state, best_fitness = alg.optimize()
        elapsed = abs(time.time() - start)
        print('time taken for RHC- Knapsack: ' + str(elapsed))

    def experiment_optimal_sa(self):
        """Time one SA run with its tuned hyper-parameters (length-20 problem)."""
        rand_state = 42
        schedule_var = 0
        max_attempt = 110
        problem = CustomProblem(20, self.problem_type).create_problem()
        start = time.time()
        alg = SA(problem, None, rand_state, schedule_var, max_attempt,
                 1000)
        best_state, best_fitness = alg.optimize()
        elapsed = abs(time.time() - start)
        print('time taken for SA - Knapsack: ' + str(elapsed))

    def experiment_optimal_ga(self):
        """Time one GA run with its tuned hyper-parameters (length-20 problem)."""
        rand_state = 42
        max_attempt = 70
        max_iter = 1000
        pop_size = 1200
        mutation_prob = 0.1
        problem = CustomProblem(20, self.problem_type).create_problem()
        start = time.time()
        alg = GA(problem, rand_state, max_attempt, max_iter, pop_size,
                 mutation_prob)
        best_fitness = alg.optimize()
        elapsed = abs(time.time() - start)
        print('time taken for GA- Knapsack: ' + str(elapsed))

    def experiment_optimal_mimic(self):
        """Time one MIMIC run with its tuned hyper-parameters.

        Builds a length-20 problem, runs Mimic once and prints the
        wall-clock time taken. (Removed an unused `mutation_prob` local:
        MIMIC takes keep_pct, not a mutation probability.)
        """
        prob_length = 20
        fl = CustomProblem(prob_length, self.problem_type)
        problem = fl.create_problem()
        pop_size = 800
        rand_state = 42
        max_attempt = 200
        max_iter = 1000
        keep_pct = 0.1  # fraction of samples retained each MIMIC iteration
        start = time.time()
        alg = Mimic(problem, rand_state, max_attempt, max_iter, pop_size,
                    keep_pct)
        best_fitness = alg.optimize()
        end = time.time()
        diff = abs(end - start)
        print('time taken for Mimic - Knapsack: ' + str(diff))
 def experiment_run_test_bank(self):
     # NOTE(review): this method sits at an odd 1-space indentation and builds
     # a DTLearner, unlike every ANN variant nearby -- it looks like a
     # copy/paste remnant from a decision-tree experiment class. Confirm which
     # class (if any) it belongs to before relying on it.
     # Runs the train/test experiment with a small, depth-limited tree.
     self.learner = DTLearner(max_depth=4, max_leaf_nodes=5)
     self.expHelper = ExperimentHelper(self.splitter, self.learner)
     self.expHelper.experiment_run_test()
class ANNExp:
    def __init__(self, reader, helper, splitter):
        """Wire the ANN experiment up with its data reader, helper and splitter."""
        self.reader = reader
        self.helper = helper
        self.splitter = splitter
        # Default learner; individual experiments replace it with tuned ones.
        self.learner = ANNLearner()
        self.expHelper = ExperimentHelper(self.splitter, self.learner)

    def experiment(self):
        """Run the ANN experiment suite: model-complexity sweeps, then the
        dataset-specific learning curve and final train/test run."""

        self.model_complexity_exp()
        self.model_complexity_exp_alpha1()
        self.model_complexity_exp_epoch()
        self.model_complexity_exp_epoch2()
        # Branch on which dataset the splitter was built from.
        if (self.splitter.reader.dataset == 'Bank'):
            print('bank')
            self.learning_curve_iter2_bank()
            self.experiment_run_test_bank()
        else:
            self.learning_curve_iter2_wine()
            self.experiment_run_test_wine()

        # Perform learning curve
        # The string statements below are disabled experiment calls kept for
        # reference; they are no-ops at runtime.
        """self.expHelper.learning_curve_exp()
        self.model_complexity_exp()
        self.model_complexity_exp_alpha1()
        self.model_complexity_exp_alpha2()
        #self.exp_grid_search()"""
        #self.learning_curve_iter2()
        #self.exp_grid_search3()

        #self.learner.train(self.splitter.X_train, self.splitter.y_train)
        #y_pred = self.learner.query(self.splitter.X_test)
        """print("Final Accuracy for " + str(self.learner.__class__)+": ", 
                        metrics.accuracy_score(self.splitter.y_test, y_pred))"""

    def model_complexity_exp(self):
        """Validation curve over single-hidden-layer widths (fresh learner)."""
        self.learner = ANNLearner()
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        widths = [(50, ), (100, ), (200, ), (300, ), (400, ), (500, )]
        self.expHelper.model_complexity_exp('hidden_layer_sizes',
                                            np.array(widths))

    def model_complexity_exp11(self):
        """Validation curve over network depth: 1 to 5 stacked 50-unit layers."""
        self.learner = ANNLearner()
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        depths = [(50, ), (50, 50), (50, 50, 50), (50, 50, 50, 50),
                  (50, 50, 50, 50, 50)]
        # NOTE: ragged tuples make np.array produce an object array.
        self.expHelper.model_complexity_exp('hidden_layer_sizes',
                                            np.array(depths))

    def model_complexity_exp_alpha21(self):
        """Alpha (L2 penalty) sweep for a 3x50 network."""
        self.learner = ANNLearner(hidden_layer_sizes=(50, 50, 50))
        self.expHelper = ExperimentHelper(self.splitter, self.learner, '2')
        alphas = np.array([0.0001, 0.001, 0.002, 0.003, 0.005, 0.008])
        self.expHelper.model_complexity_exp('alpha', alphas)

    def model_complexity_exp_epoch(self):
        """max_iter sweep for a 3x50 relu/adam net WITH early stopping."""
        self.learner = ANNLearner(activation='relu',
                                  alpha=0.01,
                                  hidden_layer_sizes=(50, 50, 50),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=True)
        self.expHelper = ExperimentHelper(self.splitter, self.learner, '2')
        iteration_caps = np.array([1, 10, 50, 100, 200, 500])
        self.expHelper.model_complexity_exp('max_iter', iteration_caps)

    def model_complexity_exp_epoch2(self):
        """max_iter sweep for a single (300,) layer net WITHOUT early stopping."""
        self.learner = ANNLearner(hidden_layer_sizes=(300, ),
                                  alpha=0.0001,
                                  early_stopping=False)
        self.expHelper = ExperimentHelper(self.splitter, self.learner, '3')
        iteration_caps = np.array([1, 10, 50, 100, 200, 500, 1000])
        self.expHelper.model_complexity_exp('max_iter', iteration_caps)

    def model_complexity_exp_epoch3(self):
        """max_iter sweep for a (200, 100) tanh/sgd net with early stopping."""
        self.learner = ANNLearner(hidden_layer_sizes=(200, 100),
                                  alpha=0.008,
                                  early_stopping=True,
                                  solver='sgd',
                                  activation='tanh')
        self.expHelper = ExperimentHelper(self.splitter, self.learner, '4')
        iteration_caps = np.array([1, 10, 50, 100, 200, 500, 1000])
        self.expHelper.model_complexity_exp('max_iter', iteration_caps)

    def model_complexity_exp_alpha1(self):
        """Two alpha sweeps: small alphas on a (300,) net, then larger
        alphas on a (390,) net."""
        self.learner = ANNLearner(hidden_layer_sizes=(300, ))
        self.expHelper = ExperimentHelper(self.splitter, self.learner, '1')
        small_alphas = np.array([0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05,
                                 0.1])
        self.expHelper.model_complexity_exp('alpha', small_alphas)

        self.learner = ANNLearner(hidden_layer_sizes=(390, ))
        self.expHelper = ExperimentHelper(self.splitter, self.learner, '2')
        large_alphas = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
        self.expHelper.model_complexity_exp('alpha', large_alphas)

    def model_complexity_exp_alpha2(self):
        """Coarse, log-spaced alpha sweep on a default-architecture learner."""
        self.learner = ANNLearner()
        self.expHelper = ExperimentHelper(self.splitter, self.learner, '2')
        alphas = np.array([0.0001, 0.001, 0.01, 0.1, 1, 5])
        self.expHelper.model_complexity_exp('alpha', alphas)

    def exp_grid_search2(self):
        """Grid search over deep 50-unit stacks, alpha, momentum and max_iter."""
        param_grid = {
            'hidden_layer_sizes': [(50, 50, 50), (50, 50, 50, 50),
                                   (50, 50, 50, 50, 50)],
            'activation': ['relu'],
            'solver': ['adam'],
            'alpha': [0.1, 0.2, 0.3, 0.4, 0.05],
            'learning_rate': ['constant'],
            'momentum': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
            'max_iter': [200, 300, 500, 700, 1000, 1500, 2000, 2500],
        }
        self.expHelper.perform_grid_search(param_grid)

    def exp_grid_search(self):
        """Broad grid search: architecture, regularization, lr, adam betas."""
        param_grid = {
            'hidden_layer_sizes': [(50, ), (20, 20, 20)],
            'activation': ['relu'],
            'solver': ['adam'],
            'alpha': [0.0001, 0.05, 0.1, 0.2, 0.4, 0.5, 0.6],
            'learning_rate': ['constant'],
            'learning_rate_init': [0.001, 0.002, 0.004],
            'shuffle': [True, False],
            'early_stopping': [True, False],
            'beta_1': [0.9, 0.5],
            'beta_2': [0.999, 0.5],
        }
        self.expHelper.perform_grid_search(param_grid)

    def exp_grid_search3(self):
        """Grid search over two-layer widths with the sgd solver."""
        param_grid = {
            'hidden_layer_sizes': [(50, 50), (100, 100), (200, 200),
                                   (300, 300), (400, 400), (500, 500)],
            'activation': ['relu'],
            'solver': ['sgd'],
            'alpha': [0.0001, 0.05, 0.1, 0.2, 0.4, 0.5, 0.6],
            'learning_rate': ['constant', 'adaptive'],
        }
        self.expHelper.perform_grid_search(param_grid)

    def exp_grid_search4(self):
        """Grid search over three-layer widths with sgd and fixed alpha."""
        param_grid = {
            'hidden_layer_sizes': [(50, 50, 50), (100, 100, 100),
                                   (200, 200, 200), (300, 300, 100),
                                   (400, 400, 100), (500, 500, 100)],
            'activation': ['relu'],
            'solver': ['sgd'],
            'alpha': [0.2],
            'learning_rate': ['constant'],
        }
        self.expHelper.perform_grid_search(param_grid)

    def learning_curve_iter21_wine(self):
        """Learning curve for wine: single (100,) relu/adam net, alpha 0.008."""
        config = dict(activation='relu',
                      alpha=0.008,
                      hidden_layer_sizes=(100, ),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          '-iter-2')
        self.expHelper.learning_curve_exp()

    def learning_curve_iter2_wine(self):
        """Learning curve for wine: 4x50 relu/adam net, alpha 0.0001."""
        config = dict(activation='relu',
                      alpha=0.0001,
                      hidden_layer_sizes=(50, 50, 50, 50),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          '-iter-3')
        self.expHelper.learning_curve_exp()

    def learning_curve_iter2_bank(self):
        """Learning curve for bank: 3x50 relu/adam net, alpha 0.01."""
        config = dict(activation='relu',
                      alpha=0.01,
                      hidden_layer_sizes=(50, 50, 50),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          '-iter-3')
        self.expHelper.learning_curve_exp()

    def learning_curve_iter21_bank(self):
        """Learning curve for bank: single (300,) relu/adam net, alpha 0.01."""
        config = dict(activation='relu',
                      alpha=0.01,
                      hidden_layer_sizes=(300, ),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          '-iter-2')
        self.expHelper.learning_curve_exp()

    def experiment_run_test_wine2(self):
        """Final train/test for wine: (100,) relu/adam, early stopping on."""
        config = dict(activation='relu',
                      alpha=0.008,
                      hidden_layer_sizes=(100, ),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=True)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

    def experiment_run_test_bank2(self):
        """Final train/test for bank: (390,) relu/adam, heavy alpha 0.7."""
        config = dict(activation='relu',
                      alpha=0.7,
                      hidden_layer_sizes=(390, ),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

    def experiment_run_test_bank(self):
        """Final train/test for bank: 3x50 relu/adam net, alpha 0.01."""
        config = dict(activation='relu',
                      alpha=0.01,
                      hidden_layer_sizes=(50, 50, 50),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

    def experiment_run_test_wine(self):
        """Final train/test for wine: 4x50 relu/adam net, alpha 0.0001."""
        config = dict(activation='relu',
                      alpha=0.0001,
                      hidden_layer_sizes=(50, 50, 50, 50),
                      learning_rate='constant',
                      solver='adam',
                      early_stopping=False)
        self.learner = ANNLearner(**config)
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

    def experiment_run_test_bank_iter2(self):
        """Final train/test for bank with a deep 7x200 relu/adam net.

        Fix: the progress print previously reported a stale configuration
        ('100, 100, 100, 100, 100, 100,50 alpha 0.3') that did not match
        the learner actually built; it now reflects the real layers/alpha.
        A block of commented-out alternative configurations (no-op string
        literals) was removed as dead code.
        """
        self.learner = ANNLearner(activation='relu',
                                  alpha=0.06,
                                  hidden_layer_sizes=(200, 200, 200, 200, 200,
                                                      200, 200),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=True,
                                  max_iter=600,
                                  momentum=0.4)
        print('200, 200, 200, 200, 200, 200, 200 alpha 0.06')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

    def experiment_run_test_bank_iter(self):
        self.learner = ANNLearner(activation='relu',
                                  alpha=0.7,
                                  hidden_layer_sizes=(390, 100),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('390,100')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.7,
                                  hidden_layer_sizes=(200, 200, 200),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('200, 200, 200')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.7,
                                  hidden_layer_sizes=(400, 400, 400),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('400, 400, 400')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.7,
                                  hidden_layer_sizes=(500, 500, 500),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('500, 500, 500')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.7,
                                  hidden_layer_sizes=(200, 300, 400),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('200, 300, 400')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.7,
                                  hidden_layer_sizes=(390, 100),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('390,100')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(200, 200, 200),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('200, 200, 200 alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(400, 400, 400),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('400, 400, 400  alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(500, 500, 500),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('500, 500, 500  alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(200, 300, 400),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('200, 300, 400  alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(
                                      100,
                                      100,
                                      100,
                                      100,
                                  ),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('100, 100, 100, 100,  alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(100, 100, 100, 100, 100),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('100, 100, 100, 100, 100, alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(100, 100, 100, 100, 100,
                                                      50),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('100, 100, 100, 100, 100, 50, alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()

        self.learner = ANNLearner(activation='relu',
                                  alpha=0.3,
                                  hidden_layer_sizes=(50, 50, 50, 50, 50, 50),
                                  learning_rate='constant',
                                  solver='adam',
                                  early_stopping=False)
        print('50, 50, 50, 50, 50, 50, alpha 0.3')
        self.expHelper = ExperimentHelper(self.splitter, self.learner)
        self.expHelper.experiment_run_test()
    def experiment_run_test_bank_iter(self):
        """Run the bank test experiment across a sweep of ANN architectures.

        Every configuration builds a fresh ANNLearner (relu activation,
        adam solver, constant learning rate, no early stopping), prints
        its label, and performs one experiment_run_test through a fresh
        ExperimentHelper.
        """
        # (alpha, hidden_layer_sizes, label) — labels reproduced verbatim
        # from the original run script, including their exact spacing.
        configs = [
            (0.7, (390, 100), '390,100'),
            (0.7, (200, 200, 200), '200, 200, 200'),
            (0.7, (400, 400, 400), '400, 400, 400'),
            (0.7, (500, 500, 500), '500, 500, 500'),
            (0.7, (200, 300, 400), '200, 300, 400'),
            (0.7, (390, 100), '390,100'),
            (0.3, (200, 200, 200), '200, 200, 200 alpha 0.3'),
            (0.3, (400, 400, 400), '400, 400, 400  alpha 0.3'),
            (0.3, (500, 500, 500), '500, 500, 500  alpha 0.3'),
            (0.3, (200, 300, 400), '200, 300, 400  alpha 0.3'),
            (0.3, (100, 100, 100, 100), '100, 100, 100, 100,  alpha 0.3'),
            (0.3, (100, 100, 100, 100, 100),
             '100, 100, 100, 100, 100, alpha 0.3'),
            (0.3, (100, 100, 100, 100, 100, 50),
             '100, 100, 100, 100, 100, 50, alpha 0.3'),
            (0.3, (50, 50, 50, 50, 50, 50),
             '50, 50, 50, 50, 50, 50, alpha 0.3'),
        ]
        for alpha, layers, label in configs:
            self.learner = ANNLearner(activation='relu',
                                      alpha=alpha,
                                      hidden_layer_sizes=layers,
                                      learning_rate='constant',
                                      solver='adam',
                                      early_stopping=False)
            print(label)
            self.expHelper = ExperimentHelper(self.splitter, self.learner)
            self.expHelper.experiment_run_test()
 def learning_curve_iter2_bank(self):
     """Plot the iteration-2 learning curve for the tuned bank boost model.

     Uses a shallow boosted learner (max_depth=2, learning_rate=0.1);
     the '-iter-2' tag distinguishes this run's output.
     """
     self.learner = BoostLearner(max_depth=2, learning_rate=0.1)
     # Bug fix: ExperimentHelper was constructed twice and the first
     # instance (built without the '-iter-2' tag) was discarded unused.
     self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                       '-iter-2')
     self.expHelper.learning_curve_exp()
Exemple #26
0
 def model_complexity_exp1(self):
     """Sweep the polynomial 'degree' hyper-parameter of a fresh SVM
     learner and plot the model-complexity curve (tagged 'rbf')."""
     degrees = np.array([2, 4, 6, 8])
     self.learner = SVMLearner()
     self.expHelper = ExperimentHelper(self.splitter, self.learner, 'rbf')
     self.expHelper.model_complexity_exp('degree', degrees)
Exemple #27
0
class SVMExp:
    """Driver for the SVM experiments (rbf / linear / sigmoid kernels).

    Holds the data splitter and swaps freshly configured learners in for
    each sweep; plotting and scoring are delegated to ExperimentHelper.
    The repeated learner/helper/sweep boilerplate from the original is
    factored into the private `_sweep`, `_learning_curve` and `_run_test`
    helpers; every public method keeps its original name and behavior.
    """

    def __init__(self, reader, helper, splitter):
        self.reader = reader
        self.helper = helper
        self.learner = SVMLearner()
        self.splitter = splitter
        self.expHelper = ExperimentHelper(self.splitter, self.learner)

    # ---- shared plumbing -------------------------------------------------

    def _sweep(self, learner, param_name, param_range, *tags):
        """Run one model-complexity sweep of `param_name` over `param_range`."""
        self.learner = learner
        self.expHelper = ExperimentHelper(self.splitter, self.learner, *tags)
        self.expHelper.model_complexity_exp(param_name, param_range)

    def _learning_curve(self, learner, *tags):
        """Plot a learning curve for the given (already tuned) learner."""
        self.learner = learner
        self.expHelper = ExperimentHelper(self.splitter, self.learner, *tags)
        self.expHelper.learning_curve_exp()

    def _run_test(self, learner, *tags):
        """Run a single held-out train/test pass for the given learner."""
        self.learner = learner
        self.expHelper = ExperimentHelper(self.splitter, self.learner, *tags)
        self.expHelper.experiment_run_test()

    # ---- experiment entry point -------------------------------------------

    def experiment(self):
        """Run the complexity sweeps, then the dataset-specific learning
        curves and final test runs (dead commented-out calls removed)."""
        self.model_complexity_exp6()
        self.model_complexity_exp5()
        self.model_complexity_exp_linear1()
        self.model_complexity_exp_linear2()
        if self.splitter.reader.dataset == 'Bank':
            print('bank')
            self.learning_curve_iter2_bank_linear()
            self.learning_curve_iter2_bank()
            self.experiment_run_test_bank_linear()
            self.experiment_run_test_bank()
        else:
            self.learning_curve_iter2_wine_linear()
            self.learning_curve_iter2_wine()
            self.experiment_run_test_wine()
            self.experiment_run_test_wine_linear()

    # ---- model-complexity sweeps -------------------------------------------

    def model_complexity_exp_linear1(self):
        """Sweep C for the linear SVM (dual formulation)."""
        self._sweep(LinearSVMLearner(dual=True), 'C',
                    np.array([0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8]),
                    'linear-2', 'LinearKernel')

    def model_complexity_exp_linear2(self):
        """Sweep intercept_scaling for the linear SVM at fixed C."""
        self._sweep(LinearSVMLearner(fit_intercept=True, C=0.01),
                    'intercept_scaling', np.array([1, 2, 3, 4, 5, 6]),
                    'linear-3', 'LinearKernel')

    def model_complexity_exp1(self):
        """Sweep the polynomial degree on the default learner."""
        self._sweep(SVMLearner(), 'degree', np.array([2, 4, 6, 8]), 'rbf')

    def model_complexity_exp2(self):
        """Two gamma sweeps (integer then fractional) at C=0.2, linear kernel."""
        self._sweep(SVMLearner(kernel='linear', C=0.2), 'gamma',
                    np.array([1, 2, 4, 5, 6, 7, 8]),
                    'linear4', 'Linear Kernel')
        self._sweep(SVMLearner(kernel='linear', C=0.2), 'gamma',
                    np.array([0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8]),
                    'linear5', 'Linear Kernel')

    def model_complexity_exp3(self):
        """Sweep gamma for the sigmoid kernel."""
        self._sweep(SVMLearner(kernel='sigmoid'), 'gamma',
                    np.array([0.01, 0.03, 0.05, 0.07, 0.09]),
                    'sigmoid', 'Sigmoid Kernel')

    def model_complexity_exp4(self):
        """Sweep very small C values for the linear kernel."""
        self._sweep(SVMLearner(kernel='linear'), 'C',
                    np.array([0.0001, 0.0005, 0.0008, 0.001, 0.005, 0.008]),
                    'linear3', 'Linear Kernel')

    def model_complexity_exp44(self):
        """Sweep moderate C values for the linear kernel."""
        self._sweep(SVMLearner(kernel='linear'), 'C',
                    np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.8, 0.9, 1]),
                    'linear1', 'Linear Kernel')

    def model_complexity_exp5(self):
        """Three C sweeps (small / medium / large) for the rbf kernel."""
        self._sweep(SVMLearner(kernel='rbf'), 'C',
                    np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.5]),
                    'rbf1', 'rbf Kernel')
        self._sweep(SVMLearner(kernel='rbf'), 'C',
                    np.array([0.5, 1, 1.5, 2]),
                    'rbf2', 'rbf Kernel')
        self._sweep(SVMLearner(kernel='rbf'), 'C',
                    np.array([2, 3, 4, 5, 6, 9, 12]),
                    'rbf3', 'rbf Kernel')

    def model_complexity_exp6(self):
        """Two gamma sweeps (fine then coarse) for the rbf kernel at C=0.5."""
        self._sweep(SVMLearner(kernel='rbf', C=0.5), 'gamma',
                    np.array([0.01, 0.03, 0.05, 0.07, 0.09]),
                    'rbf8', 'rbf Kernel')
        self._sweep(SVMLearner(kernel='rbf', C=0.5), 'gamma',
                    np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]),
                    'rbf9', 'rbf Kernel')

    def exp_grid_search(self):
        """Grid-search C and gamma for the rbf kernel."""
        grid_param = dict(C=np.array([1, 2, 3, 4, 5, 6, 9, 12]),
                          kernel=['rbf'],
                          gamma=np.array([0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.expHelper.perform_grid_search(grid_param)

    # ---- learning curves and final test runs --------------------------------

    def learning_curve_iter2_wine(self):
        """Learning curve for the tuned wine rbf model."""
        self._learning_curve(SVMLearner(gamma=0.05, C=0.5, kernel='rbf'),
                             '-iter-2')

    def learning_curve_iter2_wine_linear(self):
        """Learning curve for the tuned wine linear model."""
        self._learning_curve(LinearSVMLearner(fit_intercept=True, C=0.05),
                             'Linear Kernel')

    def learning_curve_iter2_bank_linear(self):
        """Learning curve for the tuned bank linear model."""
        print("linear")
        self._learning_curve(LinearSVMLearner(fit_intercept=True, C=0.01,
                                              intercept_scaling=6),
                             'Linear Kernel')

    def learning_curve_iter2_bank(self):
        """Learning curve for the tuned bank rbf model."""
        self._learning_curve(SVMLearner(gamma=0.03, C=0.2, kernel='rbf'),
                             '-iter-2')

    def experiment_run_test_wine(self):
        """Final train/test run for the tuned wine rbf model."""
        self._run_test(SVMLearner(gamma=0.05, C=0.5, kernel='rbf'))

    def experiment_run_test_wine_linear(self):
        """Final train/test run for the tuned wine linear model."""
        self._run_test(LinearSVMLearner(fit_intercept=True, C=0.05),
                       'Linear Kernel')

    def experiment_run_test_bank_linear(self):
        """Final train/test run for the tuned bank linear model."""
        self._run_test(LinearSVMLearner(fit_intercept=True, C=0.01,
                                        intercept_scaling=6),
                       'Linear Kernel')

    def experiment_run_test_bank(self):
        """Final train/test run for the tuned bank rbf model."""
        self._run_test(SVMLearner(gamma=0.03, C=0.2, kernel='rbf'))
Exemple #28
0
class KNNExp:
    """Driver for the k-nearest-neighbour experiments.

    Runs n_neighbors model-complexity sweeps, an optional grid search,
    and dataset-specific learning curves / final test runs. The repeated
    learner/helper boilerplate is factored into private helpers; every
    public method keeps its original name and behavior. Sweeps that
    printed their parameter range keep the print in the same position.
    """

    def __init__(self, reader, helper, splitter):
        self.reader = reader
        self.helper = helper
        self.learner = KNNLearner()
        self.splitter = splitter
        self.expHelper = ExperimentHelper(self.splitter, self.learner)

    # ---- shared plumbing -------------------------------------------------

    def _sweep(self, learner, param_range, *tags):
        """Run one n_neighbors model-complexity sweep for `learner`."""
        self.learner = learner
        self.expHelper = ExperimentHelper(self.splitter, self.learner, *tags)
        self.expHelper.model_complexity_exp('n_neighbors', param_range)

    def _learning_curve(self, learner, *tags):
        """Plot a learning curve for the given (already tuned) learner."""
        self.learner = learner
        self.expHelper = ExperimentHelper(self.splitter, self.learner, *tags)
        self.expHelper.learning_curve_exp()

    def _run_test(self, learner, *tags):
        """Run a single held-out train/test pass for the given learner."""
        self.learner = learner
        self.expHelper = ExperimentHelper(self.splitter, self.learner, *tags)
        self.expHelper.experiment_run_test()

    # ---- experiment entry point -------------------------------------------

    def experiment(self):
        """Run the complexity sweeps, then the dataset-specific final test
        and learning curve (dead commented-out calls removed)."""
        self.model_complexity_exp1()
        self.model_complexity_exp11()
        self.model_complexity_exp2()
        self.model_complexity_exp3()
        if self.splitter.reader.dataset == 'Bank':
            print('bank')
            self.experiment_run_test_bank()
            self.learning_curve_iter2_bank()
        else:
            self.experiment_run_test_wine()
            self.learning_curve_iter2_wine()

    # ---- model-complexity sweeps -------------------------------------------

    def model_complexity_exp1(self):
        """Coarse odd-k sweep (1..39) with the default metric."""
        self.learner = KNNLearner()
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          'euclidean')
        param_range = np.arange(1, 40, 2)
        print(param_range)
        self.expHelper.model_complexity_exp('n_neighbors', param_range)

    def model_complexity_exp11(self):
        """Continuation sweep of even-start k values (40..58)."""
        self.learner = KNNLearner()
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          'euclidean', '1')
        param_range = np.arange(40, 60, 2)
        print(param_range)
        self.expHelper.model_complexity_exp('n_neighbors', param_range)

    def model_complexity_exp2(self):
        """Small-k sweep with the explicit euclidean metric."""
        self._sweep(KNNLearner(metric='euclidean'),
                    np.array([1, 2, 3, 4, 5]), 'euclidean')

    def model_complexity_exp3(self):
        """Small-k sweep with distance weighting."""
        self._sweep(KNNLearner(weights='distance'),
                    np.array([1, 2, 3, 4, 5]), 'distance weight')

    def model_complexity_exp4(self):
        """Odd-k sweep (1..39) with euclidean metric and distance weights."""
        self.learner = KNNLearner(metric='euclidean', weights='distance')
        self.expHelper = ExperimentHelper(self.splitter, self.learner,
                                          'distance weight')
        param_range = np.arange(1, 40, 2)
        print(param_range)
        self.expHelper.model_complexity_exp('n_neighbors', param_range)

    def exp_grid_search(self):
        """Grid-search k, weighting scheme and metric."""
        grid_param = dict(n_neighbors=list(range(1, 31)),
                          weights=['uniform', 'distance'],
                          metric=['euclidean', 'minkowski'])
        self.expHelper.perform_grid_search(grid_param)

    # ---- learning curves and final test runs --------------------------------

    def learning_curve_iter2_wine(self):
        """Learning curve for the tuned wine model (k=30)."""
        self._learning_curve(KNNLearner(metric='euclidean', n_neighbors=30,
                                        weights='uniform'),
                             '-iter-2')

    def learning_curve_iter2_bank(self):
        """Learning curve for the tuned bank model (k=38)."""
        self._learning_curve(KNNLearner(metric='euclidean', n_neighbors=38,
                                        weights='uniform'),
                             '-iter-2')

    def experiment_run_test_wine(self):
        """Final train/test run for the tuned wine model (k=30)."""
        self._run_test(KNNLearner(metric='euclidean', n_neighbors=30,
                                  weights='uniform'))

    def experiment_run_test_bank(self):
        """Final train/test run for the tuned bank model (k=38)."""
        self._run_test(KNNLearner(metric='euclidean', n_neighbors=38,
                                  weights='uniform'))
Exemple #29
0
 def __init__(self, reader, helper, splitter):
     """Stash the shared collaborators, then build the default SVM
     learner and its experiment helper."""
     self.reader, self.helper, self.splitter = reader, helper, splitter
     self.learner = SVMLearner()
     self.expHelper = ExperimentHelper(self.splitter, self.learner)
 def experiment_run_test_bank(self):
     """Final train/test run for the tuned bank boosting model
     (max_depth=2, learning_rate=0.1)."""
     tuned = BoostLearner(max_depth=2, learning_rate=0.1)
     self.learner = tuned
     self.expHelper = ExperimentHelper(self.splitter, tuned)
     self.expHelper.experiment_run_test()