Example #1
def n_queens_sa(nq_problem,
                initial_state,
                max_iters=np.inf,
                num_runs=20,
                verbose=False):
    hp_name = 'schedule'
    hp_values = [mlrose.ArithDecay(), mlrose.GeomDecay(), mlrose.ExpDecay()]
    hp_values_strings = [
        val.get_info__()['schedule_type'] for val in hp_values
    ]

    # run for each hp value and append results to list

    fitness_dfs = []
    runs = np.arange(num_runs)

    for hp_value, hp_value_string in zip(hp_values, hp_values_strings):
        schedule = hp_value  # set varied HP at beginning of loop

        run_times = np.zeros(num_runs)
        fitness_data = pd.DataFrame()

        for run in runs:
            run_t0 = time()
            best_state, best_fitness, fitness_curve = mlrose.simulated_annealing(
                problem=nq_problem,
                schedule=schedule,
                max_attempts=10,
                max_iters=max_iters,
                curve=True,
            )
            run_time = time() - run_t0
            run_times[run] = run_time

            fitness_data = pd.concat(
                [fitness_data, pd.DataFrame(fitness_curve)],
                axis=1,
                sort=False)

        fitness_data.columns = runs
        fitness_data = fitness_data.ffill()  # fillna(method='ffill') is deprecated in pandas 2.x
        fitness_dfs.append(fitness_data)

        # calculate and print avg time per run
        avg_run_time = np.average(run_times)
        print("N-Queens - SA avg run time,", hp_value_string, hp_name, ":",
              avg_run_time)

    # generate plots
    plot_title = "N-Queens SA: fitness vs. iterations"
    plotting.plot_fitness_curves(
        fitness_dfs=fitness_dfs,
        hp_values=hp_values_strings,
        hp_name=hp_name,
        title=plot_title,
    )
    plt.savefig('graphs/n_queens_sa_fitness.png')
    plt.clf()

    return fitness_dfs
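
A minimal usage sketch for n_queens_sa (the problem setup below is an assumption; note that get_info__() on the decay schedules exists in the mlrose-hiive fork, and plotting.plot_fitness_curves is a helper from the original project):

import numpy as np
import mlrose_hiive as mlrose

# 8x8 N-Queens as a minimization problem (fitness = number of attacking pairs)
nq_problem = mlrose.DiscreteOpt(length=8, fitness_fn=mlrose.Queens(),
                                maximize=False, max_val=8)
initial_state = np.arange(8)  # one queen per column, rows 0..7
fitness_dfs = n_queens_sa(nq_problem, initial_state, max_iters=1000, num_runs=5)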
Example #2
def sa_optimization(size):
    algo = 'sa'
    problem = get_problem(size)

    # Gridsearch params
    max_iters = 500
    schedule_values = [mlrose_hiive.GeomDecay(), mlrose_hiive.ArithDecay(), mlrose_hiive.algorithms.decay.ExpDecay()]
    max_attempts_values = [10, 50, 100, 200]
    n_runs = len(schedule_values) * len(max_attempts_values)

    # Best vals
    schedule, max_attempts = None, None
    fitness, curves, n_invocations, time = float('-inf'), [], 0, 0

    # Gridsearch
    global eval_count
    run_counter = 0
    for run_schedule in schedule_values:
        for run_max_attempts in max_attempts_values:
            # Print status
            run_counter += 1
            print(f'RUN {run_counter} of {n_runs} [schedule: {run_schedule.__class__.__name__}] [max_attempts: {run_max_attempts}]')

            # Run problem
            eval_count = 0
            start = timer()
            run_state, run_fitness, run_curves = mlrose_hiive.simulated_annealing(problem,
                                                                                  schedule=run_schedule,
                                                                                  max_attempts=run_max_attempts,
                                                                                  max_iters=max_iters,
                                                                                  random_state=42,
                                                                                  curve=True)
            end = timer()

            # Save curves and params
            if run_fitness > fitness:
                schedule = run_schedule.__class__.__name__
                max_attempts = run_max_attempts
                fitness = run_fitness
                curves = run_curves
                n_invocations = eval_count
                time = end - start

    df = pandas.DataFrame(curves, columns=['fitness'])
    df['schedule'] = schedule
    df['max_attempts'] = max_attempts
    df['max_iters'] = max_iters
    df['n_invocations'] = n_invocations
    df['time'] = time
    df.to_csv(f'{STATS_FOLDER}/{algo}_{size}_stats.csv', index=False)

    print(f'{algo}_{size} run.')
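
sa_optimization relies on a get_problem helper, an eval_count global, and a STATS_FOLDER constant defined elsewhere in the source module. A plausible minimal shape for the first two (an assumption, using a OneMax-style toy objective):

import mlrose_hiive

eval_count = 0

def counted_fitness(state):
    # Count every fitness evaluation so n_invocations can be reported
    global eval_count
    eval_count += 1
    return float(sum(state))

def get_problem(size):
    fitness = mlrose_hiive.CustomFitness(counted_fitness)
    return mlrose_hiive.DiscreteOpt(length=size, fitness_fn=fitness,
                                    maximize=True, max_val=2)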
Example #3
    def runSA(self):
        default = {
            'problem': self.problem,
            'schedule': self.schedule,
            'max_attempts': 10,
            'max_iters': 1000,
            'init_state': self.init_state,
            'curve': True,
            'random_state': 1
        }

        maxAttempts = [5, 10, 20]
        schedules = [mlrose.GeomDecay(), mlrose.ExpDecay(),
                     mlrose.ArithDecay()]
        bestFitness = None
        (bestState, bestCurve, bestParams) = None, None, None
        for i in maxAttempts:
            for j in schedules:
                params = _.assign(
                    {}, default, {'max_attempts': i, 'schedule': j})

                scores = []
                for r in range(5):
                    randomSeed = np.random.randint(0, 1000)
                    params = _.assign(
                        {}, params, {'random_state': randomSeed})
                    state, fitness, curve = self._run(
                        mlrose.simulated_annealing, name='%s' % i, **params)
                    scores.append(fitness)
                avgFitness = np.mean(scores)

                if (bestFitness is None
                        or (self.isMaximize and avgFitness > bestFitness)
                        or (not self.isMaximize and avgFitness < bestFitness)):
                    bestFitness = avgFitness
                    (bestState, bestCurve, bestParams) = state, curve, params
                # if fitness == 0:
                #     break
        print('SA - Params: %s' % bestParams)
        log.info('\tSA - Best fitness found: %s\n\t\tmaxAttempts: %s \n\t\tschedule: %s' %
                 (bestFitness, bestParams['max_attempts'], type(bestParams['schedule']).__name__))

        return bestCurve
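
The `_` used above is presumably pydash, whose assign() merges dicts left to right; self._run and log come from the surrounding class and module. A dependency-free equivalent of the merge pattern (illustrative only):

params = {**default, 'max_attempts': i, 'schedule': j}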
Example #4
init = np.random.choice(2**(final_prob_len + 1),
                        size=final_prob_len,
                        replace=False)
problem = mlr.DiscreteOpt(length=final_prob_len,
                          fitness_fn=fitness_cust,
                          maximize=True,
                          max_val=2**(final_prob_len + 1))

print(init)
decays = [1, 0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015, 0.00725, 0.00375]
_fitnesses = []
_times = []

start = time.time()
for i in decays:
    sch = mlr.ArithDecay(10, i, 0.0001)
    it_start = time.time()
    best_sa_state, best_sa_fitness, sa_curve = mlr.simulated_annealing(
        problem,
        schedule=sch,
        max_attempts=attempts,
        random_state=seed,
        curve=True,
        init_state=init,
        max_iters=1000)
    it_end = time.time()
    print(i, best_sa_fitness, best_sa_state)
    _fitnesses.append(best_sa_fitness)
    _times.append(it_end - it_start)

end = time.time()
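
For reference, the positional arguments to mlr.ArithDecay(10, i, 0.0001) above map onto (init_temp, decay, min_temp), and the schedule cools linearly: T(t) = max(init_temp - decay * t, min_temp). A quick check of that behavior (values are illustrative; attempts, seed, and fitness_cust are defined elsewhere in the original script):

sch = mlr.ArithDecay(10, 0.5, 0.0001)
print(sch.evaluate(5))   # 10 - 0.5 * 5 = 7.5
print(sch.evaluate(50))  # clipped at min_temp: 0.0001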
Example #5
def nn_impl():

    #iris_data = fetch_openml('iris')
    #X_whole, y_whole = iris_data['data'], iris_data['target']

    sklearn_data = datasets.load_breast_cancer()
    x, y = sklearn_data.data, sklearn_data.target
    #x = preprocessing.scale(x)

    # Split the initial data
    xtrain, xtest, ytrain, ytest = train_test_split(x,
                                                    y,
                                                    test_size=0.4,
                                                    random_state=42)

    ### Analysis for RHC ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_rhc = []

    for i in range(1, 3000, 50):
        print(i)
        rhc_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                            activation='identity',
                                            algorithm='random_hill_climb',
                                            bias=False,
                                            is_classifier=True,
                                            learning_rate=0.6,
                                            clip_max=1,
                                            max_attempts=1000,
                                            max_iters=i)

        start = time.time()
        rhc_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = rhc_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = rhc_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_rhc.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (RHC)')
    plt.legend()
    plt.savefig('testacc_iter_rhc.png')

    print("Finished RHC")

    ### Analysis for Simulated Annealing ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_sa = []

    for i in range(1, 3000, 50):
        print(i)
        sa_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='identity',
                                           algorithm='simulated_annealing',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           max_iters=i)

        start = time.time()
        sa_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = sa_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = sa_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_sa.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (SA)')
    plt.legend()
    plt.savefig('testacc_iter_SA.png')

    print("Finished SA")

    ### Analysis for Genetic Algorithms ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_ga = []

    for i in range(1, 3000, 50):
        print(i)
        ga_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='identity',
                                           algorithm='genetic_alg',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           max_iters=i)

        start = time.time()
        ga_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = ga_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = ga_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_ga.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (GA)')
    plt.legend()
    plt.savefig('testacc_iter_GA.png')

    print("Finished GA")

    ### Backpropagation (for comparison) ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_bp = []
    print("backprop start")
    for i in range(1, 3000, 50):
        print(i)
        bp_nn = MLPClassifier(hidden_layer_sizes=(50, ),
                              activation='logistic',
                              max_iter=i)
        # bp_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2], activation='identity',
        #                         algorithm='gradient_descent',
        #                         bias=False, is_classifier=True,
        #                         learning_rate = 0.6, clip_max=1,
        #                         max_attempts=1000, max_iters = i)

        start = time.time()
        bp_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = bp_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = bp_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_bp.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (Backpropagation)')
    plt.legend()
    plt.savefig('testacc_iter_bp.png')

    print("Finished Backprop")

    ### Plot runtimes for above ###
    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_rhc),
             label='RHC')
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_sa),
             label='SA')
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_ga),
             label='GA')
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_bp),
             label='BP')
    plt.xlabel('Iterations')
    plt.ylabel('Training Time')
    plt.title('Training Time vs Iterations')
    plt.legend()
    plt.savefig('time_vs_iter.png')

    #### Hyperparameter Tuning - RHC ####
    ## Adjusting the number of random restarts ##
    train_accuracy_scores = []
    test_accuracy_scores = []

    for i in range(0, 500, 25):
        print(i)
        rhc_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                            activation='identity',
                                            algorithm='random_hill_climb',
                                            bias=False,
                                            is_classifier=True,
                                            learning_rate=0.6,
                                            clip_max=1,
                                            max_attempts=1000,
                                            restarts=i)

        rhc_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = rhc_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = rhc_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(np.arange(0, 500, 25),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(0, 500, 25),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Restarts')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Number of Restarts (RHC)')
    plt.legend()
    plt.savefig('rhc_restarts.png')

    print("Finished RHC HP Tuning")

    #### Hyperparameter Tuning - SA ####
    ## Adjusting the type of scheduling ##
    train_accuracy_scores = []
    test_accuracy_scores = []

    # Referencing section 2.2 'Decay Schedules' here:
    # https://readthedocs.org/projects/mlrose/downloads/pdf/stable/

    schedule_types = [
        mlrose_hiive.ExpDecay(),
        mlrose_hiive.ArithDecay(),
        mlrose_hiive.GeomDecay()
    ]

    for st in schedule_types:
        print(st)
        sa_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='identity',
                                           algorithm='simulated_annealing',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           schedule=st)

        sa_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = sa_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = sa_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(['ExpDecay', 'ArithDecay', 'GeomDecay'],
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(['ExpDecay', 'ArithDecay', 'GeomDecay'],
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Schedule Type')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Schedule Type (SA)')
    plt.legend()
    plt.savefig('sa_schedule_type.png')

    print("Finished SA HP Tuning")

    #### Hyperparameter Tuning - GA ####

    ## Adjusting the amount of mutation
    ## Used api as referenced in https://readthedocs.org/projects/mlrose/downloads/pdf/stable/
    train_accuracy_scores = []
    test_accuracy_scores = []

    mutation_prob_array = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    for i in mutation_prob_array:
        ga_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='relu',
                                           algorithm='genetic_alg',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           mutation_prob=i)

        ga_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = ga_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = ga_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(mutation_prob_array,
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(mutation_prob_array,
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('mutation_prob')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. mutation_prob (GA)')
    plt.legend()
    plt.savefig('ga_mutation.png')

    print("Finished GA mutation experimentation")

    ## Adjusting the population size
    ## Used api as referenced in https://readthedocs.org/projects/mlrose/downloads/pdf/stable/
    train_accuracy_scores = []
    test_accuracy_scores = []

    pop_size_array = [100, 200, 300, 400, 500]
    for i in pop_size_array:
        ga_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='relu',
                                           algorithm='genetic_alg',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           pop_size=i)

        ga_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = ga_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = ga_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(pop_size_array,
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(pop_size_array,
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('pop_size')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. pop_size (GA)')
    plt.legend()
    plt.savefig('ga_popsize.png')

    print("Finished GA pop_size experimentation")
Example #6
def sa(problem, iterations, random_seed, graph_file, graph_title):
    decays = [0.001, 0.002, 0.003, 0.004, 0.005]
    best_score = []
    time_taken = []
    fn_evals_taken = []
    # fig1, ax1 = plt.subplots()
    # fig2, ax2 = plt.subplots()
    global eval_count
    for decay in decays:
        schedule = mlrose_hiive.ArithDecay(init_temp=1.0, decay=decay)
        fitness = []
        fit_time = []
        fn_evals = []
        for i in iterations:
            eval_count = 0
            start = datetime.datetime.now()
            # Solve using simulated annealing - attempt 1
            best_state, best_fitness, _ = mlrose_hiive.simulated_annealing(problem, schedule=schedule,
                                                                max_iters=i, random_state=random_seed)
            finish = datetime.datetime.now()
            fn_evals.append(eval_count)
            fitness.append(best_fitness)
            fit_time.append((finish - start).total_seconds())
            # print('iteration: ',i)
            # print('best_state:', best_state)
            # print('best_fitness: ', best_fitness)
        best_score.append(max(fitness))
        index = fitness.index(max(fitness))
        time_taken.append(fit_time[index])
        fn_evals_taken.append(fn_evals[index])
        # print('index: ', index)
        # print('time for that: ', fit_time[index])
        plt.plot(iterations, fitness, label="Cooling = " + str(decay))
        # ax2.plot(fn_evals, fitness, label="Cooling = " + str(decay))

    plt.legend(loc="best")
    plt.grid()
    generate_graph(graph_file + "sa_iter", graph_title + "Simulated Annealing", "Iterations", "Fitness")

    """
    ax2.legend(loc="best")
    ax2.grid()
    generate_graph("cp_sa_evals", "Continuous Peaks - Simulated Annealing", "Function evaluations", "Fitness")
    """
    # Decays best_score and time_taken
    plt.plot(decays, best_score)
    plt.grid()
    generate_graph(graph_file + "sa_decays", graph_title + "Simulated Annealing",
                   "Cooling Component", "Best Score Achieved")

    plt.plot(decays, time_taken)
    plt.grid()
    generate_graph(graph_file + "sa_decay_time", graph_title + "Simulated Annealing",
                   "Cooling Component", "Time taken to achieve that")

    plt.scatter(time_taken, best_score)
    for i, txt in enumerate(decays):
        plt.annotate(str(txt), xy=(time_taken[i], best_score[i]))  # the s= keyword was removed in Matplotlib 3.3
    plt.legend(loc='best', title='Cooling Component')
    plt.grid()
    generate_graph(graph_file + "sa_scatter", graph_title + "Simulated Annealing",
                   "Time Taken", "Best Score achieved")

    print('decays: ', decays)
    print('Best scores reached: ', best_score)
    print('Time taken to do that: ', time_taken)
    print('Function evaluations taken: ', fn_evals_taken)
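
generate_graph() and the eval_count global are defined elsewhere in the source project. A minimal generate_graph consistent with how it is called above (an assumption, not the original helper):

import matplotlib.pyplot as plt

def generate_graph(filename, title, xlabel, ylabel):
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig(filename + '.png')
    plt.clf()  # clear the shared figure for the next plot
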
Example #7
y_test_hot = oh.transform(y_test.reshape(-1, 1)).todense().astype(int)

# lrs = [0.002]  # 6
# lrs = [0.002, 0.004, 0.02, 0.06, 0.2, 0.8]  # 6
lrs = [0.003,  0.05, 0.7]  # 6
# mil = [100]  # 4
# mil = [100, 500, 1000, 2500]  # 4
mil = [145, 435, 1300]  # 4
# acti = ['tanh']
acti = ['relu', 'sigmoid', 'tanh']
NNparams = np.array(np.meshgrid(lrs, mil, acti), dtype=object).T.reshape(-1, 3)
# rsts = [10]
rsts = [7, 12]
# rsts = [5, 10, 15]
# schs = [mlr.ArithDecay(15)]
schs = [mlr.ArithDecay(25), mlr.ExpDecay(25), mlr.GeomDecay(25)]
# schs = [mlr.ArithDecay(15), mlr.ExpDecay(15), mlr.GeomDecay(15),
        # mlr.ArithDecay(175), mlr.ExpDecay(175), mlr.GeomDecay(175)]
# pops = [225]
pops = [150, 225]
# pops = [75, 150, 225]
# muts = [0.25]
muts = [0.33, 0.66]
# muts = [0.25, 0.5, 0.75]
gaParams = np.array(np.meshgrid(pops, muts)).T.reshape(-1, 2)

experiments = []
for lr, mi, act in NNparams:
    for rst in rsts:
        experiments.append(mlr.NeuralNetwork(hidden_nodes=[7,5],
                                activation=act,
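
The excerpt is truncated mid-constructor. Separately, the np.meshgrid(...).T.reshape(-1, n) idiom above enumerates the Cartesian product of the parameter lists, one combination per row; on a tiny grid (illustrative values only):

import numpy as np

grid = np.array(np.meshgrid([1, 2], ['a', 'b']), dtype=object).T.reshape(-1, 2)
print(grid)  # [[1 'a'] [1 'b'] [2 'a'] [2 'b']]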
Example #8
from visualization.plot_graphs import plot_sim_ann_schedules

input_sizes = [25, 50, 75, 100, 150, 200, 300]

opt_probs = [
    (get_one_max, 'one max'),
    (get_four_peaks, 'four peaks'),
    (get_six_peaks, 'six peaks'),
    (get_continuous_peaks, 'continuous peaks'),
    (get_flip_flop, 'flip flop'),
]

sim_ann = get_sim_ann

options = [(mlrose.GeomDecay(), 'geometric'),
           (mlrose.ArithDecay(), 'arithmetic'),
           (mlrose.ExpDecay(), 'exponential')]

metric_dict = {prob[1]: {} for prob in opt_probs}

for size in input_sizes:
    print('####################################### New input size ', size,
          ' ##############################################')
    for prob in opt_probs:
        problem_name = prob[1]
        problem = prob[0](size)
        print('----------------------------------New Problem ', problem_name,
              ' ------------------------------------------')
        for opt in options:
            option_name = opt[1]
            opt_val = opt[0]
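
The get_* factories (get_one_max, get_four_peaks, get_sim_ann, ...) come from elsewhere in the project. A plausible shape for one of them (an assumption):

import mlrose

def get_one_max(size):
    return mlrose.DiscreteOpt(length=size, fitness_fn=mlrose.OneMax(),
                              maximize=True, max_val=2)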
Example #9
trial_curves = []
for seed in seeds:
    best_model = None
    best_recall = -1
    fit_times = []
    fit_is_recalls = []
    fit_os_recalls = []
    fit_iters = []
    fit_losses = []
    fit_curves = []
    # One RHC network per (decay schedule, learning rate) pair; same order as
    # the original hand-written list: GeomDecay first, then ArithDecay, then
    # ExpDecay, each with learning rates 1e-3 down to 1e-7.
    schedule_classes = [mlr.GeomDecay, mlr.ArithDecay, mlr.ExpDecay]
    learning_rates = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
    candidates = [mlr.NeuralNetwork(hidden_nodes=[13, 10, 7, 5, 1],
                                    activation='relu',
                                    algorithm='random_hill_climb',
                                    max_iters=1000,
                                    bias=True,
                                    is_classifier=True,
                                    learning_rate=lr,
                                    schedule=schedule_cls(),
                                    early_stopping=False,
                                    curve=True,
                                    max_attempts=100,
                                    random_state=seed)
                  for schedule_cls in schedule_classes
                  for lr in learning_rates]
    for i in range(len(candidates)):
        init_weights = np.random.uniform(-0.5,0.5,423)
        bp = candidates[i]
        start_time = time.time()
        bp.fit(X_train_sc_1000, y_train_1000, init_weights=init_weights)
        end_time = time.time()
Example #10
    train_acc = accuracy_score(y_train, train_pred)
    valid_acc = accuracy_score(y_valid, valid_pred)

    rhc_losses.append(nn_rhc.loss)
    rhc_times.append(end - start)
    rhc_train.append(train_acc)
    rhc_valid.append(valid_acc)

    print(f"  RHC Valid Acc: {valid_acc}")

    # simulated annealing
    its = i * 5
    min_t = 0.0001
    max_t = 1.0
    r = 0.002  #r = (mint/maxt)**(1/i)
    sch = mlr.ArithDecay(max_t, r, min_t)  #mlr.ArithDecay mlr.ExpDecay
    nn_sa = mlr.NeuralNetwork(
        hidden_nodes=hidden_nodes,
        activation='sigmoid',
        curve=False,
        algorithm='simulated_annealing',
        max_iters=its,
        random_state=seed,
        learning_rate=0.75,  #3.57
        early_stopping=False,
        schedule=sch)

    start = time.time()
    nn_sa.fit(X_train_std, y_train)
    end = time.time()
    train_pred = nn_sa.predict(X_train_std)
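
The commented-out formula r = (mint/maxt)**(1/i) appears to be the per-step ratio of a geometric schedule that would reach min_t from max_t in i steps, so it pairs with mlr.GeomDecay rather than the arithmetic schedule actually used. For comparison, exponential decay cools as T(t) = max(init_temp * exp(-exp_const * t), min_temp); a quick check (assuming mlr aliases the mlrose-hiive fork):

import mlrose_hiive as mlr

sch = mlr.ExpDecay(init_temp=1.0, exp_const=0.002, min_temp=0.0001)
print(sch.evaluate(500))  # 1.0 * exp(-1) ≈ 0.368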
Example #11
attempts = max_it // 2  # integer division: mlrose expects an int for max_attempts

rhc_fitnesses = []
sa_fitnesses = []
ga_fitnesses = []
m_fitnesses = []
rhc_times = []
sa_times = []
ga_times = []
m_times = []

test_range = [4, 6, 8, 10, 15, 20, 30]
final_prob_len = 60

fitness_cust = mlr.CustomFitness(orderedmax)
schedule = mlr.ArithDecay()

print(f"Attempts:            {attempts}")
print(f"Max Iterations:      {max_it}")
print(f"Problem Sizes:       {test_range}")
print(f"Last Problem Size:   {final_prob_len}\n\n")

print(f"\n######### PART 2 #########\n")

part2_time = 0.0

for i in test_range:
    start = time.time()
    print(f"Running for subproblem size: {i}")

    init = np.random.choice(i, size=i, replace=False)
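
orderedmax is the user-defined fitness passed to mlr.CustomFitness above; its body is not shown in the excerpt. Any callable that takes a state vector and returns a float fits, e.g. this hypothetical stand-in:

def orderedmax(state):
    # Hypothetical: reward adjacent pairs already in increasing order
    return float(sum(state[i] < state[i + 1] for i in range(len(state) - 1)))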
Example #12
def plot_sa_graph(prob_type):
    try:
        os.mkdir("./SA")
    except FileExistsError:
        pass
    except OSError as error:
        print("Error creating directory results.")

    print('\n plot_sa_graph - Progress: ', end="")

    fitness, problem = get_fitness_function(prob_type)
    process_time = []
    fitness_score = []
    for j in range(3):
        process_time.append([])
        fitness_score.append([])
        if j != 2:
            decay_const = [
                0.005, 0.01, 0.05, 0.1, 0.15, 0.25, 0.5, 0.75, 0.9, 0.99
            ]
        else:
            decay_const = [
                0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99
            ]
        for i in decay_const:
            print('.', end="")
            if j == 0:
                decay = mlrose.GeomDecay(decay=i)
            elif j == 1:
                decay = mlrose.ExpDecay(exp_const=i)
            else:
                decay = mlrose.ArithDecay(decay=i)
            start = time.time()
            best_state, best_fitness, fitness_curve = mlrose.simulated_annealing(
                problem,
                schedule=decay,
                max_attempts=1000,
                max_iters=100,
                random_state=10)
            fitness_score[j].append(best_fitness)
            process_time[j].append(time.time() - start)

    plt.figure(100)
    plt.plot([0.005, 0.01, 0.05, 0.1, 0.15, 0.25, 0.5, 0.75, 0.9, 0.99],
             fitness_score[0],
             'r',
             label='GeomDecay')
    plt.plot([0.005, 0.01, 0.05, 0.1, 0.15, 0.25, 0.5, 0.75, 0.9, 0.99],
             fitness_score[1],
             'b',
             label='ExpDecay')
    plt.plot([0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99],
             fitness_score[2],
             'g',
             label='ArithDecay')
    plt.xlim(0.0001, 0.2)
    plt.legend()
    plt.xlabel("decay constant")
    plt.ylabel("fitness score")
    plt.title('Simulated Annealing')
    plt.savefig("SA/" + "Fitness.png")

    plt.figure(101)
    plt.plot([0.005, 0.01, 0.05, 0.1, 0.15, 0.25, 0.5, 0.75, 0.9, 0.99],
             process_time[0],
             'r',
             label='GeomDecay')
    plt.plot([0.005, 0.01, 0.05, 0.1, 0.15, 0.25, 0.5, 0.75, 0.9, 0.99],
             process_time[1],
             'b',
             label='ExpDecay')
    plt.plot([0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99],
             process_time[2],
             'g',
             label='ArithDecay')
    plt.xlim(0.0001, 0.2)
    plt.legend()
    plt.xlabel("decay constant")
    plt.ylabel("process time")
    plt.title('Simulated Annealing')
    plt.savefig("SA/" + "Time.png")
Example #13
                   max_attempts=200,
                   iteration_list=[2000],
                   temperature_list=[0.01, 0.1, 1, 10, 100, 1000],
                   decay_list=[mlrh.GeomDecay, mlrh.ExpDecay, mlrh.ArithDecay])
sa_stats, sa_curve = sa.run()
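
The excerpt opens mid-call; the truncated constructor is presumably mlrose_hiive's SARunner, which would look roughly like this (experiment_name and output_directory are defined elsewhere):

sa = mlrh.SARunner(problem=prob,
                   experiment_name=experiment_name,
                   output_directory=output_directory,
                   seed=random_state,
                   max_attempts=200,
                   iteration_list=[2000],
                   temperature_list=[0.01, 0.1, 1, 10, 100, 1000],
                   decay_list=[mlrh.GeomDecay, mlrh.ExpDecay, mlrh.ArithDecay])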


columns = ['Time', 'Fitness', 'Temperature', 'schedule_type']
df = pd.read_csv("./queen/queen_prob/sa__queen_prob__run_stats_df.csv")
print(df[columns].sort_values(by=['Fitness'], ascending=False))


max_attempts = 200
max_iters = 2000
init_temp = 0.1
schedule = mlrh.ArithDecay(init_temp)
eval_count = 0
best_state, best_fitness, sa_curve = mlrh.simulated_annealing(prob,
                                                             max_attempts=max_attempts,
                                                             max_iters=max_iters,
                                                             random_state=random_state,
                                                             schedule=schedule,
                                                             curve=True)
print("Simulated Annealing - Total Function Evaluations:", eval_count)
plot_fitness_iteration('fitness_iteration_sa_tsp.png', sa_curve,
                       "TSP - Simulated Annealing: schedule: {}, init_temp: {}".format(schedule.__class__.__name__, init_temp))

# GA
ga = mlrh.GARunner(problem=prob,
                   experiment_name=experiment_name,
                   output_directory=output_directory,
Example #14
def simulated_annealing_experiment(optimization_problem, hparams,
                                   output_fn_base):
    metrics = {}
    logs = []
    print("----Running Simulated Annealing Experiment-----")
    print("Hyperparameters Used: ")
    print(hparams)

    schedule = None
    if (hparams["decay"] == "Geometric"):
        schedule = mlrose.GeomDecay(init_temp=hparams["initial_temp"])
    elif (hparams["decay"] == "Arithmetic"):
        schedule = mlrose.ArithDecay(init_temp=hparams["initial_temp"])
    else:
        schedule = mlrose.ExpDecay(init_temp=hparams["initial_temp"])

    best_state, best_fitness, _ = mlrose.simulated_annealing(
        optimization_problem, schedule, hparams["max_attempts"],
        hparams["max_iters"])

    # Iterations and runtime
    fitness_scores = []
    runtimes = []
    iteration_count = range(1, hparams["max_iters"])
    for iter in iteration_count:
        start_time = time.time()
        best_state, best_fitness, _ = mlrose.simulated_annealing(
            optimization_problem, schedule, hparams["max_attempts"], iter)
        end_time = time.time()
        runtimes.append(end_time - start_time)
        fitness_scores.append(-best_fitness
                              if optimization_problem.prob_type == 'tsp'
                              else best_fitness)

    plot_single(fitness_scores,
                "%s/Annealing/annealing_fitness_iterations" % output_fn_base,
                xvals=iteration_count)
    plot_runtimes(runtimes,
                  "%s/Annealing/annealing_runtime_iterations" % output_fn_base)
    metrics["runtimes"] = runtimes
    metrics["fitness"] = fitness_scores

    # initial temp
    fitness_scores_geo = []
    fitness_scores_arith = []
    fitness_scores_exp = []
    temps = range(1, 100)

    for temp in temps:
        decays = [
            mlrose.GeomDecay(init_temp=temp),
            mlrose.ArithDecay(init_temp=temp),
            mlrose.ExpDecay(init_temp=temp)
        ]
        score_lists = [fitness_scores_geo, fitness_scores_arith,
                       fitness_scores_exp]
        for decay, scores in zip(decays, score_lists):
            best_state, best_fitness, _ = mlrose.simulated_annealing(
                optimization_problem, decay, hparams["max_attempts"], 400)
            scores.append(-best_fitness
                          if optimization_problem.prob_type == 'tsp'
                          else best_fitness)

    # Fitness over an extended iteration range for each decay schedule
    # (note: `temp` below is the last value from the loop above, i.e. 99)
    decays = [
        mlrose.GeomDecay(init_temp=temp),
        mlrose.ArithDecay(init_temp=temp),
        mlrose.ExpDecay(init_temp=temp)
    ]
    fitness_scores_geo2 = []
    fitness_scores_arith2 = []
    fitness_scores_exp2 = []
    iteration_count2 = range(1, 2 * hparams["max_iters"])
    score_lists2 = [fitness_scores_geo2, fitness_scores_arith2,
                    fitness_scores_exp2]
    for decay, scores in zip(decays, score_lists2):
        for iter in iteration_count2:
            best_state, best_fitness, _ = mlrose.simulated_annealing(
                optimization_problem, decay, hparams["max_attempts"], iter)
            scores.append(-best_fitness
                          if optimization_problem.prob_type == 'tsp'
                          else best_fitness)

    plot_multiple(
        [fitness_scores_geo2, fitness_scores_arith2, fitness_scores_exp2], [
            "Geometric Decay Fitness", "Arithmetic Decay Fitness",
            "Exponential Decay Fitness"
        ],
        "%s/Annealing/sa_fitness_decay_iterations" % output_fn_base,
        title="SA Fitness Scores Over Iterations - %s" %
        get_name_of_experiment(output_fn_base),
        xlab="Iterations")

    plot_multiple(
        [fitness_scores_geo, fitness_scores_arith, fitness_scores_exp], [
            "Geometric Decay Fitness", "Arithmetic Decay Fitness",
            "Exponential Decay Fitness"
        ],
        "%s/Annealing/sa_fitness_decay" % output_fn_base,
        title="SA Fitness Scores Over Various Temps - %s" %
        get_name_of_experiment(output_fn_base),
        xlab="Initial Temperature")

    plot_multiple(
        [fitness_scores_geo], ["Geometric Decay Fitness"],
        "%s/Annealing/sa_fitness_decay_geo" % output_fn_base,
        title="SA Fitness Over Various Temps - Geometric Decay - %s" %
        get_name_of_experiment(output_fn_base),
        xlab="Initial Temperature")
    plot_multiple(
        [fitness_scores_arith], ["Arithmetic Decay Fitness"],
        "%s/Annealing/sa_fitness_decay_arith" % output_fn_base,
        title="SA Fitness Over Various Temps - Arithmetic Decay - %s" %
        get_name_of_experiment(output_fn_base),
        xlab="Initial Temperature")
    plot_multiple(
        [fitness_scores_exp], ["Exponential Decay Fitness"],
        "%s/Annealing/sa_fitness_decay_exp" % output_fn_base,
        title="SA Fitness Over Various Temps - Exponential Decay - %s" %
        get_name_of_experiment(output_fn_base),
        xlab="Initial Temperature")

    plot_multiple(
        [
            pd.Series(fitness_scores_geo).rolling(window=10).mean(),
            pd.Series(fitness_scores_arith).rolling(window=10).mean(),
            pd.Series(fitness_scores_exp).rolling(window=10).mean()
        ], [
            "Geometric Decay Fitness", "Arithmetic Decay Fitness",
            "Exponential Decay Fitness"
        ],
        "%s/Annealing/sa_fitness_decay_sma" % output_fn_base,
        title="SA Fitness Scores Over Various Temps - Rolling Mean - %s" %
        get_name_of_experiment(output_fn_base),
        xlab="Initial Temperature")

    logs.append("\tHyperparameters: \n")
    logs.append("\t%s" % str(hparams))
    logs.append("\n\n\tBest State: \n\t\t")
    logs.append(str(list(best_state)))
    logs.append("\n\tBest Fitness: \n\t\t")
    logs.append(str(best_fitness))

    return logs, metrics
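
plot_single, plot_runtimes, plot_multiple, and get_name_of_experiment are project helpers, not mlrose APIs. A minimal plot_multiple consistent with the calls above (an assumption):

import matplotlib.pyplot as plt

def plot_multiple(series_list, labels, fn_base, title="", xlab=""):
    plt.figure()
    for series, label in zip(series_list, labels):
        plt.plot(series, label=label)
    plt.title(title)
    plt.xlabel(xlab)
    plt.legend()
    plt.savefig(fn_base + ".png")
    plt.close()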