Esempio n. 1
0
def maxKColor(edges, nodes, colors):
    """Solve a Max-K-Color instance with random hill climbing and print
    the best state found, its fitness, and the elapsed wall-clock time.

    Args:
        edges: list of (u, v) node pairs defining the graph.
        nodes: number of nodes (length of the state vector).
        colors: number of available colors (max value per state entry).
    """
    problem = mlrose.DiscreteOpt(
        length=nodes,
        fitness_fn=mlrose.MaxKColor(edges),
        maximize=False,
        max_val=colors,
    )

    started = time()
    state, score = mlrose.random_hill_climb(
        problem, max_attempts=100, max_iters=np.inf, init_state=None)
    elapsed = time() - started

    print(state)
    print(score)
    print(elapsed)
Esempio n. 2
0
    def get_prob(self, t_pct=None, p_length=None):
        """Build the optimization problem named by ``self.prob_name``.

        Also stores per-problem solver hyper-parameters (schedule,
        restarts, mutation_prob, keep_pct, pop_size) on ``self``.
        Unknown problem names yield a zero-length problem with no fitness.

        Args:
            t_pct: threshold parameter for Four/Continuous Peaks.
            p_length: override for the problem length (default 100).

        Returns:
            (problem, init_state) — the DiscreteOpt instance and a random
            binary initial state of matching length.
        """
        # Each preset: (fitness factory, schedule class,
        #               mutation_prob, keep_pct, pop_size).
        presets = {
            'Four Peaks': (lambda: mlrose.FourPeaks(t_pct),
                           mlrose.ExpDecay, 0.1, 0.1, 500),
            'Continuous Peaks': (lambda: mlrose.ContinuousPeaks(t_pct),
                                 mlrose.GeomDecay, 0.1, 0.2, 200),
            'Max K Color': (lambda: mlrose.MaxKColor(self.COLOREDGE),
                            mlrose.ExpDecay, 0.2, 0.2, 200),
            'Flip Flop': (mlrose.FlipFlop,
                          mlrose.ArithDecay, 0.2, 0.5, 500),
            'One Max': (mlrose.OneMax,
                        mlrose.GeomDecay, 0.2, 0.1, 100),
        }

        if self.prob_name in presets:
            make_fitness, make_schedule, mutation, keep, pop = \
                presets[self.prob_name]
            fitness = make_fitness()
            p_len = 100
            self.schedule = make_schedule()
            self.restarts = 0
            self.mutation_prob = mutation
            self.keep_pct = keep
            self.pop_size = pop
        else:
            # Unrecognized name: no fitness, and solver attributes on
            # ``self`` are left untouched (matching prior behavior).
            fitness = None
            p_len = 0

        if p_length is None:
            p_length = p_len

        problem = mlrose.DiscreteOpt(length=p_length, fitness_fn=fitness)
        init_state = np.random.randint(2, size=p_length)
        return problem, init_state
Esempio n. 3
0
def KColorMax(state, edges, colors):
    """Compare RHC, SA, GA and MIMIC on a Max-K-Color problem.

    For each iteration budget in ``rh_iterations``, runs all four solvers
    and records each run's best fitness and wall-clock duration.

    Bug fix: the original used ``timeit.timeit()`` as if it returned a
    timestamp; called with no arguments it *benchmarks an empty statement*,
    so every recorded duration was meaningless noise.
    ``timeit.default_timer()`` (alias of ``time.perf_counter``) returns a
    real monotonic timestamp.

    Args:
        state: initial state array; its length fixes the problem size.
        edges: list of (u, v) node pairs defining the graph.
        colors: number of available colors.

    Returns:
        tuple: (iteration budgets, RHC fitnesses, SA fitnesses,
        GA fitnesses, MIMIC fitnesses, timings interleaved
        RHC/SA/GA/MIMIC per budget).
    """
    print("In KColorMax Function")
    fitness = mlrose.MaxKColor(edges)
    problem = mlrose.DiscreteOpt(length=state.shape[0],
                                 fitness_fn=fitness,
                                 maximize=False,
                                 max_val=colors)
    rh_iterations = [10, 25, 50, 75, 100, 125, 200, 250, 300]
    max_attempts = 10
    rh_fitness = []
    sm_fitness = []
    ga_fitness = []
    mi_fitness = []
    time_iterations = []
    for itera in rh_iterations:
        start = timeit.default_timer()
        rh_best_fitness = randomHill(problem, state, max_attempts, itera)
        time_iterations.append(timeit.default_timer() - start)

        start = timeit.default_timer()
        sm_best_fitness = simulatedAnnealing(problem, state, max_attempts,
                                             itera)
        time_iterations.append(timeit.default_timer() - start)

        start = timeit.default_timer()
        ga_best_fitness = genetic(problem, state, max_attempts, itera)
        time_iterations.append(timeit.default_timer() - start)

        start = timeit.default_timer()
        mi_best_fitness = mimic(problem, state, max_attempts, itera)
        time_iterations.append(timeit.default_timer() - start)

        rh_fitness.append(rh_best_fitness)
        sm_fitness.append(sm_best_fitness)
        ga_fitness.append(ga_best_fitness)
        mi_fitness.append(mi_best_fitness)
    return rh_iterations, rh_fitness, sm_fitness, ga_fitness, mi_fitness, time_iterations
import mlrose
import numpy as np
import time

# Initialize fitness function object using state
# Pair consecutive node ids into 50 disjoint edges: (0,1), (2,3), ... (98,99).
nodes = [i for i in range(100)]
it = iter(nodes)
edges = [(x, next(it)) for x in it]

fitness = mlrose.MaxKColor(edges=edges)

# Define optimization problem object: 100 binary variables (2 colors).
problem_fit = mlrose.DiscreteOpt(length=100,
                                 fitness_fn=fitness,
                                 maximize=True,
                                 max_val=2)

# Set random seed
np.random.seed(2)

## Solve problem using random hill climbing
# NOTE(review): start_time / start_time2 / start_time3 are captured but no
# elapsed time is computed in this excerpt — the timing code is presumably
# below the visible region; confirm.
start_time = time.time()
best_state1, best_fitness1 = mlrose.random_hill_climb(problem_fit,
                                                      max_attempts=100,
                                                      max_iters=100)

start_time2 = time.time()
best_state2, best_fitness2 = mlrose.simulated_annealing(
    problem_fit, schedule=mlrose.GeomDecay(), max_attempts=100, max_iters=100)

start_time3 = time.time()
Esempio n. 5
0
# Custom multi-peak fitness; multipeak_eq is defined elsewhere in this file.
multipeak_fn = mlrose.CustomFitness(multipeak_eq)

# Knapsack: 9 items, values 1..9, capacity limited to 70% of total weight.
knapsack_weights = [5, 10, 15, 20, 25, 30, 35, 40, 45]
knapsack_values = np.arange(1, len(knapsack_weights) + 1)
knapsack_max_weight = 0.7
knapsack_fn = mlrose.Knapsack(knapsack_weights, knapsack_values,
                              knapsack_max_weight)

# K-colors: 10-node graph; the set comprehension below normalizes each edge
# to sorted order and removes duplicates before building the fitness.
kcolor_edges = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),
                (0, 9), (1, 4), (1, 5), (1, 6), (1, 7), (2, 3), (2, 5), (2, 7),
                (3, 5), (3, 6), (3, 7), (3, 9), (4, 5), (5, 6), (5, 7), (5, 8),
                (6, 7), (7, 8), (8, 9)]
kcolor_edges = list({tuple(sorted(edge)) for edge in kcolor_edges})
kcolor_fn = mlrose.MaxKColor(kcolor_edges)


def multipeak_prob_wrapper():
    """Return a fresh 20-bit DiscreteOpt problem for the multi-peak fitness."""
    prob = mlrose.DiscreteOpt(length=20, fitness_fn=multipeak_fn)
    return prob


def knapsack_prob_wrapper():
    """Return a fresh DiscreteOpt problem for the knapsack fitness,
    one bit per item."""
    n_items = len(knapsack_weights)
    prob = mlrose.DiscreteOpt(length=n_items, fitness_fn=knapsack_fn)
    return prob


def kcolor_prob_wrapper():
    """Return a fresh 10-node, 2-color DiscreteOpt problem for Max-K-Color."""
    prob = mlrose.DiscreteOpt(length=10, fitness_fn=kcolor_fn, max_val=2)
    return prob

    print("Accuracy: ", aveAccuracyScoreSA)
    print('Genetic Algorithm: ')
    print("Accuracy: ", aveAccuracyScoreGA)
    print(
        '///////////////////////////////////////////////////////////////////////////////'
    )

    edges = [(0, 1), (1, 4), (1, 3), (2, 4), (3, 7), (4, 5), (4, 6), (5, 6),
             (5, 7), (6, 7), (5, 3), (0, 3), (0, 2), (1, 7), (1, 6), (0, 4),
             (1, 2), (3, 4), (8, 0), (8, 4), (8, 2), (8, 1)]
    weights = [3, 4, 5, 7, 9, 6, 10, 11]
    values = [1, 2, 3, 4, 5, 6, 7, 8]
    maxWeightPct = 2
    fitnessArray = [
        mlrose.Queens(),
        mlrose.MaxKColor(edges),
        mlrose.Knapsack(weights, values, maxWeightPct)
    ]
    titleArray = ['Queens', 'Max Color', 'Knapsack']
    for x in range(len(fitnessArray)):
        aveFitnessRHC = 0
        aveFitnessSA = 0
        aveFitnessGA = 0
        aveFitnessM = 0
        aveTimeRHC = 0
        aveTimeSA = 0
        aveTimeGA = 0
        aveTimeM = 0
        for y in range(0, 10):
            print('Results for: ', titleArray[x])
            bestFitnessRHC, timeRHC = radomHillClimb(fitnessArray[x], x)
Esempio n. 7
0
def __discrete_bit_size_problems(problem,
                                 algorithm,
                                 length,
                                 max_iter,
                                 max_attempt,
                                 init_state,
                                 edges=None,
                                 coords=None):
    """Build the named optimization problem and solve it with the named
    mlrose algorithm, timing the run.

    Args:
        problem: 'fourpeaks', 'kcolor', 'flipflop', 'continouspeaks'
            or 'travellingsales'.
        algorithm: 'random_hill_climb', 'simulated_annealing',
            'genetic_alg' or 'mimic'.
        length: state-vector length of the problem.
        max_iter: iteration budget for the solver.
        max_attempt: attempts budget for the solver.
        init_state: initial state (used by RHC and SA only).
        edges: edge list, required for 'kcolor'.
        coords: city coordinates, required for 'travellingsales'.

    Returns:
        (best_fitness, elapsed_seconds, fitness_curve)

    Raises:
        ValueError: for an unknown problem or algorithm name (the original
            fell through and crashed with UnboundLocalError instead).
    """
    if problem == 'fourpeaks':
        __fit = mlrose.FourPeaks()
        __problem = mlrose.DiscreteOpt(length=length,
                                       fitness_fn=__fit,
                                       maximize=True,
                                       max_val=2)
    elif problem == 'kcolor':
        __fit = mlrose.MaxKColor(edges=edges)
        __problem = mlrose.DiscreteOpt(length=length,
                                       fitness_fn=__fit,
                                       maximize=True)
    elif problem == 'flipflop':
        # Bug fix: this branch previously built mlrose.OneMax(), so the
        # "flipflop" experiment was silently running One Max instead.
        __fit = mlrose.FlipFlop()
        __problem = mlrose.DiscreteOpt(length=length,
                                       fitness_fn=__fit,
                                       maximize=True,
                                       max_val=2)
    elif problem == 'continouspeaks':
        __fit = mlrose.ContinuousPeaks()
        __problem = mlrose.DiscreteOpt(length=length,
                                       fitness_fn=__fit,
                                       maximize=True,
                                       max_val=2)
    elif problem == 'travellingsales':
        __fit = mlrose.TravellingSales(coords=coords)
        __problem = mlrose.TSPOpt(length=length,
                                  fitness_fn=__fit,
                                  maximize=False)
    else:
        raise ValueError('unknown problem: {!r}'.format(problem))

    if algorithm == 'random_hill_climb':
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.random_hill_climb(
            __problem,
            max_iters=max_iter,
            max_attempts=max_attempt,
            init_state=init_state,
            curve=True)
        end_time = time.time() - start_time
    elif algorithm == 'simulated_annealing':
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.simulated_annealing(
            __problem,
            max_iters=max_iter,
            max_attempts=max_attempt,
            init_state=init_state,
            curve=True)
        end_time = time.time() - start_time
    elif algorithm == 'genetic_alg':
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.genetic_alg(
            __problem,
            max_iters=max_iter,
            max_attempts=max_attempt,
            curve=True)
        end_time = time.time() - start_time
    elif algorithm == 'mimic':
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.mimic(
            __problem,
            max_iters=max_iter,
            max_attempts=max_attempt,
            curve=True)
        end_time = time.time() - start_time
    else:
        raise ValueError('unknown algorithm: {!r}'.format(algorithm))

    return best_fitness, end_time, best_curve
Esempio n. 8
0
def k_COLOR():
    """Run RHC, SA, GA and MIMIC on a 20-bit Max-K-Color problem, printing
    each solver's wall-clock time, best fitness and iteration count.

    Refactor: the four identical time-run-print stanzas are folded into the
    ``_report`` helper; the printed output is byte-identical to the original
    (including the GA line's double space after "Iteration:").
    """

    def _report(label, iter_fmt, runner):
        # Time one solver invocation and print its summary block.
        started = datetime.now()
        _, best_fitness, curve = runner()
        seconds = (datetime.now() - started).total_seconds()
        print(label)
        print("Time: ", seconds)
        print("best_fitness: ", best_fitness)
        print(iter_fmt % len(curve))

    problem = mlrose.DiscreteOpt(
        length=20,
        fitness_fn=mlrose.MaxKColor(
            edges=[(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]),
        maximize=True,
        max_val=2)
    init_state = np.array([0] * 20)

    _report("RHC", "Iteration: %d ", lambda: mlrose.random_hill_climb(
        problem,
        max_attempts=1000,
        max_iters=2500,
        restarts=0,
        init_state=init_state,
        curve=True,
        random_state=1,
        state_fitness_callback=None,
        callback_user_info=None))

    _report("SA", "Iteration: %d ", lambda: mlrose.simulated_annealing(
        problem,
        max_attempts=1000,
        max_iters=2500,
        init_state=init_state,
        curve=True,
        random_state=1,
        state_fitness_callback=None,
        callback_user_info=None))

    _report("GA", "Iteration:  %d ", lambda: mlrose.genetic_alg(
        problem,
        max_attempts=1000,
        max_iters=2500,
        curve=True,
        random_state=1,
        state_fitness_callback=None,
        callback_user_info=None))

    _report("MIMIC", "Iteration: %d ", lambda: mlrose.mimic(
        problem,
        max_attempts=1000,
        max_iters=2500,
        curve=True,
        random_state=1,
        state_fitness_callback=None,
        callback_user_info=None))
Esempio n. 9
0
def main():
    """Grid-search RHC, SA, GA and MIMIC on a 5-node Max-K-Color instance,
    stopping each search at the first configuration whose best fitness
    reaches ``max_val``, then plot the winning fitness and timing curves.

    NOTE(review): the 4-tuple unpacking of each solver's return value
    (including ``timing_curve``) matches a modified/forked mlrose API —
    stock mlrose returns at most 3 values; confirm the local fork.
    """
    name_of_exp = "K-Color"
    edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
    fitness = mlrose.MaxKColor(edges)
    init_state = np.zeros(5)
    problem = mlrose.DiscreteOpt(length=5, fitness_fn=fitness, maximize=False, max_val=2)

    # Define decay schedule
    # NOTE(review): this ``schedule`` is never used below — the SA loop
    # iterates over fresh decay objects instead; confirm it is dead code.
    schedule = mlrose.ExpDecay()

    x_s = []
    y_s = []
    z_s = ['RHC', 'SA', 'GA', 'MIMIC']
    w_s = []
    # Fitness threshold that terminates each grid search.
    # NOTE(review): the problem is built with maximize=False, yet the stop
    # condition is best_fitness >= 3.0 — confirm the intended direction.
    max_val = 3.0
    found_flag = False
    # --- RHC: sweep restarts x iteration budgets -------------------------
    for restarts in np.arange(0, 5):
        if found_flag:
            break
        for max_iter_atts in np.arange(10, 1000, 10):
            if found_flag:
                break
            # Solve problem using random hill climbing
            best_state, best_fitness, learning_curve, timing_curve = mlrose.random_hill_climb(problem, max_attempts=int(
                max_iter_atts), max_iters=int(max_iter_atts),
                                                                                              restarts=int(restarts),
                                                                                              init_state=init_state,
                                                                                              curve=True,
                                                                                              random_state=1)
            if best_fitness >= max_val:
                x_s.append(np.arange(0, len(learning_curve)))
                y_s.append(learning_curve)
                w_s.append(timing_curve)
                print(best_state)
                print(best_fitness)
                print(max_iter_atts)
                print(restarts)
                found_flag = True

    # --- SA: sweep decay schedules x iteration budgets -------------------
    found_flag = False
    for sched in [mlrose.ExpDecay(), mlrose.GeomDecay(), mlrose.ArithDecay()]:
        if found_flag:
            break
        for max_iter_atts in np.arange(10, 1000, 10):
            if found_flag:
                break
            best_state, best_fitness, learning_curve, timing_curve = mlrose.simulated_annealing(problem,
                                                                                                max_attempts=int(
                                                                                                    max_iter_atts),
                                                                                                max_iters=int(
                                                                                                    max_iter_atts),
                                                                                                schedule=sched,
                                                                                                init_state=init_state,
                                                                                                curve=True,
                                                                                                random_state=1)
            if best_fitness >= max_val:
                x_s.append(np.arange(0, len(learning_curve)))
                y_s.append(learning_curve)
                w_s.append(timing_curve)
                print(best_state)
                print(best_fitness)
                print(max_iter_atts)
                print(sched)
                found_flag = True

    # --- GA: sweep mutation prob x population size x iteration budgets ---
    found_flag = False
    for prob in np.arange(0.1, 1.1, 0.1):
        if found_flag:
            break
        for pop_size in np.arange(100, 5000, 100):
            if found_flag:
                break
            for max_iter_atts in np.arange(10, 1000, 10):
                if found_flag:
                    break
                best_state, best_fitness, learning_curve, timing_curve = mlrose.genetic_alg(problem,
                                                                                            pop_size=int(pop_size),
                                                                                            mutation_prob=prob,
                                                                                            max_attempts=int(
                                                                                                max_iter_atts),
                                                                                            max_iters=int(
                                                                                                max_iter_atts),
                                                                                            curve=True,
                                                                                            random_state=1)
                if best_fitness >= max_val:
                    x_s.append(np.arange(0, len(learning_curve)))
                    y_s.append(learning_curve)
                    w_s.append(timing_curve)
                    print(best_state)
                    print(best_fitness)
                    print(max_iter_atts)
                    print(prob)
                    print(pop_size)
                    found_flag = True

    # --- MIMIC: sweep keep_pct x population size x iteration budgets -----
    found_flag = False
    for prob in np.arange(0.1, 1.1, 0.1):
        if found_flag:
            break
        for pop_size in np.arange(100, 5000, 100):
            if found_flag:
                break
            for max_iter_atts in np.arange(10, 1000, 10):
                if found_flag:
                    break
                best_state, best_fitness, learning_curve, timing_curve = mlrose.mimic(problem, pop_size=int(pop_size),
                                                                                      keep_pct=prob,
                                                                                      max_attempts=int(max_iter_atts),
                                                                                      max_iters=int(max_iter_atts),
                                                                                      curve=True,
                                                                                      random_state=1,
                                                                                      fast_mimic=True)
                if best_fitness >= max_val:
                    x_s.append(np.arange(0, len(learning_curve)))
                    y_s.append(learning_curve)
                    w_s.append(timing_curve)
                    print(best_state)
                    print(best_fitness)
                    print(max_iter_atts)
                    print(prob)
                    print(pop_size)
                    found_flag = True

    # Plot the learning curves of the winning configuration per solver.
    for x, y, z in zip(x_s, y_s, z_s):
        plt.plot(x, y, label=z)
    plt.legend()
    plt.title('Randomized Optimization Iterations vs Fitness Function Value for {}'.format(name_of_exp))
    plt.xlabel('Function iteration count')
    plt.ylabel('Fitness function value')
    plt.show()
    plt.clf()
    # Plot the timing curves for the same configurations.
    for x, w, z in zip(x_s, w_s, z_s):
        plt.plot(x, w, label=z)
    plt.legend()
    plt.title('Randomized Optimization Time vs Fitness Function Value for {}'.format(name_of_exp))
    plt.xlabel('Function iteration count')
    plt.ylabel('Time in Seconds')
    plt.show()
Esempio n. 10
0
def optimizationFunction(n,iMin, iMax, iStep, jMin, jMax, jStep):
    """Run RHC, SA, GA and MIMIC over a grid of (max_iters, max_attempts)
    for three problems (Knapsack, Four Peaks, K-Colors) of size ``n``,
    collecting one result row per run, then hand each problem's DataFrame
    to ``plotGraphs``.

    Args:
        n: problem size (items / bits / edges).
        iMin, iMax, iStep: range of ``max_iters`` values to sweep.
        jMin, jMax, jStep: range of ``max_attempts`` values to sweep.
    """

    np.random.seed(100)
    optScenarios = ['Knap Sack', 'Four Peaks', 'K - Colors']
    for x in range(3):
        if x == 0:
            # Knapsack: random weights in [1, 10), values 1..n.
            weights = np.random.randint(1,10,size=n)
            #values = np.random.randint(1,50,size=n)
            values = [i for i in range(1,n+1)]
            max_weight_pct = 0.5
            fitnessFunction = mlrose.Knapsack(weights, values, max_weight_pct)
            optModel = mlrose.DiscreteOpt(len(values), fitness_fn = fitnessFunction, maximize=True)
        elif x == 1:
            # Four Peaks: shuffled half-zeros / half-ones bit string.
            # NOTE(review): ``inp`` only fixes the problem length; the
            # shuffled content itself is never passed to a solver — confirm.
            inp = [0] * int(n/2) + [1]*int(n - int(n/2))
            np.random.shuffle(inp)
            fitnessFunction = mlrose.FourPeaks(t_pct = 0.15)
            optModel = mlrose.DiscreteOpt(len(inp), fitness_fn = fitnessFunction, maximize =True)
        elif x == 2:
            # K-Colors: n random edges.
            # NOTE(review): randint can produce self-loops and duplicate
            # edges here — confirm that is acceptable for this experiment.
            edges = [(np.random.randint(0,n), np.random.randint(0,n)) for ab in range(n)]
            fitnessFunction = mlrose.MaxKColor(edges)
            optModel = mlrose.DiscreteOpt(len(edges), fitness_fn = fitnessFunction, maximize =True)

        decay = mlrose.ExpDecay()


        # One row per (algorithm, i, j) combination.
        optResults = {'iterations':[],'attempts':[],'fitness':[],'time':[], 'optimization':[]}
        for i in range(iMin,iMax,iStep):

            for j in range(jMin,jMax,jStep):
                start_time = timer()
                best_state, best_fitness = mlrose.random_hill_climb(optModel, max_attempts = j, max_iters = i, random_state=100)
                opt_time = timer() - start_time

                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('Random Hill')
                start_time = timer()
                best_state, best_fitness = mlrose.simulated_annealing(optModel, schedule=decay, max_attempts = j,max_iters = i,random_state=1000)
                opt_time = timer() - start_time
                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('Simulated Annealing')

                start_time = timer()
                best_state, best_fitness = mlrose.genetic_alg(optModel, pop_size=200, mutation_prob = 0.25, max_attempts = j, max_iters = i, random_state=5000)
                opt_time = timer() - start_time

                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('Genetic Algorithm')
                start_time = timer()
                best_state, best_fitness = mlrose.mimic(optModel, pop_size = 200, keep_pct = 0.3, max_attempts = j, max_iters = i, random_state=150)
                opt_time = timer() - start_time
                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('MIMIC')

        optResults = pd.DataFrame(optResults)

        plotGraphs(optResults,optScenarios[x])
Esempio n. 11
0
import pandas as pd
import math
import time
import mlrose
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D

# Edge list used to build a sequence of growing Max-K-Color instances.
# NOTE(review): contains repeated entries such as (6, 8) and (3, 9) —
# duplicate edges are counted twice by MaxKColor; confirm intent.
edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4), (5, 6),
         (6, 8), (7, 6), (3, 6), (6, 8), (1, 9), (3, 9), (6, 9), (7, 9),
         (1, 10), (6, 10), (3, 9), (11, 5), (6, 11), (1, 11), (5, 7), (3, 11),
         (2, 10), (7, 11), (6, 12), (13, 8), (14, 5), (13, 12), (15, 4),
         (12, 16), (17, 18), (18, 19), (19, 2), (20, 4), (6, 19), (11, 21),
         (22, 15), (10, 23)]
schedule = mlrose.ExpDecay()

# Fitness functions over progressively larger prefixes of the edge list,
# to study how problem size affects the solvers.
fitness1 = mlrose.MaxKColor(edges[0:5])
fitness2 = mlrose.MaxKColor(edges[0:7])
fitness3 = mlrose.MaxKColor(edges[0:9])
fitness4 = mlrose.MaxKColor(edges[0:11])
fitness5 = mlrose.MaxKColor(edges[0:13])
fitness6 = mlrose.MaxKColor(edges[0:15])
fitness7 = mlrose.MaxKColor(edges[0:19])
fitness8 = mlrose.MaxKColor(edges[0:21])
fitness9 = mlrose.MaxKColor(edges[0:24])
fitness10 = mlrose.MaxKColor(edges[0:30])

start = time.time()
# Smallest instance: 5 nodes, up to 4 colors, first 5 edges.
problem_fit1 = mlrose.DiscreteOpt(length=5,
                                  fitness_fn=fitness1,
                                  maximize=True,
                                  max_val=4)
Esempio n. 12
0
#sprint(fitness_two.evaluate(state))

# NOTE(review): the GA result below is immediately overwritten by the
# simulated-annealing call, so only the SA result is printed — this looks
# like experiment toggling via commenting; confirm which solver is intended.
best_state_two, best_fitness_two = ml.genetic_alg(opt_two, random_state = 2)

#best_state_two, best_fitness_two = ml.random_hill_climb(opt_two, random_state = 2)

best_state_two, best_fitness_two = ml.simulated_annealing(opt_two, max_iters=1, random_state = 2)

#best_state_two, best_fitness_two = ml.mimic(opt_two, max_iters=1, random_state = 2)

print(best_state_two)


print(best_fitness_two)

#plt.scatter([0,1,0,1,2,3],[1,2,2,3,3,4])
#plt.savefig("test.png")
# Third experiment: 5-node Max-K-Color, minimizing color conflicts.
edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]

fitness_three = ml.MaxKColor(edges)
opt_three = ml.DiscreteOpt(length=5, fitness_fn=fitness_three, maximize=False)
#best_state_three, best_fitness_three = ml.genetic_alg(opt_three, random_state = 2)
#best_state_three, best_fitness_three = ml.random_hill_climb(opt_three, random_state = 2)
# NOTE(review): the SA result is overwritten by the MIMIC call on the next
# line; only the MIMIC result is printed below — confirm intent.
best_state_three, best_fitness_three = ml.simulated_annealing(opt_three, random_state = 2)
best_state_three, best_fitness_three = ml.mimic(opt_three, max_iters=1, random_state = 2)

# Sanity check: conflicts for a hand-picked coloring.
print(fitness_three.evaluate([1, 0, 0, 1, 0]))

print(best_state_three)

print(best_fitness_three)
Esempio n. 13
0
import random
import mlrose
import numpy as np
import matplotlib.pyplot as plt
# Bug fix: time.clock was deprecated since Python 3.3 and removed in 3.8,
# so this import crashed on any modern interpreter.  perf_counter is the
# recommended replacement; aliasing it as ``clock`` keeps the rest of the
# script (below this excerpt) working unchanged.
from time import perf_counter as clock
#https://mlrose.readthedocs.io/en/stable/source/fitness.html

# Small 6-node graph (not used below in this excerpt; kept for reference).
edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4), (5, 1)]

# Larger edge list actually used for the Max-K-Color fitness.
# NOTE(review): contains self-loops such as (1, 1) and (4, 4) — confirm
# these are intentional.
edges_long = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3),
              (5, 7), (5, 9), (5, 10), (4, 3), (4, 8), (4, 10), (6, 8), (6, 9),
              (9, 3), (9, 6), (9, 1), (9, 8)]

fitness = mlrose.MaxKColor(edges_long)
# NOTE(review): init_state has length 5 but the problem below has length
# k = 20 — confirm this state is actually meant to be used.
init_state = np.array([0, 1, 0, 1, 1])

k = 20
problem = mlrose.DiscreteOpt(length=k, fitness_fn=fitness, maximize=False)

# Define decay schedule
schedule = mlrose.ExpDecay()

# Per-algorithm result containers, filled further down the script.
RHC_iterations = []
SA_iterations = []
GA_iterations = []
MIMIC_iterations = []
iterations = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]

RHC_timings = []
SA_timings = []
GA_timings = []