Example #1
def get_fitness_function(problem_type):
    fitness, problem = None, None

    if problem_type == Problem.KNAPSACK:
        fitness = mlrose.Knapsack(weights=[
            70, 73, 77, 80, 82, 87, 90, 94, 98, 106, 110, 113, 115, 118, 120
        ],
                                  values=[
                                      1.35, 1.39, 1.49, 1.50, 1.56, 1.63, 1.73,
                                      1.84, 1.92, 2.01, 2.10, 2.14, 2.21, 2.29,
                                      2.40
                                  ],
                                  max_weight_pct=0.52)
        problem = mlrose.DiscreteOpt(length=15,
                                     fitness_fn=fitness,
                                     maximize=True,
                                     max_val=2)

    elif problem_type == Problem.NQUEEN:
        fitness = mlrose.Queens()
        problem = mlrose.DiscreteOpt(length=8,
                                     fitness_fn=fitness,
                                     maximize=False,
                                     max_val=8)

    elif problem_type == Problem.FOUR_PEAKS:
        fitness = mlrose.FourPeaks(t_pct=0.15)
        problem = mlrose.DiscreteOpt(length=100,
                                     fitness_fn=fitness,
                                     maximize=True,
                                     max_val=2)

    return fitness, problem
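
A minimal usage sketch for the helper above, assuming the module's own Problem enum and the classic mlrose package; the simulated-annealing settings are illustrative, not the original author's.

import mlrose

# Hypothetical call site; Problem.KNAPSACK is the enum value used by get_fitness_function.
fitness, problem = get_fitness_function(Problem.KNAPSACK)

# Classic mlrose returns (best_state, best_fitness) when curve=False.
best_state, best_fitness = mlrose.simulated_annealing(
    problem, max_attempts=10, max_iters=1000, random_state=1)
print(best_state, best_fitness)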
Example #2
    def optimize(self):
        problem_size_space = self.problem_size
        # Initialize a random binary state and random item weights/values
        init_state = np.random.randint(0, 2, size=problem_size_space)
        weights = [
            int(np.random.randint(1, problem_size_space // 2))
            for _ in range(problem_size_space)
        ]
        values = [
            int(np.random.randint(1, problem_size_space // 2))
            for _ in range(problem_size_space)
        ]
        # print('weight:',weights)
        # print('value:',values)

        # max_weight_pct (float, default 0.35) sets the knapsack capacity W as a
        # percentage of the total weight: W = max_weight_pct * total_weight.
        fitness = mlrose.Knapsack(weights=weights,
                                  values=values,
                                  max_weight_pct=1.0)
        problem = mlrose.DiscreteOpt(length=problem_size_space,
                                     fitness_fn=fitness,
                                     maximize=True,
                                     max_val=2)
        # SA
        # super().gridSearchSA(problem,'Knapsack',problem_size_space,self.noOfiteration)
        # RHC
        # super().gridSearchRHC(problem,'Knapsack',problem_size_space,self.noOfiteration)
        #GA
        # super().gridSearchGA(problem,'Knapsack',problem_size_space,self.noOfiteration)
        #MIMIC
        super().gridSearchMIMIC(problem, 'Knapsack', problem_size_space,
                                self.noOfiteration)
Example #3
def get_fitness_functions():
    df = pd.read_csv("./houston2008_order.csv")
    coord_list = list(df[['lat', 'long']].apply(tuple, axis=1))
    coord_list = coord_list[0:30]
    fitness_tsp = mlrose.TravellingSales(coords=coord_list)
    problem_tsp = mlrose.TSPOpt(length=len(coord_list),
                                fitness_fn=fitness_tsp,
                                maximize=False)

    fitness_fourpeak = mlrose.FourPeaks(t_pct=.3)
    problem_fourpeak = mlrose.DiscreteOpt(length=20,
                                          fitness_fn=fitness_fourpeak)

    fitness_flipflop = mlrose.FlipFlop()
    problem_flipflop = mlrose.DiscreteOpt(length=30,
                                          fitness_fn=fitness_flipflop)

    fitness_one_max = mlrose.OneMax()
    problem_one_max = mlrose.DiscreteOpt(
        length=35,
        fitness_fn=fitness_one_max,
    )

    weights = [10, 5, 2, 8, 15]
    values = [1, 2, 3, 4, 5]
    max_weight_pct = 0.6
    fitness_knapsack = mlrose.Knapsack(weights, values, max_weight_pct)
    problem_knapsack = mlrose.DiscreteOpt(length=5,
                                          fitness_fn=fitness_knapsack)

    return {
        "tsp": problem_tsp,
        "four_peaks": problem_fourpeak,
        "flip_flop": problem_flipflop,
        "one_max": problem_one_max,
        "knapsack": problem_knapsack,
    }
Example #4
def create_knapsack_problem(length):
    weights = np.random.randint(1, 20, length)
    values = np.random.randint(1, 5, length)
    fitness = mlrose_hiive.Knapsack(weights, values, max_weight_pct=1)
    knapsack = mlrose_hiive.opt_probs.discrete_opt.DiscreteOpt(
        length=len(weights), fitness_fn=fitness, maximize=True)
    return knapsack
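
A hedged usage sketch for the factory above, assuming mlrose_hiive optimizers return a (state, fitness, curve) triple; the genetic-algorithm settings are illustrative.

import mlrose_hiive

# Build a small random knapsack instance and solve it with a GA.
problem = create_knapsack_problem(length=20)
best_state, best_fitness, _ = mlrose_hiive.genetic_alg(
    problem, pop_size=200, mutation_prob=0.1, max_attempts=10, random_state=1)
print(best_fitness)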
Example #5
def get_knapsack(size):
    weights = [10, 5, 2, 8, 15]
    values = [1, 2, 3, 4, 5]
    max_weight_pct = 0.6
    knapsack = mlrose.Knapsack(weights, values, max_weight_pct)
    state = np.array([1, 0, 2, 1, 0])
    knapsack.evaluate(state)
    problem = mlrose.DiscreteOpt(
        length=size,
        fitness_fn=knapsack,
        maximize=True,
        max_val=2  # makes it bit string
    )
    return problem
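
A rough worked check of the sample state above, assuming the capacity rule quoted in Example #2 (W = max_weight_pct * total weight) and the usual feasible-else-zero knapsack scoring; note that max_val=2 restricts the optimizer itself to 0/1 states, so the 2 only appears in this hand-built evaluation.

weights = [10, 5, 2, 8, 15]
values = [1, 2, 3, 4, 5]
state = [1, 0, 2, 1, 0]
capacity = 0.6 * sum(weights)                                # 24.0
packed_weight = sum(w * s for w, s in zip(weights, state))   # 22 -> within capacity
packed_value = sum(v * s for v, s in zip(values, state))     # 11 -> expected fitness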
Example #6
def ks_fitness_fn(state):
    """Knapsack fitness with evaluation counting.

    Alternative small configuration, for reference:
    N = 5
    weights = [10, 5, 2, 8, 15]
    values =  [1, 2, 3, 4, 5]
    max_weight_pct = 0.8
    """
    global eval_count
    N = 10
    weights = [0.11133083, 0.21076757, 0.23296249, 0.15194456, 0.83017814, 0.40791941,
              0.5557906,  0.74552394, 0.24849976, 0.9686594 ]
    values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    max_weight_pct = 0.8
    fitness = mlrose_hiive.Knapsack(weights=weights, values=values, max_weight_pct=max_weight_pct)
    eval_count += 1
    return fitness.evaluate(state)
Example #7
def get_problem(size=100):
    global orig_fitness_func

    seed = 42
    number_of_items_types = size
    max_weight_per_item = 25
    max_value_per_item = 10
    max_weight_pct = 0.35
    max_item_count = 10
    multiply_by_max_item_count = True
    np.random.seed(seed)
    weights = 1 + np.random.randint(max_weight_per_item, size=number_of_items_types)
    values = 1 + np.random.randint(max_value_per_item, size=number_of_items_types)
    orig_fitness_func = mlrose_hiive.Knapsack(weights, values, max_weight_pct=max_weight_pct, max_item_count=max_item_count, multiply_by_max_item_count=multiply_by_max_item_count)

    fitness = mlrose_hiive.CustomFitness(fitness_func)
    problem = mlrose_hiive.DiscreteOpt(length=number_of_items_types, fitness_fn=fitness, maximize=True)
    return problem
def main():
    ## SET SOME PARAMS TO USE GLOBALLY
    max_iters_list = [50, 100, 1000]  #,32,64,128,256,512,1024]
    max_iters_list_full = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
    rand_list = [1, 11, 22]  #,44,55,66,77,88,99]
    rand_list_full = [0, 11, 22, 33, 44, 55, 66, 77, 88, 99]
    input_location = 'data/'
    output_location = 'outputs/'
    chart_output_location = 'charts/'
    prefix = '5th_'

    ## DEFINE PROBLEMS TO SOLVE
    # Traveling Salesman Problem (TSP)
    space_length = 1000
    cities_cnt = 200
    coords_list, x, y = create_TSP(space_length,
                                   cities_cnt,
                                   return_lists_too=True)
    plt.plot(x, y, 'o')
    plt.savefig(chart_output_location + 'TSP_visual' + '.png')
    fitness_coords = mlrose.TravellingSales(coords=coords_list)
    problem_TSP = mlrose.TSPOpt(length=len(coords_list),
                                fitness_fn=fitness_coords,
                                maximize=False)

    # 4 Peaks
    t_pct = 0.1
    length = 200
    fitness_4_peaks = mlrose.FourPeaks(t_pct=t_pct)
    problem_4P = mlrose.DiscreteOpt(length=length,
                                    fitness_fn=fitness_4_peaks,
                                    maximize=True,
                                    max_val=2)
    problem_4P_small = mlrose.DiscreteOpt(length=50,
                                          fitness_fn=fitness_4_peaks,
                                          maximize=True,
                                          max_val=2)
    problem_4P_big = mlrose.DiscreteOpt(length=1000,
                                        fitness_fn=fitness_4_peaks,
                                        maximize=True,
                                        max_val=2)

    # Continuous Peaks
    t_pct = 0.1
    length = 200
    fitness_cont_peaks = mlrose.ContinuousPeaks(t_pct=t_pct)
    problem_cont_peaks = mlrose.DiscreteOpt(length=length,
                                            fitness_fn=fitness_cont_peaks,
                                            maximize=True,
                                            max_val=2)

    # Flip Flop
    length = 200
    fitness_FF = mlrose.FlipFlop()
    problem_FF = mlrose.DiscreteOpt(length=length,
                                    fitness_fn=fitness_FF,
                                    maximize=True,
                                    max_val=2)
    problem_FF_small = mlrose.DiscreteOpt(length=50,
                                          fitness_fn=fitness_FF,
                                          maximize=True,
                                          max_val=2)
    problem_FF_big = mlrose.DiscreteOpt(length=1000,
                                        fitness_fn=fitness_FF,
                                        maximize=True,
                                        max_val=2)

    # Knapsack
    length = 200
    weights, values = create_Knapsack(length)
    weights_big, values_big = create_Knapsack(1000)
    weights_small, values_small = create_Knapsack(50)
    fitness_KS = mlrose.Knapsack(weights, values, max_weight_pct=0.65)
    fitness_KS_big = mlrose.Knapsack(weights_big,
                                     values_big,
                                     max_weight_pct=0.65)
    fitness_KS_small = mlrose.Knapsack(weights_small,
                                       values_small,
                                       max_weight_pct=0.65)
    problem_KS = mlrose.DiscreteOpt(length=length,
                                    fitness_fn=fitness_KS,
                                    maximize=True,
                                    max_val=2)
    problem_KS_big = mlrose.DiscreteOpt(length=1000,
                                        fitness_fn=fitness_KS_big,
                                        maximize=True,
                                        max_val=2)
    problem_KS_small = mlrose.DiscreteOpt(length=50,
                                          fitness_fn=fitness_KS_small,
                                          maximize=True,
                                          max_val=2)

    dict_of_param_dict = {}
    dict_of_param_dict['GA'] = {
        'pop_size': [100, 200],  #,1000],
        'mutation_prob': [0.5, 0.1, 0.2],
        'max_attempts': [5, 10, 30],
        'max_iters': max_iters_list,
        'random_state': rand_list
    }
    dict_of_param_dict['RHC'] = {
        'max_attempts': [30, 50, 100],  #[5,10,20,50]
        'restarts': [5, 10, 20],  #[0,1,2,5]
        'max_iters': max_iters_list,
        'random_state': rand_list
    }
    dict_of_param_dict['SA'] = {
        'max_attempts': [10, 50, 100],
        'init_temp': [1.0, 10.0, 0.5, 20, 100, 1000],
        'decay': [0.99, 0.8, 0.5],
        'max_iters': max_iters_list,
        'random_state': rand_list
    }
    dict_of_param_dict['MIMIC'] = {
        'pop_size': [100, 150],
        'keep_pct': [0.5, 0.2],
        'max_attempts': [10],
        'max_iters': [100],
        'random_state': rand_list
    }

    MIMIC_FF = {
        'pop_size': 100,
        'keep_pct': 0.5,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    MIMIC_4P = {
        'pop_size': 150,
        'keep_pct': 0.2,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    MIMIC_KS = {
        'pop_size': 150,
        'keep_pct': 0.5,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    MIMIC_CP = {
        'pop_size': 200,
        'keep_pct': 0.2,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    GA_FF = {
        'pop_size': 200,  #,1000],
        'mutation_prob': 0.5,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }

    MIMIC_FF2 = {
        'pop_size': [100],
        'keep_pct': [0.5],
        'max_attempts': [30, 50],
        'max_iters': [64],
        'random_state': [55]  #,66,77,88,99]
    }

    print("starting MIMIC FF")
    # GETTING MIMIC FF RESULTS
    print("starting MIMIC FF...")
    ''' ## Started running at 3am
    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_FF, MIMIC_FF['max_iters'], MIMIC_FF['random_state']\
    , pop_size=MIMIC_FF['pop_size'], max_attempts=MIMIC_FF['max_attempts'], curve=True, keep_pct=MIMIC_FF['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_FF_attempt_3am.csv')


    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_4P, MIMIC_4P['max_iters'], MIMIC_4P['random_state']\
    , pop_size=MIMIC_4P['pop_size'], max_attempts=MIMIC_4P['max_attempts'], curve=True, keep_pct=MIMIC_4P['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_4P_attempt_3am.csv')


    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_KS, MIMIC_KS['max_iters'], MIMIC_KS['random_state']\
    , pop_size=MIMIC_KS['pop_size'], max_attempts=MIMIC_KS['max_attempts'], curve=True, keep_pct=MIMIC_KS['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_KS_attempt_3am.csv')


    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_cont_peaks, MIMIC_CP['max_iters'], MIMIC_CP['random_state']\
    , pop_size=MIMIC_CP['pop_size'], max_attempts=MIMIC_CP['max_attempts'], curve=True, keep_pct=MIMIC_CP['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_CP_attempt_3am.csv')

    '''

    ## USED FOR GRID SEARCHING PARAMETERS FOR RO ON 3 PROBLEMS
    GA_params_dict = get_params_for_grid_search('GA', max_iters_list=[200])
    print("Here are my GA params for grid search: ", GA_params_dict)
    SA_params_dict = get_params_for_grid_search('SA',
                                                max_iters_list=max_iters_list)
    print("Here are my SA params for grid search: ", SA_params_dict)
    RHC_params_dict = get_params_for_grid_search('RHC',
                                                 max_iters_list=max_iters_list)
    print("Here are my RHC params for grid search: ", RHC_params_dict)
    MIMIC_params_dict = get_params_for_grid_search(
        'MIMIC', max_iters_list=max_iters_list)
    print("Here are my MIMIC params for grid search: ", MIMIC_params_dict)
    #grid_search_MIMIC = MIMIC_best_params(problem_TSP, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + 'grid_search_MIMIC.csv')
    '''
    grid_search_GA = GA_best_params(problem_FF, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_FF_really.csv')
    print("finished GA")
    grid_search_MIMIC = MIMIC_best_params(problem_FF, MIMIC_params_dict, inverse_fitness=False)
    grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_FF_really.csv')
    '''
    print("finished MIMIC FF")

    print("Doing GA rn")
    #results_df, curve_output_list = fitness_by_iter('GA', problem_FF, GA_FF['max_iters'], GA_FF['random_state']\
    #, pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], mutation_prob=GA_FF['mutation_prob'],curve=True)
    #results_df.to_csv(output_location + 'final_MIMIC_FF_attempt_1am.csv')
    print("finished GA")
    ''' GRID SEARCHING

    print("Starting grid search for RHC")
    grid_search_RHC = RHC_best_params(problem_TSP, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix +'grid_search_RHC_TSP.csv')
    grid_search_RHC = RHC_best_params(problem_FF, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_FF.csv')
    grid_search_RHC = RHC_best_params(problem_cont_peaks, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_cont_peaks.csv')
    grid_search_RHC = RHC_best_params(problem_4P, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_4P.csv')

    print("Starting grid search for SA")
    grid_search_SA = SA_best_params(problem_TSP, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_TSP.csv')
    grid_search_SA = SA_best_params(problem_FF, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_FF.csv')
    grid_search_SA = SA_best_params(problem_cont_peaks, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_cont_peaks.csv')
    grid_search_SA = SA_best_params(problem_4P, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_4P.csv')

    print("Starting grid search for GA")
    grid_search_GA = GA_best_params(problem_TSP, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_TSP.csv')
    grid_search_GA = GA_best_params(problem_FF, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_FF.csv')
    grid_search_GA = GA_best_params(problem_cont_peaks, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_cont_peaks.csv')
    grid_search_GA = GA_best_params(problem_4P, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_4P.csv')
    '''
    '''
    print("Starting grid search for MIMIC")
    grid_search_MIMIC = MIMIC_best_params(problem_FF, MIMIC_params_dict, inverse_fitness=False)
    grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_FF.csv')
    #grid_search_MIMIC = MIMIC_best_params(problem_cont_peaks, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_cont_peaks.csv')
    grid_search_MIMIC = MIMIC_best_params(problem_4P, MIMIC_params_dict, inverse_fitness=False)
    grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_4P.csv')
    #grid_search_MIMIC = MIMIC_best_params(problem_TSP, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + 'grid_search_MIMIC_TSP.csv')
    print("Finished MIMIC grid searches")

    print("Starting grid search for Knapsack")
    #grid_search_MIMIC = MIMIC_best_params(problem_KS, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_KS.csv')
    #grid_search_GA = GA_best_params(problem_KS, GA_params_dict, inverse_fitness=False)
    #grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_KS.csv')
    grid_search_SA = SA_best_params(problem_KS, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_KS.csv')
    grid_search_RHC = RHC_best_params(problem_KS, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_KS.csv')
    '''

    ## Fitting MIMIC separately and with fewer iterations for all except the FF as run time is so long for MIMIC
    max = 128  # note: shadows the built-in max(); the commented-out curve_to_df calls below pass this value
    ''' MIMIC CURVE FOR CHARTS ##### Started (again) at 8am ######

    print("Fitting for MIMIC using the 'curve=True' functionality")
    print("First for KS")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_KS, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_KS_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_KS_short_curve.csv')
    print("Finished KS")

    print("Next for 4 Peaks")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=150, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks with 100 and 0.5")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_pop100_keep50_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_pop100_keep50_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks with 100 and 0.2")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=100, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_pop100_keep20_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_pop100_keep20_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks with 150 and 0.5")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=100, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_pop150_keep50_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_pop150_keep50_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks Big")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P_big, pop_size=150, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_big_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_big_short_curve.csv')
    print("Finished 4 Peaks Big")

    print("Next for KS Small")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_KS_small, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_KS_small_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_KS_small_short_curve.csv')
    print("Finished KS small")

    print("Next FF small")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_FF_small, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_FF_small_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_FF_small_short_curve.csv')
    print("Finished FF Small")

    print("Next for 4 Peaks Small")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P_small, pop_size=150, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_small_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_small_short_curve.csv')
    print("Finished 4 Peaks Small")
    '''

    ### Now GA

    GA_FF = {
        'pop_size': 100,  #,1000],
        'mutation_prob': 0.1,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    GA_KS = {
        'pop_size': 200,  #,1000],
        'mutation_prob': 0.2,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    GA_4P = {
        'pop_size': 200,  #,1000],
        'mutation_prob': 0.5,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    ''' More fitness by iteration calculations
    #results_df, curve_output_list = fitness_by_iter('GA', problem_FF, GA_FF['max_iters'], GA_FF['random_state']\
    #, pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], curve=True, mutation_prob=GA_FF['mutation_prob'])
    #results_df.to_csv(output_location + 'final_GA_FF_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_FF_small, GA_FF['max_iters'], GA_FF['random_state']\
    , pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], curve=True, mutation_prob=GA_FF['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_FF_small_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_FF_big, GA_FF['max_iters'], GA_FF['random_state']\
    , pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], curve=True, mutation_prob=GA_FF['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_FF_big_attempt_8am.csv')



    #results_df, curve_output_list = fitness_by_iter('GA', problem_4P, GA_4P['max_iters'], GA_4P['random_state']\
    #, pop_size=GA_4P['pop_size'], max_attempts=GA_4P['max_attempts'], curve=True, mutation_prob=GA_4P['mutation_prob'])
    #results_df.to_csv(output_location + 'final_GA_4P_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_4P_big, GA_4P['max_iters'], GA_4P['random_state']\
    , pop_size=GA_4P['pop_size'], max_attempts=GA_4P['max_attempts'], curve=True, mutation_prob=GA_4P['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_4P_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_4P_small, GA_4P['max_iters'], GA_4P['random_state']\
    , pop_size=GA_4P['pop_size'], max_attempts=GA_4P['max_attempts'], curve=True, mutation_prob=GA_4P['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_4P_small_attempt_8am.csv')



    #results_df, curve_output_list = fitness_by_iter('GA', problem_KS, GA_KS['max_iters'], GA_KS['random_state']\
    #, pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=GA_KS['mutation_prob'])
    #results_df.to_csv(output_location + 'final_GA_KS_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_KS_big, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=GA_KS['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_KS_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_KS_small, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=GA_KS['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_KS_small_attempt_8am.csv')

    '''

    ########### SA
    print("now doing SA")
    SA_4P = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=100, decay=0.8),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }

    SA_FF = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=100, decay=0.8),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_attempt_8am.csv')

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P_big, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P_small, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_small_attempt_8am.csv')
    ''' more fitness by iteration calculations
    #results_df, curve_output_list = fitness_by_iter('SA', problem_FF, SA_FF['max_iters'], SA_FF['random_state']\
    #, schedule=SA_FF['schedule'], max_attempts=SA_FF['max_attempts'], curve=True)
    #results_df.to_csv(output_location + 'final_SA_FF_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_FF_big, SA_FF['max_iters'], SA_FF['random_state']\
    , schedule=SA_FF['schedule'], max_attempts=SA_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_FF_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_FF_small, SA_FF['max_iters'], SA_FF['random_state']\
    , schedule=SA_FF['schedule'], max_attempts=SA_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_FF_small_attempt_8am.csv')


    SA_4P = {
    'max_attempts':10,
    'schedule':mlrose.GeomDecay(init_temp=100, decay=0.8),
    'max_iters':max_iters_list_full,
    'random_state':rand_list_full
    }

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P_big, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_small_attempt_8am.csv')
    '''
    print("picking up where I left off on making the final curves..")

    SA_KS = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=1000, decay=0.99),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    ''' more fitness by iteration calculations
    results_df, curve_output_list = fitness_by_iter('SA', problem_KS, SA_KS['max_iters'], SA_KS['random_state']\
    , schedule=SA_KS['schedule'], max_attempts=SA_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_KS_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_KS_big, SA_KS['max_iters'], SA_KS['random_state']\
    , schedule=SA_KS['schedule'], max_attempts=SA_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_KS_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_KS_small, SA_KS['max_iters'], SA_KS['random_state']\
    , schedule=SA_KS['schedule'], max_attempts=SA_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_KS_small_attempt_8am.csv')
    '''

    RHC_KS = {
        'max_attempts': 50,
        'restarts': 20,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('RHC', problem_KS, RHC_KS['max_iters'], RHC_KS['random_state']\
    , restarts=RHC_KS['restarts'], max_attempts=RHC_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_KS_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_KS_big, RHC_KS['max_iters'], RHC_KS['random_state']\
    , restarts=RHC_KS['restarts'], max_attempts=RHC_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_KS_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_KS_small, RHC_KS['max_iters'], RHC_KS['random_state']\
    , restarts=RHC_KS['restarts'], max_attempts=RHC_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_KS_small_attempt_8am.csv')
    '''
    RHC_FF = {
        'max_attempts': 50,
        'restarts': 20,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('RHC', problem_FF, RHC_FF['max_iters'], RHC_FF['random_state']\
    , restarts=RHC_FF['restarts'], max_attempts=RHC_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_FF_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_FF_big, RHC_FF['max_iters'], RHC_FF['random_state']\
    , restarts=RHC_FF['restarts'], max_attempts=RHC_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_FF_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_FF_small, RHC_FF['max_iters'], RHC_FF['random_state']\
    , restarts=RHC_FF['restarts'], max_attempts=RHC_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_FF_small_attempt_8am.csv')
    '''

    RHC_4P = {
        'max_attempts': 50,
        'restarts': 20,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('RHC', problem_4P, RHC_4P['max_iters'], RHC_4P['random_state']\
    , restarts=RHC_4P['restarts'], max_attempts=RHC_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_4P_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_4P_small, RHC_4P['max_iters'], RHC_4P['random_state']\
    , restarts=RHC_4P['restarts'], max_attempts=RHC_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_4P_small_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_4P_big, RHC_4P['max_iters'], RHC_4P['random_state']\
    , restarts=RHC_4P['restarts'], max_attempts=RHC_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_4P_big_attempt_8am.csv')
    '''

    ## where it stopped
    print("I will now make the complexity curves for other algos")
    SA_4P_hacked = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=100, decay=0.99),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P_hacked['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_decay_99.csv')

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=mlrose.GeomDecay(init_temp=1, decay=0.8), max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_T_1_decay_80.csv')

    results_df, curve_output_list = fitness_by_iter('GA', problem_KS, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=0.1)
    results_df.to_csv(output_location + 'final_GA_KS_mutation_01.csv')
    '''
    results_df, curve_output_list = fitness_by_iter('GA', problem_KS, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=100, max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=0.2)
    results_df.to_csv(output_location + 'final_GA_KS_mutation_02_pop_100.csv')

    ## Need a few more MIMIC chart inputs
    #print("Need a few more MIMIC chart inputs, so I will now make those")
    #print("Next FF p=100 keep=0.2")
    ''' MIMIC inputs for charts
Example #9
    # From a larger script: args, run_ga and run_sa are parsed/defined earlier.
    run_rh = (args.rh == 'y')
    run_mi = (args.mi == 'y')
    run_plots = (args.plot == 'y')

    vLength = 50

    iterlist = [i for i in range(vLength * 2)]
    galist = [i for i in range(vLength * 2)]

    mimiciterlist = [i for i in range(int(vLength))]

    max_one = mlrose.OneMax()
    run_toy_data(max_one, vLength, "max_one", run_ga, run_sa, run_rh, run_mi,
                 iterlist, galist, mimiciterlist)

    four_peaks = mlrose.FourPeaks(t_pct=.2)
    run_toy_data(four_peaks, vLength, "four_peaks", run_ga, run_sa, run_rh,
                 run_mi, iterlist, galist, mimiciterlist)

    weights = [random.randint(5, 30) for i in range(vLength)]
    values = [random.randint(1, 5) for i in range(vLength)]
    max_weight_pct = 0.6
    knapsack = mlrose.Knapsack(weights, values, max_weight_pct)
    run_toy_data(knapsack, vLength, "knapsack", run_ga, run_sa, run_rh, run_mi,
                 iterlist, galist, mimiciterlist)

    if run_plots:
        plot_fitness_time("max_one", "Max Ones")
        plot_fitness_time("four_peaks", "Four Peaks")
        plot_fitness_time("knapsack", "Knapsack")
Example #10
def runPart1(savePath):
    fitness = mlrose.FourPeaks(t_pct=0.15)
    init_state = None
    fourPeaksProblem = mlrose.DiscreteOpt(length=12,
        fitness_fn=fitness, maximize=True, max_val=2)

    part1_1 = Part1(name='Four Peaks', fitness=fitness,
                    problem=fourPeaksProblem, init_state=init_state)
    part1_1.runAll(savePath)

    fitness = mlrose.Queens()
    init_state = None
    eightQueensProblem = mlrose.DiscreteOpt(length=8,
        fitness_fn=fitness, maximize=False, max_val=8)
    part1_2 = Part1(name='Eight Queens', fitness=fitness,
                    problem=eightQueensProblem, init_state=init_state)
    part1_2.runAll(savePath)

    fitness = mlrose.SixPeaks(t_pct=0.15)
    init_state = None
    sixPeaksProblem = mlrose.DiscreteOpt(length=11,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_4 = Part1(name='Six Peaks', fitness=fitness,
                    problem=sixPeaksProblem, init_state=init_state)
    part1_4.runAll(savePath)

    fitness = mlrose.FlipFlop()
    init_state = None
    flipFlopProblem = mlrose.DiscreteOpt(length=7,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_5 = Part1(name='Flip Flop - 7', fitness=fitness,
                    problem=flipFlopProblem, init_state=init_state)
    part1_5.runAll(savePath)


    fitness = mlrose.FlipFlop()
    init_state = None
    flipFlopProblem = mlrose.DiscreteOpt(length=100,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_5 = Part1(name='Flip Flop - 100', fitness=fitness,
                    problem=flipFlopProblem, init_state=init_state)
    part1_5.runAll(savePath)

    fitness = mlrose.Queens()
    init_state = None
    eightQueensProblem = mlrose.DiscreteOpt(length=80,
        fitness_fn=fitness, maximize=False, max_val=8)
    part1_2 = Part1(name='Eighty Queens', fitness=fitness,
                    problem=eightQueensProblem, init_state=init_state)
    part1_2.runAll(savePath)

    fitness = mlrose.FlipFlop()
    init_state = None
    flipFlopProblem = mlrose.DiscreteOpt(length=15,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_5 = Part1(name='Flip Flop - 15', fitness=fitness,
                    problem=flipFlopProblem, init_state=init_state)
    part1_5.runAll(savePath)


    edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
    fitness = mlrose.MaxKColor(edges)
    init_state = None
    maxKColorsProblem = mlrose.DiscreteOpt(length=7,
        fitness_fn=fitness, maximize=False, max_val=2)
    part1_3 = Part1(name='Max-K Color', fitness=fitness,
                    problem=maxKColorsProblem, init_state=init_state)
    part1_3.runAll(savePath)

    # =============================================================
    #  Source - Tutorial from MLRose Docs
    #  https://mlrose.readthedocs.io/en/stable/source/tutorial2.html
    # 
    # =============================================================
    # Create list of city coordinates
    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3)]

    # Initialize fitness function object using coords_list
    fitness_coords = mlrose.TravellingSales(coords = coords_list)

    # Create list of distances between pairs of cities
    dist_list = [(0, 1, 3.1623), (0, 2, 4.1231), (0, 3, 5.8310), (0, 4, 4.2426), \
                (0, 5, 5.3852), (0, 6, 4.0000), (0, 7, 2.2361), (1, 2, 1.0000), \
                (1, 3, 2.8284), (1, 4, 2.0000), (1, 5, 4.1231), (1, 6, 4.2426), \
                (1, 7, 2.2361), (2, 3, 2.2361), (2, 4, 2.2361), (2, 5, 4.4721), \
                (2, 6, 5.0000), (2, 7, 3.1623), (3, 4, 2.0000), (3, 5, 3.6056), \
                (3, 6, 5.0990), (3, 7, 4.1231), (4, 5, 2.2361), (4, 6, 3.1623), \
                (4, 7, 2.2361), (5, 6, 2.2361), (5, 7, 3.1623), (6, 7, 2.2361)]

    # Initialize fitness function object using dist_list
    fitness_dists = mlrose.TravellingSales(distances = dist_list)

    # Define optimization problem object
    problem_fit = mlrose.TSPOpt(length = 8, fitness_fn = fitness_coords, maximize=False)

    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3)]

    # Define optimization problem object
    problem_no_fit = mlrose.TSPOpt(length = 8, coords = coords_list, maximize=False)

    part1_6 = Part1(name='TSP', fitness=coords_list,
                    problem=problem_no_fit, init_state=None)
    part1_6.runAll(savePath)

    # Knapsack
    weights = np.random.randint(2, high=20, size=50)
    values = np.random.randint(2, high=100, size=50)
    max_weight_pct = 0.8
    fitness = mlrose.Knapsack(weights, values, max_weight_pct)
    knapsackProblem = mlrose.DiscreteOpt(length=50,
        fitness_fn=fitness, maximize=True, max_val=2)  # knapsack value is maximized

    part1_7 = Part1(name='Knapsack', fitness=fitness,
                    problem=knapsackProblem, init_state=None)
    part1_7.runAll(savePath)
Example #11
import random
import time

import matplotlib.pyplot as plt
import numpy as np
import mlrose_hiive

# Result lists appended to in the loop below (including the SA lists and input_sizes).
input_sizes=[]
sa_times=[]
sa_scores=[]
rhc_times=[]
rhc_scores=[]
gen_times=[]
gen_scores=[]
mimic_times=[]
mimic_scores=[]
for i in range(10, 51, 10):
    if i==0:
        continue
    ex = {"weights": [random.randint(1, 20) for i in range(i)], "values": [random.randint(1, 10) for i in range(i)], "state": np.array([random.randint(0, 2) for i in range(i)])}
    input_sizes.append(i)
    weights = ex['weights']
    values = ex['values']
    state = ex['state']
    max_weight_pct = 0.6
    fitness = mlrose_hiive.Knapsack(weights, values, max_weight_pct)
    fitness.evaluate(state)
    problem = mlrose_hiive.DiscreteOpt(length = len(state), fitness_fn = fitness, maximize = True, max_val = int(max(state))+1)
    times = []
    best_scores = []

    start_time = time.time()
    # sa() is a helper defined elsewhere that returns best state, best fitness and a fitness curve.
    best_state, best_fitness, fitness_curve = sa(problem,state, 30, 1000)
    elapsed_time = time.time() - start_time
    print(elapsed_time)
    times.append(elapsed_time*1000)
    best_scores.append(best_fitness)
    sa_times.append(elapsed_time*1000)
    sa_scores.append(best_fitness)
    plt.close()
    
Example #12
 def run_Knap(self, mode=None):
     weights = [10, 5, 2, 8, 15]
     values = [1, 2, 3, 4, 5]
     max_weight_pct = 0.6
     fitness_fn = mlrose.Knapsack(weights, values, max_weight_pct)
     self.run_complexity(fitness_fn, mode)
Example #13
def fitness_counter(state):
    # Count fitness evaluations; weights, values and max_weight_pct are
    # expected to be module-level globals.
    global eval_count
    fitness = mlrose.Knapsack(weights, values, max_weight_pct)
    eval_count += 1
    return fitness.evaluate(state)
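
A sketch of how a counter like this is typically wired up, mirroring the CustomFitness pattern in Example #7; the weights, values and optimizer settings below are placeholders, and it assumes fitness_counter and these globals live in the same module.

import mlrose

eval_count = 0
weights = [10, 5, 2, 8, 15]   # placeholder data
values = [1, 2, 3, 4, 5]
max_weight_pct = 0.6

custom_fitness = mlrose.CustomFitness(fitness_counter)
problem = mlrose.DiscreteOpt(length=len(weights), fitness_fn=custom_fitness,
                             maximize=True, max_val=2)
best_state, best_fitness = mlrose.random_hill_climb(problem, max_attempts=10,
                                                    random_state=1)
print("fitness evaluations:", eval_count)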
Example #14
def knapsack(length=100, pct=0.5):
    weights = np.random.randint(1, length, size=length)
    values = np.random.randint(1, length, size=length)
    return mlrose.Knapsack(weights, values, pct), sum(values)
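
One possible way to consume the helper above, with the returned total value used only as a rough normalizer; classic mlrose is assumed and the genetic-algorithm call is illustrative.

import mlrose

fitness, total_value = knapsack(length=50, pct=0.5)
problem = mlrose.DiscreteOpt(length=50, fitness_fn=fitness,
                             maximize=True, max_val=2)
best_state, best_fitness = mlrose.genetic_alg(problem, random_state=1)
print("fraction of total value packed:", best_fitness / total_value)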
Example #15
def Knapsack():
    rs = 2  #random state
    ma = 200  #max attempts

    items = 40  # number of items
    random.seed(6)

    weights = []
    values = []
    for i in range(0, items):
        weights.append((random.random() + 0.1) * 30)
        #weights.append(random.randint(1,31))
        #values.append(random.randint(1, 500))
        values.append((random.random() + 0.1) * 500)

    #weights=[9,13,153,50,15,68,27,39,23,52,11,32,24,48,73,42,43,22,7,18,4,30,153,50,15,68,68,27,27,39]
    #values=[150,35,200,60,60,45,60,40,30,10,70,30,15,10,40,70,75,80,20,12,50,10,200,60,60,45,45,60,60,40]
    #print(len(weights))
    #print(weights)

    max_weight_pct = 0.6
    fitness = mlrose.Knapsack(weights, values, max_weight_pct)

    problem_fit = mlrose.DiscreteOpt(length=len(weights),
                                     fitness_fn=fitness,
                                     maximize=True)

    # Fitness curve
    alltime = []
    import time
    start = time.time()
    best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(
        problem_fit,
        pop_size=300,
        mutation_prob=0.7,
        curve=True,
        max_attempts=ma,
        random_state=rs)
    end = time.time()
    alltime.append((end - start))

    start = time.time()
    best_state, best_fitness, rhcfitness_curve = mlrose.random_hill_climb(
        problem_fit, curve=True, max_attempts=ma, random_state=rs)
    end = time.time()
    alltime.append((end - start))

    start = time.time()
    SA_schedule = mlrose.GeomDecay(init_temp=100000, decay=0.95, min_temp=1)
    best_state, best_fitness, safitness_curve = mlrose.simulated_annealing(
        problem_fit,
        schedule=SA_schedule,
        curve=True,
        max_attempts=ma,
        random_state=rs)
    end = time.time()
    alltime.append((end - start))

    start = time.time()
    best_state, best_fitness, mimicfitness_curve = mlrose.mimic(
        problem_fit,
        curve=True,
        max_attempts=ma,
        pop_size=400,
        keep_pct=0.3,
        random_state=rs)
    end = time.time()
    alltime.append((end - start))

    # Plot time comparison
    plt.figure()
    algorithms = ['GA', 'RHC', 'SA', 'MIMIC']
    plt.bar(algorithms, alltime)
    plt.title("Running time for Knapsack problem (seconds)")
    plt.ylabel('Time (s)')
    plt.xlabel('Random search algorithms')
    plt.tight_layout()
    i = 0
    for a in algorithms:
        plt.text(a,
                 alltime[i] + 0.05,
                 '%.2f' % alltime[i],
                 ha='center',
                 va='bottom',
                 fontsize=11)
        i += 1
    plt.savefig("Running time for Knapsack problem")
    plt.show()

    plt.title("Knapsack problem fitness vs iterations")
    plt.plot(gafitness_curve, label='GA', color='r')
    plt.plot(rhcfitness_curve, label='RHC', color='b')
    plt.plot(safitness_curve, label='SA', color='orange')
    plt.plot(mimicfitness_curve, label='MIMIC', color='g')
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack fitness curve")
    plt.show()

    # MIMIC Fitness vs Iterations as cpt changes
    CPT = [0.1, 0.3, 0.5, 0.7, 0.9]
    plt.figure()
    for c in CPT:
        best_state, best_fitness, mimicfitness_curve = mlrose.mimic(
            problem_fit,
            keep_pct=c,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(mimicfitness_curve, label='pct = ' + str(c))

    plt.title(
        "Knapsack problem using MIMIC with different values of pct parameter")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack MIMIC parameter")
    plt.show()

    # GA Fitness vs Iterations as mutation prob changes
    Mutate = [0.1, 0.3, 0.5, 0.7, 0.9]
    plt.figure()
    for m in Mutate:
        best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(
            problem_fit,
            mutation_prob=m,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(gafitness_curve, label='mutation = ' + str(m))

    plt.title(
        "Knapsack problem using GA with different values of mutation probability"
    )
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack GA parameter")
    plt.show()

    # SA Fitness vs Iterations as schedule changes
    # schedule = mlrose.GeomDecay(init_temp=10, decay=0.95, min_temp=1)

    init_temp = 1.0
    decay_r = [0.15, 0.35, 0.55, 0.75, 0.95]
    plt.figure()
    for d in decay_r:
        SAschedule = mlrose.GeomDecay(init_temp=100000, decay=d, min_temp=1)
        best_state, best_fitness, safitness_curve = mlrose.simulated_annealing(
            problem_fit,
            schedule=SAschedule,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(safitness_curve, label='decay rate = ' + str(d))

    plt.title("Knapsack problem using SA with different values of decay rate")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack SA parameter")
    plt.show()

    init_temp = 1.0
    temps = [100000, 10000, 1000, 100, 10, 5]
    plt.figure()
    for t in temps:
        SAschedule = mlrose.GeomDecay(init_temp=t, decay=0.95, min_temp=1)
        best_state, best_fitness, safitness_curve = mlrose.simulated_annealing(
            problem_fit,
            schedule=SAschedule,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(safitness_curve, label='Temperature = ' + str(t))

    plt.title("Knapsack problem using SA with different values of temperature")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack SA temp")
    plt.show()

    Mutate = [0.1, 0.1, 0.1, 0.1, 0.1]
    pop = [50, 100, 200, 300, 400]
    Mutatepop = [(100, 0.2), (100, 0.5), (100, 0.7), (200, 0.2), (200, 0.5),
                 (200, 0.7), (300, 0.2), (300, 0.5), (300, 0.7)]
    plt.figure()
    for m in Mutatepop:
        best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(
            problem_fit,
            pop_size=m[0],
            mutation_prob=m[1],
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(gafitness_curve,
                 label='pop size = ' + str(m[0]) + ', mutation = ' + str(m[1]))

    plt.title("Knapsack using GA with  different parameters")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack GA parameter mutate pop")
    plt.show()

    temps = [10000000, 1000000, 100000, 10000, 1000, 100, 10, 5]
    plt.figure()
    for t in temps:
        SAschedule = mlrose.GeomDecay(init_temp=t, decay=0.95, min_temp=1)
        best_state, best_fitness, safitness_curve = mlrose.simulated_annealing(
            problem_fit,
            schedule=SAschedule,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(safitness_curve, label='Temperature = ' + str(t))

    plt.title("Knapsack problem using SA with different values of temperature")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack SA temp")
    plt.show()

    CPT = [0.1, 0.3, 0.5, 0.9]
    pp = [(100, 0.2), (100, 0.5), (100, 0.7), (100, 0.9), (200, 0.2),
          (200, 0.5), (200, 0.7), (200, 0.9), (500, 0.2), (500, 0.5),
          (500, 0.7), (500, 0.9)]
    plt.figure()
    Pop = [100, 200, 300, 400, 500]
    for p in Pop:
        best_state, best_fitness, mimicfitness_curve = mlrose.mimic(
            problem_fit,
            pop_size=p,
            keep_pct=0.3,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(mimicfitness_curve, label='pop size = ' + str(p))

    plt.title("Knapsack problem using MIMIC with different parameters")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Knapsack MIMIC parameter pop pct")
    plt.show()
Example #16
def main(task):
    if 'tune_problem' in task:
        # Tune Knapsack problem
        problem_size = 50
        weights = [idx for idx in range(1, problem_size + 1)]
        values = [idx for idx in range(1, problem_size + 1)]
        max_weight_pct_list = np.arange(0.1, 1, 0.05)
        knapsack_tuning_fitness = []
        knapsack_tuning_time = []
        knapsack_tuning_fevals = []
        for max_weight_pct in max_weight_pct_list:
            fitness = mlrose.Knapsack(weights, values, max_weight_pct)
            problem = mlrose.DiscreteOpt(problem_size,
                                         fitness,
                                         maximize=True,
                                         max_val=2)
            experiment_name = 'knapsack_tuning_weight_pct_' + str(
                max_weight_pct)
            temperature_list = np.arange(1, 50, 1)
            knapsack = runners.SARunner(problem=problem,
                                        experiment_name=experiment_name,
                                        output_directory='knapsack',
                                        seed=27,
                                        iteration_list=[5000],
                                        max_attempts=50,
                                        temperature_list=temperature_list)
            # the two data frames will contain the results
            knapsack_run_stats, knapsack_run_curves = knapsack.run()
            knapsack_tuning_fitness.append(knapsack_run_curves.loc[
                knapsack_run_curves['Fitness'].idxmax()]['Fitness'])
            knapsack_tuning_time.append(knapsack_run_curves.loc[
                knapsack_run_curves['Time'].idxmax()]['Time'])
            knapsack_tuning_fevals.append(2 * knapsack_run_curves.loc[
                knapsack_run_curves['Iteration'].idxmax()]['Iteration'])

        plt.rc("font", size=8)
        plt.rc("axes", titlesize=12)
        plt.rc("axes", labelsize=10)
        plt.rc("xtick", labelsize=8)
        plt.rc("ytick", labelsize=8)
        plt.rc("legend", fontsize=11)
        plt.rc("figure", titlesize=11)
        fig, ax = plt.subplots(1, 3, figsize=(10, 3.5))
        fig.suptitle('Knapsack Tuning w/ Simulated Annealing Optimizer',
                     fontsize=14)
        ax[0].scatter(max_weight_pct_list,
                      knapsack_tuning_fitness,
                      c='r',
                      marker='x',
                      s=10)
        ax[0].set(xlabel='Max Weight %', ylabel='Max Fitness')
        ax[1].scatter(max_weight_pct_list,
                      knapsack_tuning_time,
                      c='g',
                      marker='o',
                      s=10)
        ax[1].set(xlabel='Max Weight %', ylabel='Max Runtime (s)')
        ax[2].scatter(max_weight_pct_list,
                      knapsack_tuning_fevals,
                      c='b',
                      marker='+')
        ax[2].set(xlabel='Max Weight %', ylabel='Max Function Evaluations')
        ax[2].yaxis.tick_right()
        plt.show()

        return
    if 'tuning_plots' in task:
        # FOUR PEAKS GOOD FOR GENETIC
        # Tune Algorithms
        problem_size = 50

        # Knapsack
        weights = [idx for idx in range(1, problem_size + 1)]
        print(weights)
        #weights = np.ones(100)
        values = [idx for idx in range(1, problem_size + 1)]
        #values = np.arange(1, 101)
        max_weight_pct = 0.3
        knapsack_fitness = mlrose.Knapsack(weights, values, max_weight_pct)
        #state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])
        #problem = mlrose.DiscreteOpt(problem_size, four_peaks_fitness, maximize=True, max_val=2)
        #temperature_list = np.arange(0.1, 2, 0.1)
        best_fitness_list = []
        #for size in problem_size_list:
        problem = mlrose.DiscreteOpt(problem_size,
                                     knapsack_fitness,
                                     maximize=True,
                                     max_val=2)
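        # Each bit of the state vector is an include/exclude decision for one
        # item; weights and values both run 1..problem_size, so higher-index
        # items are both heavier and more valuable.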

        problem_size = 50
        rhc_fitness_tuning_list = []
        rhc_param_tuning_list = []
        rhc_feval_tuning_list = []
        time_tuning_list = []
        asdf_list = []
        fdsa_list = []
        experiment_name = 'rhc_knapsack_tuning_size_' + str(problem_size)
        #restart_list = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
        restart_list = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        rhc = runners.RHCRunner(problem=problem,
                                experiment_name=experiment_name,
                                output_directory='knapsack',
                                seed=27,
                                iteration_list=[5000],
                                max_attempts=125,
                                restart_list=restart_list)
        # the two data frames will contain the results
        rhc_run_stats, rhc_run_curves = rhc.run()
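        # For each restart count, re-base its iteration counter, then record
        # best fitness, max runtime, and an approximate function-evaluation
        # count (3 per RHC iteration, a rough heuristic).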
        for restart in restart_list:
            this_temp_df = rhc_run_curves.loc[
                rhc_run_curves['Restarts'] == restart].copy()
            # Re-base the iteration counter so each restart's curve starts at 1
            this_temp_df['Iteration'] = (this_temp_df['Iteration'] -
                                         this_temp_df['Iteration'].min() + 1)
            rhc_fitness_tuning_list.append(
                this_temp_df.loc[this_temp_df['Fitness'].idxmax()]['Fitness'])
            rhc_param_tuning_list.append(restart)
            time_tuning_list.append(
                this_temp_df.loc[this_temp_df['Time'].idxmax()]['Time'])
            rhc_feval_tuning_list.append(3 * this_temp_df.loc[
                this_temp_df['Iteration'].idxmax()]['Iteration'])
            asdf_list.append(this_temp_df['Fitness'])
            fdsa_list.append(this_temp_df['Iteration'])
        # plt.rc("font", size=8)
        # plt.rc("axes", titlesize=12)
        # plt.rc("axes", labelsize=10)
        # plt.rc("xtick", labelsize=8)
        # plt.rc("ytick", labelsize=8)
        # plt.rc("legend", fontsize=8)
        # plt.rc("figure", titlesize=11)
        # #fig, ax = plt.subplots(2, 1, dpi=100, sharex=True, figsize=(5,4))
        # fig, ax = plt.subplots(1,3,figsize=(12,3.5))
        # fig.suptitle('RHC Restarts Tuning, problem_size = ' + str(problem_size))
        # ax[0].scatter(param_tuning_list, time_tuning_list, c='r', marker='x', s=10)
        # ax[0].set(xlabel='Restarts', ylabel = 'Time')

        # ax[1].scatter(param_tuning_list, fitness_tuning_list, c='g', marker='o', s=10)
        # ax[1].set(xlabel='Restarts', ylabel = 'Fitness')

        # ax[2].scatter(param_tuning_list, feval_tuning_feval, c='g', marker='o', s=10)
        # ax[2].set(xlabel='Restartsc', ylabel = 'Function Evaluations')
        # ax[2].yaxis.tick_right()

        # plt.show()

        # fig, ax = plt.subplots()
        # ax.scatter(fdsa_list[7], asdf_list[7])
        # ax.set(xlabel='Iteration', ylabel = 'Fitness')
        # plt.show()
        # problem_size = 50

        sa_fitness_tuning_list = []
        sa_param_tuning_list = []
        time_tuning_list = []
        sa_feval_tuning_list = []
        asdf_list = []
        fdsa_list = []
        experiment_name = 'sa_knapsack_tuning_size_' + str(problem_size)
        temperature_list = np.arange(1, 50, 0.5)
        sa = runners.SARunner(problem=problem,
                              experiment_name=experiment_name,
                              output_directory='knapsack',
                              seed=27,
                              iteration_list=[1000],
                              max_attempts=50,
                              temperature_list=temperature_list)
        #decay_list=mlrose.GeomDecay(init_temp=1.1))
        #temperature_list=[1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000])
        # the two data frames will contain the results
        df_run_stats, df_run_curves = sa.run()
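        # Temperature comes back as a non-numeric column, so coerce it to
        # float before filtering the curves per temperature below.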
        df_run_curves['Temperature'] = df_run_curves['Temperature'].astype(
            str).astype(float)
        for temp in temperature_list:
            this_temp_df = df_run_curves.loc[
                df_run_curves['Temperature'] == temp].copy()
            # Re-base the iteration counter so each temperature's curve starts at 1
            this_temp_df['Iteration'] = (this_temp_df['Iteration'] -
                                         this_temp_df['Iteration'].min() + 1)
            sa_fitness_tuning_list.append(
                this_temp_df.loc[this_temp_df['Fitness'].idxmax()]['Fitness'])
            sa_param_tuning_list.append(temp)
            sa_feval_tuning_list.append(2 * this_temp_df.loc[
                this_temp_df['Iteration'].idxmax()]['Iteration'])
            time_tuning_list.append(
                this_temp_df.loc[this_temp_df['Time'].idxmax()]['Time'])
            asdf_list.append(this_temp_df['Fitness'])
            fdsa_list.append(this_temp_df['Iteration'])
        # plt.rc("font", size=8)
        # plt.rc("axes", titlesize=12)
        # plt.rc("axes", labelsize=10)
        # plt.rc("xtick", labelsize=8)
        # plt.rc("ytick", labelsize=8)
        # plt.rc("legend", fontsize=8)
        # plt.rc("figure", titlesize=11)
        # #fig, ax = plt.subplots(2, 1, dpi=100, sharex=True, figsize=(5,4))
        # fig, ax = plt.subplots(1,3,figsize=(12,3.5))
        # fig.suptitle('SA Temperature Tuning, problem_size = ' + str(problem_size))
        # ax[0].scatter(param_tuning_list, time_tuning_list, c='r', marker='x', s=10)
        # ax[0].set(xlabel='Temperature', ylabel = 'Time')

        # ax[1].scatter(param_tuning_list, fitness_tuning_list, c='g', marker='o', s=10)
        # ax[1].set(xlabel='Temperature', ylabel = 'Fitness')

        # ax[2].scatter(param_tuning_list, feval_tuning_list, c='g', marker='o', s=10)
        # ax[2].set(xlabel='Temperature', ylabel = 'Function Evaluations')
        # ax[2].yaxis.tick_right()

        # plt.show()

        # fig, ax = plt.subplots()
        # ax.scatter(fdsa_list[17], asdf_list[17])
        # ax.set(xlabel='Iteration', ylabel = 'Fitness')
        # plt.show()

        ga_fitness_tuning_list = []
        ga_param_tuning_list = []
        time_tuning_list = []
        ga_feval_tuning_list = []
        asdf_list = []
        fdsa_list = []
        experiment_name = 'ga_knapsack_tuning_size_' + str(problem_size)
        population_sizes_list = [100]
        mutation_rates_list = np.arange(0.05, 1.0, 0.05)
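        # Sweep the GA mutation rate at a fixed population size of 100.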
        ga = runners.GARunner(problem=problem,
                              experiment_name=experiment_name,
                              output_directory='knapsack',
                              seed=27,
                              iteration_list=[100],
                              population_sizes=population_sizes_list,
                              mutation_rates=mutation_rates_list,
                              max_attempts=5)

        # the two data frames will contain the results
        df_run_stats, df_run_curves = ga.run()

        # for rate in mutation_rates_list:
        #     this_temp_df = df_run_curves.loc[df_run_curves['Mutation Rate'] == rate]
        #     this_temp_df['Iteration'] = this_temp_df['Iteration'] - this_temp_df.loc[this_temp_df['Iteration'].idxmin()]['Iteration'] + 1
        #     ga_fitness_tuning_list.append(this_temp_df.loc[this_temp_df['Fitness'].idxmax()]['Fitness'])
        #     ga_param_tuning_list.append(rate)
        #     feval_tuning_list.append(population_sizes_list[0] * this_temp_df.loc[this_temp_df['Iteration'].idxmax()]['Iteration'])
        #     time_tuning_list.append(this_temp_df.loc[this_temp_df['Time'].idxmax()]['Time'])
        #     asdf_list.append(this_temp_df['Fitness'])
        #     fdsa_list.append(this_temp_df['Iteration'])
        # print(time_tuning_list)
        # plt.rc("font", size=8)
        # plt.rc("axes", titlesize=12)
        # plt.rc("axes", labelsize=10)
        # plt.rc("xtick", labelsize=8)
        # plt.rc("ytick", labelsize=8)
        # plt.rc("legend", fontsize=8)
        # plt.rc("figure", titlesize=11)
        # #fig, ax = plt.subplots(2, 1, dpi=100, sharex=True, figsize=(5,4))
        # fig, ax = plt.subplots(1,3,figsize=(12,3.5))
        # fig.suptitle('GA Mutation Rate Tuning, problem_size = ' + str(problem_size))
        # ax[0].scatter(param_tuning_list, time_tuning_list, c='r', marker='x', s=10)
        # ax[0].set(xlabel='Mutation Rate', ylabel = 'Time (s)')

        # ax[1].scatter(param_tuning_list, fitness_tuning_list, c='g', marker='o', s=10)
        # ax[1].set(xlabel='Mutation Rate', ylabel = 'Fitness')

        # ax[2].scatter(param_tuning_list, feval_tuning_list, c='g', marker='o', s=10)
        # ax[2].set(xlabel='Mutation Rate', ylabel = 'Function Evaluations')
        # ax[2].yaxis.tick_right()

        # plt.show()

        # fig, ax = plt.subplots()
        # ax.scatter(fdsa_list[17], asdf_list[17])
        # ax.set(xlabel='Iteration', ylabel = 'Fitness')
        # plt.show()

        # Tune population size
        ga_population_tuning_fitness = []
        ga_population_tuning_time = []
        ga_population_tuning_feval = []
        population_sizes_list = np.arange(10, 500, 10)
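        # Sweep GA population size at a fixed mutation rate of 0.1; function
        # evaluations are approximated as population_size per generation
        # (a rough heuristic).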
        for population_size in population_sizes_list:
            experiment_name = 'ga_knapsack_tuning_population_size_' + str(
                problem_size)
            mutation_rates_list = [0.1]
            ga = runners.GARunner(problem=problem,
                                  experiment_name=experiment_name,
                                  output_directory='knapsack',
                                  seed=27,
                                  iteration_list=[500],
                                  population_sizes=[int(population_size)],
                                  mutation_rates=mutation_rates_list,
                                  max_attempts=10)

            # the two data frames will contain the results
            ga_run_stats, ga_run_curves = ga.run()
            ga_population_tuning_fitness.append(ga_run_curves.loc[
                ga_run_curves['Fitness'].idxmax()]['Fitness'])
            ga_population_tuning_time.append(
                ga_run_curves.loc[ga_run_curves['Time'].idxmax()]['Time'])
            ga_population_tuning_feval.append(
                population_size * ga_run_curves.loc[
                    ga_run_curves['Iteration'].idxmax()]['Iteration'])

        # plt.rc("font", size=8)
        # plt.rc("axes", titlesize=12)
        # plt.rc("axes", labelsize=10)
        # plt.rc("xtick", labelsize=8)
        # plt.rc("ytick", labelsize=8)
        # plt.rc("legend", fontsize=8)
        # plt.rc("figure", titlesize=11)
        # #fig, ax = plt.subplots(2, 1, dpi=100, sharex=True, figsize=(5,4))
        # fig, ax = plt.subplots(1,3,figsize=(12,3.5))
        # fig.suptitle('GA Population Size Tuning, problem_size = ' + str(problem_size))
        # ax[0].scatter(population_sizes_list, ga_population_tuning_time, c='r', marker='x', s=10)
        # ax[0].set(xlabel='Population Size', ylabel = 'Time')

        # ax[1].scatter(population_sizes_list, ga_population_tuning_fitness, c='g', marker='x', s=10)
        # ax[1].set(xlabel='Population Size', ylabel = 'Fitness')

        # ax[2].scatter(param_tuning_list, ga_population_tuning_feval, c='g', marker='o', s=10)
        # ax[2].set(xlabel='Population Size', ylabel = 'Function Evaluations')
        # ax[2].yaxis.tick_right()

        # plt.show()

        mimic_fitness_tuning_list = []
        mimic_param_tuning_list = []
        time_tuning_list = []
        mimic_feval_tuning_list = []
        asdf_list = []
        fdsa_list = []
        experiment_name = 'mimic_knapsack_tuning_size_' + str(problem_size)
        population_sizes_list = [100]
        # keep_percent_list=np.arange(0.05, 1.0, 0.05)
        # mimic = runners.MIMICRunner(problem=problem,
        #             experiment_name=experiment_name,
        #             output_directory='knapsack',
        #             seed=27,
        #             iteration_list=[100],
        #             population_sizes=population_sizes_list,
        #             keep_percent_list=keep_percent_list,
        #             max_attempts=5)

        # # the two data frames will contain the results
        # df_run_stats, df_run_curves = mimic.run()
        # print(df_run_curves.dtypes)
        # print(df_run_curves)
        # #df_run_curves['Temperature'] = pd.to_numeric(df_run_curves['Temperature'].astype(str).astype(float))
        # print(df_run_curves)
        # for percent in keep_percent_list:
        #     this_temp_df = df_run_curves.loc[df_run_curves['Keep Percent'] == percent]
        #     this_temp_df['Iteration'] = this_temp_df['Iteration'] - this_temp_df.loc[this_temp_df['Iteration'].idxmin()]['Iteration'] + 1
        #     mimic_fitness_tuning_list.append(this_temp_df.loc[this_temp_df['Fitness'].idxmax()]['Fitness'])
        #     mimic_param_tuning_list.append(percent)
        #     feval_tuning_list.append(population_sizes_list[0] * this_temp_df.loc[this_temp_df['Iteration'].idxmax()]['Iteration'])
        #     time_tuning_list.append(this_temp_df.loc[this_temp_df['Time'].idxmax()]['Time'])
        #     asdf_list.append(this_temp_df['Fitness'])
        #     fdsa_list.append(this_temp_df['Iteration'])

        # plt.rc("font", size=8)
        # plt.rc("axes", titlesize=12)
        # plt.rc("axes", labelsize=10)
        # plt.rc("xtick", labelsize=8)
        # plt.rc("ytick", labelsize=8)
        # plt.rc("legend", fontsize=8)
        # plt.rc("figure", titlesize=11)
        # #fig, ax = plt.subplots(2, 1, dpi=100, sharex=True, figsize=(5,4))
        # fig, ax = plt.subplots(1,3,figsize=(12,3.5))
        # fig.suptitle('MIMIC Keep Percent Tuning, problem_size = ' + str(problem_size))
        # ax[0].scatter(param_tuning_list, time_tuning_list, c='r', marker='x', s=10)
        # ax[0].set(xlabel='Keep Percent (decimal)', ylabel = 'Time (s)')

        # ax[1].scatter(param_tuning_list, fitness_tuning_list, c='g', marker='o', s=10)
        # ax[1].set(xlabel='Keep Percent (decimal)', ylabel = 'Fitness')

        # ax[2].scatter(param_tuning_list, feval_tuning_list, c='g', marker='o', s=10)
        # ax[2].set(xlabel='Keep Percent (decimal)', ylabel = 'Function Evaluations')
        # ax[2].yaxis.tick_right()

        # plt.show()

        # fig, ax = plt.subplots()
        # ax.scatter(fdsa_list[17], asdf_list[17])
        # ax.set(xlabel='Iteration', ylabel = 'Fitness')
        # plt.show()

        # Tune population size
        mimic_population_tuning_fitness = []
        mimic_population_tuning_time = []
        mimic_population_tuning_feval = []
        population_sizes_list = np.arange(10, 500, 10)
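        # Sweep MIMIC population size at a fixed keep percent of 0.45;
        # function evaluations are approximated as population_size per
        # iteration (a rough heuristic).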
        for population_size in population_sizes_list:
            experiment_name = 'mimic_knapsack_tuning_population_size_' + str(
                problem_size)
            keep_percent_list = [0.45]
            mimic = runners.MIMICRunner(
                problem=problem,
                experiment_name=experiment_name,
                output_directory='knapsack',
                seed=27,
                iteration_list=[100],
                population_sizes=[int(population_size)],
                keep_percent_list=keep_percent_list,
                max_attempts=5,
                use_fast_mimic=True)

            # the two data frames will contain the results
            mimic_run_stats, mimic_run_curves = mimic.run()
            mimic_population_tuning_fitness.append(mimic_run_curves.loc[
                mimic_run_curves['Fitness'].idxmax()]['Fitness'])
            mimic_population_tuning_time.append(mimic_run_curves.loc[
                mimic_run_curves['Time'].idxmax()]['Time'])
            mimic_population_tuning_feval.append(
                population_size * mimic_run_curves.loc[
                    mimic_run_curves['Iteration'].idxmax()]['Iteration'])

        plt.rc("font", size=8)
        plt.rc("axes", titlesize=14)
        plt.rc("axes", labelsize=10)
        plt.rc("xtick", labelsize=8)
        plt.rc("ytick", labelsize=8)
        plt.rc("legend", fontsize=11)
        plt.rc("figure", titlesize=11)
        fig, ax = plt.subplots(2, 4, figsize=(12, 7))
        fig.suptitle('Knapsack Algorithm Tuning, problem size = ' +
                     str(problem_size))

        ax[0, 0].scatter(rhc_param_tuning_list,
                         rhc_fitness_tuning_list,
                         c='r',
                         marker='x',
                         s=10)
        ax[0, 0].set(xlabel='Restarts', ylabel='Fitness', title='RHC Restarts')

        ax[0, 1].scatter(sa_param_tuning_list,
                         sa_fitness_tuning_list,
                         c='g',
                         marker='o',
                         s=10)
        ax[0, 1].set(xlabel='Temperature', title='SA Temperature')

        ax[0, 2].scatter(population_sizes_list,
                         ga_population_tuning_fitness,
                         c='g',
                         marker='o',
                         s=10)
        ax[0, 2].set(xlabel='Population Size', title='GA Population Size')
        ax[0, 2].yaxis.tick_right()

        ax[0, 3].scatter(population_sizes_list,
                         mimic_population_tuning_fitness,
                         c='g',
                         marker='o',
                         s=10)
        ax[0, 3].set(xlabel='Population Size', title='MIMIC Population Size')
        ax[0, 3].yaxis.tick_right()

        ax[1, 0].scatter(rhc_param_tuning_list,
                         rhc_feval_tuning_list,
                         c='r',
                         marker='x',
                         s=10)
        ax[1, 0].set(xlabel='Restarts', ylabel='Function Evaluations')

        ax[1, 1].scatter(sa_param_tuning_list,
                         sa_feval_tuning_list,
                         c='g',
                         marker='o',
                         s=10)
        ax[1, 1].set(xlabel='Temperature')

        ax[1, 2].scatter(population_sizes_list,
                         ga_population_tuning_feval,
                         c='g',
                         marker='o',
                         s=10)
        ax[1, 2].set(xlabel='Population Size')
        ax[1, 2].yaxis.tick_right()

        ax[1, 3].scatter(population_sizes_list,
                         mimic_population_tuning_feval,
                         c='g',
                         marker='o',
                         s=10)
        ax[1, 3].set(xlabel='Population Size')
        ax[1, 3].yaxis.tick_right()

        plt.show()

    if 'complexity_graph' in task:
        problem_size_list = np.arange(5, 85, 5)
        sa_time_list = []
        sa_fitness_list = []
        sa_feval_list = []
        rhc_time_list = []
        rhc_fitness_list = []
        rhc_feval_list = []
        ga_time_list = []
        ga_fitness_list = []
        ga_feval_list = []
        mimic_time_list = []
        mimic_fitness_list = []
        mimic_feval_list = []
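        # Complexity study: rebuild the Knapsack problem at each size and run
        # each algorithm once with its tuned settings, recording best fitness,
        # max runtime, and approximate function evaluations.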
        for problem_size in problem_size_list:
            # Knapsack
            weights = [idx for idx in range(1, problem_size + 1)]
            print(weights)
            values = [idx for idx in range(1, problem_size + 1)]
            max_weight_pct = 0.3
            knapsack_fitness = mlrose.Knapsack(weights, values, max_weight_pct)
            best_fitness_list = []
            problem = mlrose.DiscreteOpt(int(problem_size),
                                         knapsack_fitness,
                                         maximize=True,
                                         max_val=2)

            # RHC
            experiment_name = 'rhc_knapsack_complexity_size_' + str(
                problem_size)
            restart_list = [100]
            rhc = runners.RHCRunner(problem=problem,
                                    experiment_name=experiment_name,
                                    output_directory='knapsack',
                                    seed=27,
                                    iteration_list=[5000],
                                    max_attempts=10,
                                    restart_list=restart_list)
            # the two data frames will contain the results
            rhc_run_stats, rhc_run_curves = rhc.run()
            rhc_time = rhc_run_curves['Time']
            rhc_fitness = rhc_run_curves['Fitness']
            rhc_iteration = rhc_run_curves['Iteration']
            rhc_fitness_list.append(rhc_run_curves.loc[
                rhc_run_curves['Fitness'].idxmax()]['Fitness'])
            rhc_time_list.append(
                rhc_run_curves.loc[rhc_run_curves['Time'].idxmax()]['Time'])
            rhc_feval_list.append(3 * rhc_run_curves.loc[
                rhc_run_curves['Iteration'].idxmax()]['Iteration'])

            # SA
            experiment_name = 'sa_knapsack_complexity_size_' + str(
                problem_size)
            temperature_list = [2]
            sa = runners.SARunner(problem=problem,
                                  experiment_name=experiment_name,
                                  output_directory='knapsack',
                                  seed=27,
                                  iteration_list=[10000],
                                  max_attempts=50,
                                  temperature_list=temperature_list)
            # the two data frames will contain the results
            sa_run_stats, sa_run_curves = sa.run()
            # print(sa_run_curves.dtypes)
            # print(sa_run_curves)
            sa_run_curves['Temperature'] = sa_run_curves['Temperature'].astype(
                str).astype(float)
            # print(df_run_curves)
            sa_time = sa_run_curves['Time']
            sa_fitness = sa_run_curves['Fitness']
            sa_iteration = sa_run_curves['Iteration']
            sa_fitness_list.append(sa_run_curves.loc[
                sa_run_curves['Fitness'].idxmax()]['Fitness'])
            sa_time_list.append(
                sa_run_curves.loc[sa_run_curves['Time'].idxmax()]['Time'])
            sa_feval_list.append(2 * sa_run_curves.loc[
                sa_run_curves['Iteration'].idxmax()]['Iteration'])

            # GA
            experiment_name = 'ga_knapsack_complexity_size_' + str(
                problem_size)
            population_sizes_list = [100]
            mutation_rates_list = [0.15]
            ga = runners.GARunner(problem=problem,
                                  experiment_name=experiment_name,
                                  output_directory='knapsack',
                                  seed=27,
                                  iteration_list=[1000],
                                  population_sizes=population_sizes_list,
                                  mutation_rates=mutation_rates_list,
                                  max_attempts=100)
            # the two data frames will contain the results
            ga_run_stats, ga_run_curves = ga.run()
            # print(ga_run_curves.dtypes)
            # print(ga_run_curves)
            # print(df_run_curves)
            ga_time = ga_run_curves['Time']
            ga_fitness = ga_run_curves['Fitness']
            ga_iteration = ga_run_curves['Iteration']
            ga_fitness_list.append(ga_run_curves.loc[
                ga_run_curves['Fitness'].idxmax()]['Fitness'])
            ga_time_list.append(
                ga_run_curves.loc[ga_run_curves['Time'].idxmax()]['Time'])
            ga_feval_list.append(population_sizes_list[0] * ga_run_curves.loc[
                ga_run_curves['Iteration'].idxmax()]['Iteration'])

            # MIMIC
            experiment_name = 'mimic_knapsack_complexity_size_' + str(
                problem_size)
            population_sizes_list = [200]
            keep_percent_list = [0.35]
            mimic = runners.MIMICRunner(problem=problem,
                                        experiment_name=experiment_name,
                                        output_directory='knapsack',
                                        seed=27,
                                        iteration_list=[150],
                                        population_sizes=population_sizes_list,
                                        keep_percent_list=keep_percent_list,
                                        max_attempts=15,
                                        use_fast_mimic=True)
            # the two data frames will contain the results
            mimic_run_stats, mimic_run_curves = mimic.run()
            # print(mimic_run_curves.dtypes)
            # print(mimic_run_curves)
            # print(df_run_curves)
            mimic_time = mimic_run_curves['Time']
            mimic_fitness = mimic_run_curves['Fitness']
            mimic_iteration = mimic_run_curves['Iteration']
            mimic_fitness_list.append(mimic_run_curves.loc[
                mimic_run_curves['Fitness'].idxmax()]['Fitness'])
            mimic_time_list.append(mimic_run_curves.loc[
                mimic_run_curves['Time'].idxmax()]['Time'])
            mimic_feval_list.append(
                population_sizes_list[0] * mimic_run_curves.loc[
                    mimic_run_curves['Iteration'].idxmax()]['Iteration'])

        plt.rc("font", size=8)
        plt.rc("axes", titlesize=12)
        plt.rc("axes", labelsize=10)
        plt.rc("xtick", labelsize=8)
        plt.rc("ytick", labelsize=8)
        plt.rc("legend", fontsize=8)
        plt.rc("figure", titlesize=11)
        #fig, ax = plt.subplots(2, 1, dpi=100, sharex=True, figsize=(5,4))
        fig, ax = plt.subplots(1, 3, figsize=(12, 3.5))
        fig.suptitle('Knapsack Complexity Analysis', fontsize=14)
        # ax[0].plot(problem_size_list, sa_fitness_list, 'b-', label='Simulated Annealing', linewidth=1)
        # ax[0].plot(problem_size_list, ga_fitness_list, 'g:', label='Genetic', linewidth=1)
        w = 1
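        # Offset each algorithm's bars by one knapsack-size unit so the four
        # bar groups sit side by side at every problem size.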
        ax[0].bar(problem_size_list - w,
                  sa_fitness_list,
                  width=w,
                  color='blue',
                  label='Simulated Annealing')
        ax[0].bar(problem_size_list,
                  ga_fitness_list,
                  width=w,
                  color='green',
                  label='Genetic')
        ax[0].bar(problem_size_list - 2 * w,
                  rhc_fitness_list,
                  width=w,
                  color='red',
                  label='Random Hill Climb')
        ax[0].bar(problem_size_list + w,
                  mimic_fitness_list,
                  width=w,
                  color='orange',
                  label='MIMIC')
        ax[0].set(xlabel='Knapsack Size', ylabel='Fitness')
        ax[0].legend()

        ax[1].plot(problem_size_list,
                   sa_time_list,
                   'b-',
                   label='Simulated Annealing',
                   linewidth=1)
        ax[1].plot(problem_size_list,
                   ga_time_list,
                   'g:',
                   label='Genetic',
                   linewidth=1)
        ax[1].plot(problem_size_list,
                   rhc_time_list,
                   'r--',
                   label='Random Hill Climb',
                   linewidth=1)
        ax[1].plot(problem_size_list,
                   mimic_time_list,
                   '-.',
                   color='orange',
                   label='MIMIC',
                   linewidth=1)
        ax[1].set(xlabel='Knapsack Size', ylabel='Time (s)')
        ax[1].legend()

        ax[2].plot(problem_size_list,
                   sa_feval_list,
                   'b-',
                   label='Simulated Annealing',
                   linewidth=1)
        ax[2].plot(problem_size_list,
                   ga_feval_list,
                   'g:',
                   label='Genetic',
                   linewidth=1)
        ax[2].plot(problem_size_list,
                   rhc_feval_list,
                   'r--',
                   label='Random Hill Climb',
                   linewidth=1)
        ax[2].plot(problem_size_list,
                   mimic_feval_list,
                   '-.',
                   color='orange',
                   label='MIMIC',
                   linewidth=1)
        ax[2].set(xlabel='Knapsack Size', ylabel='Function Evaluations')
        ax[2].yaxis.tick_right()
        plt.show()

    if 'performance_graph' in task:
        problem_size = 80

        # Knapsack
        weights = [idx for idx in range(1, problem_size + 1)]
        print(weights)
        values = [idx for idx in range(1, problem_size + 1)]
        max_weight_pct = 0.3
        knapsack_fitness = mlrose.Knapsack(weights, values, max_weight_pct)
        best_fitness_list = []
        problem = mlrose.DiscreteOpt(int(problem_size),
                                     knapsack_fitness,
                                     maximize=True,
                                     max_val=2)
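        # Performance study at a fixed problem size: run each tuned algorithm
        # once and plot its full fitness curve against iterations, wall-clock
        # time, and function evaluations (approximated with rough
        # per-iteration multipliers).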

        # RHC
        experiment_name = 'rhc_knapsack_performance_size_' + str(problem_size)
        restart_list = [100]
        rhc = runners.RHCRunner(problem=problem,
                                experiment_name=experiment_name,
                                output_directory='knapsack',
                                seed=27,
                                iteration_list=[5000],
                                max_attempts=10,
                                restart_list=restart_list)
        # the two data frames will contain the results
        rhc_run_stats, rhc_run_curves = rhc.run()
        # print(rhc_run_curves.dtypes)
        # print(rhc_run_curves)
        # print(df_run_curves)
        rhc_time = rhc_run_curves['Time']
        rhc_fitness = rhc_run_curves['Fitness']
        rhc_iteration = rhc_run_curves['Iteration']
        rhc_feval = rhc_run_curves['Iteration'] * 2

        # SA
        experiment_name = 'sa_knapsack_performance_size_' + str(problem_size)
        temperature_list = [2]
        sa = runners.SARunner(problem=problem,
                              experiment_name=experiment_name,
                              output_directory='knapsack',
                              seed=27,
                              iteration_list=[10000],
                              max_attempts=50,
                              temperature_list=temperature_list)
        # the two data frames will contain the results
        sa_run_stats, sa_run_curves = sa.run()
        # print(sa_run_curves.dtypes)
        # print(sa_run_curves)
        sa_run_curves['Temperature'] = sa_run_curves['Temperature'].astype(
            str).astype(float)
        # print(df_run_curves)
        sa_time = sa_run_curves['Time']
        sa_fitness = sa_run_curves['Fitness']
        sa_iteration = sa_run_curves['Iteration']
        sa_feval = sa_run_curves['Iteration'] * 2

        # GA
        experiment_name = 'ga_knapsack_performance_size_' + str(problem_size)
        population_sizes_list = [100]
        mutation_rates_list = [0.15]
        ga = runners.GARunner(problem=problem,
                              experiment_name=experiment_name,
                              output_directory='knapsack',
                              seed=27,
                              iteration_list=[1000],
                              population_sizes=population_sizes_list,
                              mutation_rates=mutation_rates_list,
                              max_attempts=100)
        # the two data frames will contain the results
        ga_run_stats, ga_run_curves = ga.run()
        # print(ga_run_curves.dtypes)
        # print(ga_run_curves)
        # print(df_run_curves)
        ga_time = ga_run_curves['Time']
        ga_fitness = ga_run_curves['Fitness']
        ga_iteration = ga_run_curves['Iteration']
        ga_feval = ga_run_curves['Iteration'] * population_sizes_list[0]

        # MIMIC
        experiment_name = 'mimic_knapsack_performance_size_' + str(
            problem_size)
        population_sizes_list = [200]
        keep_percent_list = [0.5]
        mimic = runners.MIMICRunner(problem=problem,
                                    experiment_name=experiment_name,
                                    output_directory='knapsack',
                                    seed=27,
                                    iteration_list=[150],
                                    population_sizes=population_sizes_list,
                                    keep_percent_list=keep_percent_list,
                                    max_attempts=15,
                                    use_fast_mimic=True)
        # the two data frames will contain the results
        mimic_run_stats, mimic_run_curves = mimic.run()
        # print(mimic_run_curves.dtypes)
        # print(mimic_run_curves)
        # print(df_run_curves)
        mimic_time = mimic_run_curves['Time']
        mimic_fitness = mimic_run_curves['Fitness']
        mimic_iteration = mimic_run_curves['Iteration']
        mimic_feval = mimic_run_curves['Iteration'] * population_sizes_list[0]

        plt.rc("font", size=8)
        plt.rc("axes", titlesize=12)
        plt.rc("axes", labelsize=10)
        plt.rc("xtick", labelsize=8)
        plt.rc("ytick", labelsize=8)
        plt.rc("legend", fontsize=8)
        plt.rc("figure", titlesize=11)
        #fig, ax = plt.subplots(2, 1, dpi=100, sharex=True, figsize=(5,4))
        fig, ax = plt.subplots(1, 3, figsize=(12, 3.5))
        fig.suptitle(
            'Knapsack Algorithm Performance Analysis, problem size = ' +
            str(problem_size),
            fontsize=14)
        ax[0].plot(rhc_iteration,
                   rhc_fitness,
                   'r--',
                   label='Random Hill Climb',
                   linewidth=1)
        ax[0].plot(sa_iteration,
                   sa_fitness,
                   'b:',
                   label='Simulated Annealing',
                   linewidth=1)
        ax[0].plot(ga_iteration,
                   ga_fitness,
                   'g-',
                   label='Genetic',
                   linewidth=2)
        ax[0].plot(mimic_iteration,
                   mimic_fitness,
                   '-.',
                   color='orange',
                   label='MIMIC',
                   linewidth=2)
        ax[0].set(xlabel='Iteration', ylabel='Fitness')
        ax[0].legend()
        #ax[0].set_title('Fitness vs. Iteration')

        ax[1].plot(rhc_time,
                   rhc_fitness,
                   'r--',
                   label='Random Hill Climb',
                   linewidth=1)
        ax[1].plot(sa_time,
                   sa_fitness,
                   'b:',
                   label='Simulated Annealing',
                   linewidth=1)
        ax[1].plot(ga_time, ga_fitness, 'g-', label='Genetic', linewidth=2)
        ax[1].plot(mimic_time,
                   mimic_fitness,
                   '-.',
                   color='orange',
                   label='MIMIC',
                   linewidth=2)
        ax[1].set(xlabel='Time (s)', ylabel='Fitness')
        ax[1].legend()

        ax[2].plot(rhc_feval,
                   rhc_fitness,
                   'r--',
                   label='Random Hill Climb',
                   linewidth=1)
        ax[2].plot(sa_feval,
                   sa_fitness,
                   'b:',
                   label='Simulated Annealing',
                   linewidth=1)
        ax[2].plot(ga_feval, ga_fitness, 'g-', label='Genetic', linewidth=1)
        ax[2].plot(mimic_feval,
                   mimic_fitness,
                   '-.',
                   color='orange',
                   label='MIMIC',
                   linewidth=1)
        ax[2].set(xlabel='Function Evaluations')
        plt.show()

    return