Example #1
File: order.py Project: pn51/FOQUS
def rank(fnames, ga_max_attempts=25):
    """return fnames ranked"""
    dist_mat = np.load(fnames["dmat"])
    dist_list = mat2tuples(dist_mat)

    # define fitness function object
    fitness_dists = mlrose.TravellingSales(distances=dist_list)

    # define optimization problem object
    n_len = dist_mat.shape[0]
    problem_fit = mlrose.TSPOpt(length=n_len, fitness_fn=fitness_dists, maximize=False)

    # solve problem using the genetic algorithm
    best_state = mlrose.genetic_alg(
        problem_fit, mutation_prob=0.2, max_attempts=ga_max_attempts, random_state=2
    )[0]

    # retrieve ranked list
    cand = load(fnames["cand"])
    ranked_cand = cand.loc[best_state]

    # save the output
    fname, ext = os.path.splitext(fnames["cand"])
    fname_ranked = fname + "_ranked" + ext
    write(fname_ranked, ranked_cand)
    _log.info("Ordered candidates saved to %s", fname_ranked)

    return fname_ranked
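The snippet above relies on FOQUS project helpers that are not shown (`mat2tuples`, `load`, `write`, `_log`). A minimal sketch of what `mat2tuples` plausibly does, assuming it converts a square distance matrix into the upper-triangle (i, j, distance) triples that `mlrose.TravellingSales(distances=...)` expects; this is illustrative, not the project's actual code.

import numpy as np

def mat2tuples(dist_mat):
    """Convert a square distance matrix to (i, j, distance) triples for the
    upper triangle (assumed behaviour of the FOQUS helper)."""
    n = dist_mat.shape[0]
    return [
        (i, j, float(dist_mat[i, j]))
        for i in range(n)
        for j in range(i + 1, n)
    ]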
Example #2
def get_fitness_functions():
    df = pd.read_csv("./houston2008_order.csv")
    coord_list = list(df[['lat', 'long']].apply(tuple, axis=1))
    coord_list = coord_list[0:30]
    fitness_tsp = mlrose.TravellingSales(coords=coord_list)
    problem_tsp = mlrose.TSPOpt(length=len(coord_list),
                                fitness_fn=fitness_tsp,
                                maximize=False)

    fitness_fourpeak = mlrose.FourPeaks(t_pct=.3)
    problem_fourpeak = mlrose.DiscreteOpt(length=20,
                                          fitness_fn=fitness_fourpeak)

    fitness_flipflop = mlrose.FlipFlop()
    problem_flipflop = mlrose.DiscreteOpt(length=30,
                                          fitness_fn=fitness_flipflop)

    fitness_one_max = mlrose.OneMax()
    problem_one_max = mlrose.DiscreteOpt(
        length=35,
        fitness_fn=fitness_one_max,
    )

    weights = [10, 5, 2, 8, 15]
    values = [1, 2, 3, 4, 5]
    max_weight_pct = 0.6
    fitness_knapsack = mlrose.Knapsack(weights, values, max_weight_pct)
    problem_knapsack = mlrose.DiscreteOpt(length=5,
                                          fitness_fn=fitness_knapsack)

    return {
        "tsp": problem_tsp,
        "four_peaks": problem_fourpeak,
        "flip_flop": problem_flipflop,
        "one_max": problem_one_max,
        "knapsack": problem_knapsack,
    }
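One way the returned dictionary might be consumed, as a rough sketch: run a quick random hill climb over each problem and report the best fitness. Indexing the result tuple keeps the sketch compatible with both the classic mlrose and the mlrose_hiive return signatures; the parameter values are illustrative only.

problems = get_fitness_functions()
for name, problem in problems.items():
    # random_hill_climb returns (best_state, best_fitness[, curve]); index the
    # tuple to stay agnostic about which mlrose variant is imported.
    result = mlrose.random_hill_climb(problem, max_attempts=20,
                                      max_iters=1000, random_state=1)
    print(name, "best fitness:", result[1])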
Example #3
    def run_sa_hyper_params(self, fitness_fn):
        fitness_name = fitness_fn.__class__.__name__
        print("Running %s" % fitness_name)
        init_states = {}
        knap_fitnesses = {}
        tsp_fitnesses = {}
        tries = 1
        for x in 2**np.arange(6, 7):
            n = int(x)
            fitness_dists = mlrose.TravellingSales(distances=get_coords(n))
            tsp_fitnesses[n] = fitness_dists
            edges = []
            for _ in range(int(n * 0.75)):
                a = r.randint(0, n - 1)
                b = r.randint(0, n - 1)
                while b == a:
                    b = r.randint(0, n - 1)
                edges.append((a, b))

            fitness_fn_knap = mlrose.MaxKColor(edges=edges)
            init_states[n] = []
            knap_fitnesses[n] = fitness_fn_knap
            for y in range(tries):
                init_states[n].append(get_init_state(n))

        for n, init_states_list in init_states.items():
            if fitness_name == 'MaxKColor':
                fitness_fn = knap_fitnesses[n]
            if fitness_name == 'TravellingSales':
                fitness_fn = tsp_fitnesses[n]
            print(n)
            print('%s: i=%d' % ('simulated_annealing', n))

            for init_state in init_states_list:
                problem = mlrose.DiscreteOpt(length=len(init_state),
                                             fitness_fn=fitness_fn,
                                             maximize=True)
                if fitness_name == 'TravellingSales':
                    problem = mlrose.TSPOpt(length=n, fitness_fn=fitness_fn)
                for max_attempts in range(10, 110, 10):
                    total_score = 0
                    total_iter = 0
                    best_state, best_fitness, curve = mlrose.simulated_annealing(
                        problem,
                        max_attempts=max_attempts,
                        max_iters=10000,
                        random_state=1,
                        curve=True)
                    total_score += np.max(curve)
                    total_iter += len(curve)
                    print('The fitness at the best state is: ',
                          total_score / tries, '. Max Attempts: ',
                          max_attempts)
                    self.track_best_params(problem=fitness_name,
                                           algo='simulated_annealing',
                                           param='max_attempts',
                                           score=total_score,
                                           value=max_attempts)
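The method above (and run_complexity in Example #12 below) calls project helpers get_coords and get_init_state that are not part of the excerpt. A hypothetical sketch of both, assuming get_coords(n) yields (i, j, distance) triples for n random points and get_init_state(n) yields a random binary vector of length n:

import numpy as np

def get_coords(n, seed=0):
    # Assumed helper: pairwise distances between n random 2-D points, returned
    # as the (i, j, distance) triples TravellingSales expects.
    rng = np.random.default_rng(seed)
    pts = rng.random((n, 2))
    return [
        (i, j, float(np.linalg.norm(pts[i] - pts[j])))
        for i in range(n)
        for j in range(i + 1, n)
    ]

def get_init_state(n, seed=0):
    # Assumed helper: a random bit string used as the initial state for the
    # discrete problems.
    rng = np.random.default_rng(seed)
    return rng.integers(0, 2, size=n)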
Example #4
def create_tsp_problem(length=10, width=10, cities=8):
    coords_list = []
    np.random.seed(786)

    # Draw until we have the requested number of unique city coordinates, so
    # the coordinate list always matches the problem length below.
    while len(coords_list) < cities:
        new_city = (np.random.randint(0, length), np.random.randint(0, width))
        if new_city not in coords_list:
            coords_list.append(new_city)

    tsp_coords = mlrose_hiive.fitness.TravellingSales(coords=coords_list)
    tsp_problem = mlrose_hiive.TSPOpt(length=cities, fitness_fn=tsp_coords, maximize=True)
    return tsp_problem
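A hypothetical usage of create_tsp_problem, solving the returned problem with the genetic algorithm from mlrose_hiive; the hyperparameters here are illustrative, not taken from the original project.

import mlrose_hiive

# Sketch: build the problem and solve it with the genetic algorithm.
tsp_problem = create_tsp_problem(length=10, width=10, cities=8)
best_state, best_fitness, _ = mlrose_hiive.genetic_alg(
    tsp_problem, mutation_prob=0.2, max_attempts=50, random_state=2, curve=True)
print(best_state, best_fitness)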
Example #5
    def get_order(self, distance_matrix) -> Tuple[np.ndarray, float]:
        n_locations = len(distance_matrix)
        distance_triples = _create_distance_triples(distance_matrix)
        fitness_dists = mlrose.TravellingSales(distances=distance_triples)
        problem_fit = mlrose.TSPOpt(length=n_locations,
                                    fitness_fn=fitness_dists,
                                    maximize=False)
        best_state, best_fitness, iterations = mlrose.genetic_alg(
            problem_fit, mutation_prob=0.2, max_attempts=100, random_state=2)

        best_state = super()._close_circle(
            super()._roll_state_in_order(best_state))
        return best_state, best_fitness
Example #6
    def find_best(self, pois: list, **kwargs):
        logging.debug('init bucket')
        bucket = Bucket(self.routing)
        logging.debug('compute distance matrix')
        self.compute_distances(pois, bucket)
        logging.debug('define fitness function')
        fitness_dists = mlrose.TravellingSales(distances=bucket.distances)
        logging.debug('define optimization problem')
        problem_fit = mlrose.TSPOpt(
            length=len(pois),
            fitness_fn=fitness_dists,
            maximize=False
        )
        logging.debug('run randomized optimization algorithm')
        best_state, best_fitness, fitness_curve = mlrose.genetic_alg(
            problem_fit, random_state=2, **kwargs)
        segments = bucket.segments(self._generate_keys(best_state))
        meters = sum([segment.path.distance for segment in segments])
        return Tour(segments, meters)
Example #7
    def optimize(self):
        problem_size_space = self.problem_size
        # Initializing the problem
        init_state = []
        length = problem_size_space // 5
        for row in range(5):
            for col in range(length):
                init_state.append((row, col))
        problem = mlrose.TSPOpt(length=problem_size_space,
                                maximize=False,
                                coords=init_state)
        # SA
        # super().gridSearchSA(problem, 'TSP', problem_size_space, self.noOfiteration)
        # RHC
        # super().gridSearchRHC(problem, 'TSP', problem_size_space, self.noOfiteration)
        # GA
        # super().gridSearchGA(problem, 'TSP', problem_size_space, self.noOfiteration)
        # MIMIC
        super().gridSearchMIMIC(problem, 'TSP', problem_size_space,
                                self.noOfiteration)
def main():
    ## SET SOME PARAMS TO USE GLOBALLY
    max_iters_list = [50, 100, 1000]  #,32,64,128,256,512,1024]
    max_iters_list_full = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
    rand_list = [1, 11, 22]  #,44,55,66,77,88,99]
    rand_list_full = [0, 11, 22, 33, 44, 55, 66, 77, 88, 99]
    input_location = 'data/'
    output_location = 'outputs/'
    chart_output_location = 'charts/'
    prefix = '5th_'

    ## DEFINE PROBLEMS TO SOLVE
    # Traveling Salesman Problem (TSP)
    space_length = 1000
    cities_cnt = 200
    coords_list, x, y = create_TSP(space_length,
                                   cities_cnt,
                                   return_lists_too=True)
    plt.plot(x, y, 'o')
    plt.savefig(chart_output_location + 'TSP_visual' + '.png')
    fitness_coords = mlrose.TravellingSales(coords=coords_list)
    problem_TSP = mlrose.TSPOpt(length=len(coords_list),
                                fitness_fn=fitness_coords,
                                maximize=False)

    # 4 Peaks
    t_pct = 0.1
    length = 200
    fitness_4_peaks = mlrose.FourPeaks(t_pct=t_pct)
    problem_4P = mlrose.DiscreteOpt(length=length,
                                    fitness_fn=fitness_4_peaks,
                                    maximize=True,
                                    max_val=2)
    problem_4P_small = mlrose.DiscreteOpt(length=50,
                                          fitness_fn=fitness_4_peaks,
                                          maximize=True,
                                          max_val=2)
    problem_4P_big = mlrose.DiscreteOpt(length=1000,
                                        fitness_fn=fitness_4_peaks,
                                        maximize=True,
                                        max_val=2)

    # Continuous Peaks
    t_pct = 0.1
    length = 200
    fitness_cont_peaks = mlrose.ContinuousPeaks(t_pct=t_pct)
    problem_cont_peaks = mlrose.DiscreteOpt(length=length,
                                            fitness_fn=fitness_cont_peaks,
                                            maximize=True,
                                            max_val=2)

    # Flip Flop
    length = 200
    fitness_FF = mlrose.FlipFlop()
    problem_FF = mlrose.DiscreteOpt(length=length,
                                    fitness_fn=fitness_FF,
                                    maximize=True,
                                    max_val=2)
    problem_FF_small = mlrose.DiscreteOpt(length=50,
                                          fitness_fn=fitness_FF,
                                          maximize=True,
                                          max_val=2)
    problem_FF_big = mlrose.DiscreteOpt(length=1000,
                                        fitness_fn=fitness_FF,
                                        maximize=True,
                                        max_val=2)

    # Knapsack
    length = 200
    weights, values = create_Knapsack(length)
    weights_big, values_big = create_Knapsack(1000)
    weights_small, values_small = create_Knapsack(50)
    fitness_KS = mlrose.Knapsack(weights, values, max_weight_pct=0.65)
    fitness_KS_big = mlrose.Knapsack(weights_big,
                                     values_big,
                                     max_weight_pct=0.65)
    fitness_KS_small = mlrose.Knapsack(weights_small,
                                       values_small,
                                       max_weight_pct=0.65)
    problem_KS = mlrose.DiscreteOpt(length=length,
                                    fitness_fn=fitness_KS,
                                    maximize=True,
                                    max_val=2)
    problem_KS_big = mlrose.DiscreteOpt(length=1000,
                                        fitness_fn=fitness_KS_big,
                                        maximize=True,
                                        max_val=2)
    problem_KS_small = mlrose.DiscreteOpt(length=50,
                                          fitness_fn=fitness_KS_small,
                                          maximize=True,
                                          max_val=2)

    dict_of_param_dict = {}
    dict_of_param_dict['GA'] = {
        'pop_size': [100, 200],  #,1000],
        'mutation_prob': [0.5, 0.1, 0.2],
        'max_attempts': [5, 10, 30],
        'max_iters': max_iters_list,
        'random_state': rand_list
    }
    dict_of_param_dict['RHC'] = {
        'max_attempts': [30, 50, 100],  #[5,10,20,50]
        'restarts': [5, 10, 20],  #[0,1,2,5]
        'max_iters': max_iters_list,
        'random_state': rand_list
    }
    dict_of_param_dict['SA'] = {
        'max_attempts': [10, 50, 100],
        'init_temp': [1.0, 10.0, 0.5, 20, 100, 1000],
        'decay': [0.99, 0.8, 0.5],
        'max_iters': max_iters_list,
        'random_state': rand_list
    }
    dict_of_param_dict['MIMIC'] = {
        'pop_size': [100, 150],
        'keep_pct': [0.5, 0.2],
        'max_attempts': [10],
        'max_iters': [100],
        'random_state': rand_list
    }

    MIMIC_FF = {
        'pop_size': 100,
        'keep_pct': 0.5,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    MIMIC_4P = {
        'pop_size': 150,
        'keep_pct': 0.2,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    MIMIC_KS = {
        'pop_size': 150,
        'keep_pct': 0.5,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    MIMIC_CP = {
        'pop_size': 200,
        'keep_pct': 0.2,
        'max_attempts': 30,
        'max_iters': [2, 4, 8, 16, 32, 64,
                      128],  ## put full list here before uploading
        'random_state': [0, 11, 22, 33, 44]
    }
    GA_FF = {
        'pop_size': 200,  #,1000],
        'mutation_prob': 0.5,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }

    MIMIC_FF2 = {
        'pop_size': [100],
        'keep_pct': [0.5],
        'max_attempts': [30, 50],
        'max_iters': [64],
        'random_state': [55]  #,66,77,88,99]
    }

    print("starting MIMIC FF")
    # GETTING MIMIC FF RESULTS
    print("starting MIMIC FF...")
    ''' ## Started running at 3am
    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_FF, MIMIC_FF['max_iters'], MIMIC_FF['random_state']\
    , pop_size=MIMIC_FF['pop_size'], max_attempts=MIMIC_FF['max_attempts'], curve=True, keep_pct=MIMIC_FF['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_FF_attempt_3am.csv')


    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_4P, MIMIC_4P['max_iters'], MIMIC_4P['random_state']\
    , pop_size=MIMIC_4P['pop_size'], max_attempts=MIMIC_4P['max_attempts'], curve=True, keep_pct=MIMIC_4P['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_4P_attempt_3am.csv')


    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_KS, MIMIC_KS['max_iters'], MIMIC_KS['random_state']\
    , pop_size=MIMIC_KS['pop_size'], max_attempts=MIMIC_KS['max_attempts'], curve=True, keep_pct=MIMIC_KS['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_KS_attempt_3am.csv')


    results_df, curve_output_list = fitness_by_iter('MIMIC', problem_cont_peaks, MIMIC_CP['max_iters'], MIMIC_CP['random_state']\
    , pop_size=MIMIC_CP['pop_size'], max_attempts=MIMIC_CP['max_attempts'], curve=True, keep_pct=MIMIC_CP['keep_pct'])
    results_df.to_csv(output_location + 'final_MIMIC_CP_attempt_3am.csv')

    '''

    ## USED FOR GRID SEARCHING PARAMETERS FOR RO ON 3 PROBLEMS
    GA_params_dict = get_params_for_grid_search('GA', max_iters_list=[200])
    print("Here are my GA params for grid search: ", GA_params_dict)
    SA_params_dict = get_params_for_grid_search('SA',
                                                max_iters_list=max_iters_list)
    print("Here are my SA params for grid search: ", SA_params_dict)
    RHC_params_dict = get_params_for_grid_search('RHC',
                                                 max_iters_list=max_iters_list)
    print("Here are my RHC params for grid search: ", RHC_params_dict)
    MIMIC_params_dict = get_params_for_grid_search(
        'MIMIC', max_iters_list=max_iters_list)
    print("Here are my MIMIC params for grid search: ", MIMIC_params_dict)
    #grid_search_MIMIC = MIMIC_best_params(problem_TSP, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + 'grid_search_MIMIC.csv')
    '''
    grid_search_GA = GA_best_params(problem_FF, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_FF_really.csv')
    print("finished GA")
    grid_search_MIMIC = MIMIC_best_params(problem_FF, MIMIC_params_dict, inverse_fitness=False)
    grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_FF_really.csv')
    '''
    print("finished MIMIC FF")

    print("Doing GA rn")
    #results_df, curve_output_list = fitness_by_iter('GA', problem_FF, GA_FF['max_iters'], GA_FF['random_state']\
    #, pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], mutation_prob=GA_FF['mutation_prob'],curve=True)
    #results_df.to_csv(output_location + 'final_MIMIC_FF_attempt_1am.csv')
    print("finished GA")
    ''' GRID SEARCHING

    print("Starting grid search for RHC")
    grid_search_RHC = RHC_best_params(problem_TSP, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix +'grid_search_RHC_TSP.csv')
    grid_search_RHC = RHC_best_params(problem_FF, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_FF.csv')
    grid_search_RHC = RHC_best_params(problem_cont_peaks, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_cont_peaks.csv')
    grid_search_RHC = RHC_best_params(problem_4P, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_4P.csv')

    print("Starting grid search for SA")
    grid_search_SA = SA_best_params(problem_TSP, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_TSP.csv')
    grid_search_SA = SA_best_params(problem_FF, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_FF.csv')
    grid_search_SA = SA_best_params(problem_cont_peaks, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_cont_peaks.csv')
    grid_search_SA = SA_best_params(problem_4P, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_4P.csv')

    print("Starting grid search for GA")
    grid_search_GA = GA_best_params(problem_TSP, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_TSP.csv')
    grid_search_GA = GA_best_params(problem_FF, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_FF.csv')
    grid_search_GA = GA_best_params(problem_cont_peaks, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_cont_peaks.csv')
    grid_search_GA = GA_best_params(problem_4P, GA_params_dict, inverse_fitness=False)
    grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_4P.csv')
    '''
    '''
    print("Starting grid search for MIMIC")
    grid_search_MIMIC = MIMIC_best_params(problem_FF, MIMIC_params_dict, inverse_fitness=False)
    grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_FF.csv')
    #grid_search_MIMIC = MIMIC_best_params(problem_cont_peaks, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_cont_peaks.csv')
    grid_search_MIMIC = MIMIC_best_params(problem_4P, MIMIC_params_dict, inverse_fitness=False)
    grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_4P.csv')
    #grid_search_MIMIC = MIMIC_best_params(problem_TSP, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + 'grid_search_MIMIC_TSP.csv')
    print("Finished MIMIC grid searches")

    print("Starting grid search for Knapsack")
    #grid_search_MIMIC = MIMIC_best_params(problem_KS, MIMIC_params_dict, inverse_fitness=False)
    #grid_search_MIMIC.to_csv(output_location + prefix + 'grid_search_MIMIC_KS.csv')
    #grid_search_GA = GA_best_params(problem_KS, GA_params_dict, inverse_fitness=False)
    #grid_search_GA.to_csv(output_location + prefix + 'grid_search_GA_KS.csv')
    grid_search_SA = SA_best_params(problem_KS, SA_params_dict, inverse_fitness=False)
    grid_search_SA.to_csv(output_location + prefix + 'grid_search_SA_KS.csv')
    grid_search_RHC = RHC_best_params(problem_KS, RHC_params_dict, inverse_fitness=False)
    grid_search_RHC.to_csv(output_location + prefix + 'grid_search_RHC_KS.csv')
    '''

    ## Fitting MIMIC separately and with fewer iterations for all except the FF as run time is so long for MIMIC
    max = 128
    ''' MIMIC CURVE FOR CHARTS ##### Started (again) at 8am ######

    print("Fitting for MIMIC using the 'curve=True' functionality")
    print("First for KS")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_KS, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_KS_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_KS_short_curve.csv')
    print("Finished KS")

    print("Next for 4 Peaks")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=150, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks with 100 and 0.5")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_pop100_keep50_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_pop100_keep50_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks with 100 and 0.2")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=100, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_pop100_keep20_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_pop100_keep20_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks with 150 and 0.5")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P, pop_size=150, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_pop150_keep50_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_pop150_keep50_short_curve.csv')
    print("Finished 4 Peaks")

    print("Next for 4 Peaks Big")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P_big, pop_size=150, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_big_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_big_short_curve.csv')
    print("Finished 4 Peaks Big")

    print("Next for KS Small")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_KS_small, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_KS_small_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_KS_small_short_curve.csv')
    print("Finished KS small")

    print("Next FF small")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_FF_small, pop_size=100, keep_pct=0.5, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_FF_small_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_FF_small_short_curve.csv')
    print("Finished FF Small")

    print("Next for 4 Peaks Small")
    start_time_fit = time.perf_counter()
    a,b,curve_output = mlrose.mimic(problem_4P_small, pop_size=150, keep_pct=0.2, max_attempts=10, max_iters=128, curve=True\
    , random_state=0)
    end_time_fit = time.perf_counter()
    time_used = end_time_fit - start_time_fit
    df1, df2 = curve_to_df(curve_output, max)
    df2['time_to_128'] = time_used
    df1.to_csv(output_location+'MIMIC_4P_small_full_curve.csv')
    df2.to_csv(output_location+'MIMIC_4P_small_short_curve.csv')
    print("Finished 4 Peaks Small")
    '''

    ### Now GA

    GA_FF = {
        'pop_size': 100,  #,1000],
        'mutation_prob': 0.1,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    GA_KS = {
        'pop_size': 200,  #,1000],
        'mutation_prob': 0.2,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    GA_4P = {
        'pop_size': 200,  #,1000],
        'mutation_prob': 0.5,
        'max_attempts': 30,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    ''' More fitness by iteration calculations
    #results_df, curve_output_list = fitness_by_iter('GA', problem_FF, GA_FF['max_iters'], GA_FF['random_state']\
    #, pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], curve=True, mutation_prob=GA_FF['mutation_prob'])
    #results_df.to_csv(output_location + 'final_GA_FF_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_FF_small, GA_FF['max_iters'], GA_FF['random_state']\
    , pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], curve=True, mutation_prob=GA_FF['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_FF_small_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_FF_big, GA_FF['max_iters'], GA_FF['random_state']\
    , pop_size=GA_FF['pop_size'], max_attempts=GA_FF['max_attempts'], curve=True, mutation_prob=GA_FF['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_FF_big_attempt_8am.csv')



    #results_df, curve_output_list = fitness_by_iter('GA', problem_4P, GA_4P['max_iters'], GA_4P['random_state']\
    #, pop_size=GA_4P['pop_size'], max_attempts=GA_4P['max_attempts'], curve=True, mutation_prob=GA_4P['mutation_prob'])
    #results_df.to_csv(output_location + 'final_GA_4P_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_4P_big, GA_4P['max_iters'], GA_4P['random_state']\
    , pop_size=GA_4P['pop_size'], max_attempts=GA_4P['max_attempts'], curve=True, mutation_prob=GA_4P['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_4P_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_4P_small, GA_4P['max_iters'], GA_4P['random_state']\
    , pop_size=GA_4P['pop_size'], max_attempts=GA_4P['max_attempts'], curve=True, mutation_prob=GA_4P['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_4P_small_attempt_8am.csv')



    #results_df, curve_output_list = fitness_by_iter('GA', problem_KS, GA_KS['max_iters'], GA_KS['random_state']\
    #, pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=GA_KS['mutation_prob'])
    #results_df.to_csv(output_location + 'final_GA_KS_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_KS_big, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=GA_KS['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_KS_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('GA', problem_KS_small, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=GA_KS['mutation_prob'])
    results_df.to_csv(output_location + 'final_GA_KS_small_attempt_8am.csv')

    '''

    ########### SA
    print("now doing SA")
    SA_4P = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=100, decay=0.8),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }

    SA_FF = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=100, decay=0.8),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_attempt_8am.csv')

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P_big, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P_small, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_small_attempt_8am.csv')
    ''' more fitness by iteration calculations
    #results_df, curve_output_list = fitness_by_iter('SA', problem_FF, SA_FF['max_iters'], SA_FF['random_state']\
    #, schedule=SA_FF['schedule'], max_attempts=SA_FF['max_attempts'], curve=True)
    #results_df.to_csv(output_location + 'final_SA_FF_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_FF_big, SA_FF['max_iters'], SA_FF['random_state']\
    , schedule=SA_FF['schedule'], max_attempts=SA_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_FF_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_FF_small, SA_FF['max_iters'], SA_FF['random_state']\
    , schedule=SA_FF['schedule'], max_attempts=SA_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_FF_small_attempt_8am.csv')


    SA_4P = {
    'max_attempts':10,
    'schedule':mlrose.GeomDecay(init_temp=100, decay=0.8),
    'max_iters':max_iters_list_full,
    'random_state':rand_list_full
    }

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P_big, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P_small, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_small_attempt_8am.csv')
    '''
    print("picking up where I left off on making the final curves..")

    SA_KS = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=1000, decay=0.99),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    ''' more fitness by iteration calculations
    results_df, curve_output_list = fitness_by_iter('SA', problem_KS, SA_KS['max_iters'], SA_KS['random_state']\
    , schedule=SA_KS['schedule'], max_attempts=SA_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_KS_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_KS_big, SA_KS['max_iters'], SA_KS['random_state']\
    , schedule=SA_KS['schedule'], max_attempts=SA_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_KS_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('SA', problem_KS_small, SA_KS['max_iters'], SA_KS['random_state']\
    , schedule=SA_KS['schedule'], max_attempts=SA_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_KS_small_attempt_8am.csv')
    '''

    RHC_KS = {
        'max_attempts': 50,
        'restarts': 20,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('RHC', problem_KS, RHC_KS['max_iters'], RHC_KS['random_state']\
    , restarts=RHC_KS['restarts'], max_attempts=RHC_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_KS_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_KS_big, RHC_KS['max_iters'], RHC_KS['random_state']\
    , restarts=RHC_KS['restarts'], max_attempts=RHC_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_KS_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_KS_small, RHC_KS['max_iters'], RHC_KS['random_state']\
    , restarts=RHC_KS['restarts'], max_attempts=RHC_KS['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_KS_small_attempt_8am.csv')
    '''
    RHC_FF = {
        'max_attempts': 50,
        'restarts': 20,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('RHC', problem_FF, RHC_FF['max_iters'], RHC_FF['random_state']\
    , restarts=RHC_FF['restarts'], max_attempts=RHC_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_FF_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_FF_big, RHC_FF['max_iters'], RHC_FF['random_state']\
    , restarts=RHC_FF['restarts'], max_attempts=RHC_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_FF_big_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_FF_small, RHC_FF['max_iters'], RHC_FF['random_state']\
    , restarts=RHC_FF['restarts'], max_attempts=RHC_FF['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_FF_small_attempt_8am.csv')
    '''

    RHC_4P = {
        'max_attempts': 50,
        'restarts': 20,
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('RHC', problem_4P, RHC_4P['max_iters'], RHC_4P['random_state']\
    , restarts=RHC_4P['restarts'], max_attempts=RHC_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_4P_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_4P_small, RHC_4P['max_iters'], RHC_4P['random_state']\
    , restarts=RHC_4P['restarts'], max_attempts=RHC_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_4P_small_attempt_8am.csv')
    results_df, curve_output_list = fitness_by_iter('RHC', problem_4P_big, RHC_4P['max_iters'], RHC_4P['random_state']\
    , restarts=RHC_4P['restarts'], max_attempts=RHC_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_RHS_4P_big_attempt_8am.csv')
    '''

    ## where it stopped
    print("I will now make the complexity curves for other algos")
    SA_4P_hacked = {
        'max_attempts': 10,
        'schedule': mlrose.GeomDecay(init_temp=100, decay=0.99),
        'max_iters': max_iters_list_full,
        'random_state': rand_list_full
    }
    '''
    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=SA_4P_hacked['schedule'], max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_decay_99.csv')

    results_df, curve_output_list = fitness_by_iter('SA', problem_4P, SA_4P['max_iters'], SA_4P['random_state']\
    , schedule=mlrose.GeomDecay(init_temp=1, decay=0.8), max_attempts=SA_4P['max_attempts'], curve=True)
    results_df.to_csv(output_location + 'final_SA_4P_T_1_decay_80.csv')

    results_df, curve_output_list = fitness_by_iter('GA', problem_KS, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=GA_KS['pop_size'], max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=0.1)
    results_df.to_csv(output_location + 'final_GA_KS_mutation_01.csv')
    '''
    results_df, curve_output_list = fitness_by_iter('GA', problem_KS, GA_KS['max_iters'], GA_KS['random_state']\
    , pop_size=100, max_attempts=GA_KS['max_attempts'], curve=True, mutation_prob=0.2)
    results_df.to_csv(output_location + 'final_GA_KS_mutation_02_pop_100.csv')

    ## Need a few more MIMIC chart inputs
    #print("Need a few more MIMIC chart inputs, so I will now make those")
    #print("Next FF p=100 keep=0.2")
    ''' MIMIC inputs for charts
    # generate N random 3d vectors whose values lie in the range [1, 10)
    coords = np.random.randint(1, 10, size=(N, 3))
    print(coords)
    if k == 2:
        coords[:, 0] *= -1
    elif k == 3:
        coords[:, 0] *= -1
        coords[:, 1] *= -1
    elif k == 4:
        coords[:, 1] *= -1

    # compute the distance between each and every point
    dist_list = [(i, j, round(np.linalg.norm(coords[i] - coords[j]), 3))
                 for i in range(N) for j in range(i + 1, N)]

    # define the fitness object and run the solver
    fitness_coords = mlrose.TravellingSales(distances=dist_list)
    problem_fit = mlrose.TSPOpt(length=len(coords),
                                fitness_fn=fitness_coords,
                                maximize=False)

    best_state, _, _ = mlrose.genetic_alg(problem_fit, random_state=2)

    with open('drone' + str(k) + '_points.txt', 'w') as out:
        # jugaad to avoid regex in parsing coords within controller script
        sys.stdout = out
        for i in coords:
            print(*i, sep=' ', end=';')
        print(*best_state, sep=' ', end='')
    sys.stdout = oldout
Example #10
import mlrose_hiive as mlrose
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from time import process_time
from sklearn.metrics import accuracy_score

print("Running TSP...")

prob_length = 50
np.random.seed(0)
coords_list = []
for n in range(prob_length):
    coords_list.append(np.random.rand(2))
fitness = mlrose.TravellingSales(coords=coords_list)
problem = mlrose.TSPOpt(prob_length, fitness)

RANDOM_SEED = 42
MAX_ATTEMPTS = 200

#%% tuning for SA
curve_list = []
decays = [0.999, 0.99, 0.9]
for d in decays:
    schedule = mlrose.GeomDecay(decay=d)
    _, _, curve = mlrose.simulated_annealing(
        problem,
        schedule=schedule,
        max_attempts=MAX_ATTEMPTS,
        max_iters=3000,
        curve=True,
        random_state=RANDOM_SEED)  # assumed completion of the truncated call
    curve_list.append(curve)
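The listing is cut off by the source page at this point. Assuming the loop collects one curve per decay rate, a plotting sketch for comparing them could look like the following; the output file name is hypothetical, and the 2-D handling covers newer mlrose_hiive versions whose curves also record function evaluations.

# Sketch: compare the SA fitness curves for the three decay rates.
for d, curve in zip(decays, curve_list):
    arr = np.asarray(curve)
    fitness_values = arr[:, 0] if arr.ndim == 2 else arr
    plt.plot(fitness_values, label="decay=%s" % d)
plt.xlabel("Iteration")
plt.ylabel("Fitness")
plt.legend()
plt.savefig("sa_decay_tuning.png")  # hypothetical output name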
Example #11
def runPart1(savePath):
    fitness = mlrose.FourPeaks(t_pct=0.15)
    init_state = None
    fourPeaksProblem = mlrose.DiscreteOpt(length=12,
        fitness_fn=fitness, maximize=True, max_val=2)

    part1_1 = Part1(name='Four Peaks', fitness=fitness,
                    problem=fourPeaksProblem, init_state=init_state)
    part1_1.runAll(savePath)

    fitness = mlrose.Queens()
    init_state = None
    eightQueensProblem = mlrose.DiscreteOpt(length=8,
        fitness_fn=fitness, maximize=False, max_val=8)
    part1_2 = Part1(name='Eight Queens', fitness=fitness,
                    problem=eightQueensProblem, init_state=init_state)
    part1_2.runAll(savePath)

    fitness = mlrose.SixPeaks(t_pct=0.15)
    init_state = None
    sixPeaksProblem = mlrose.DiscreteOpt(length=11,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_4 = Part1(name='Six Peaks', fitness=fitness,
                    problem=sixPeaksProblem, init_state=init_state)
    part1_4.runAll(savePath)

    fitness = mlrose.FlipFlop()
    init_state = None
    flipFlopProblem = mlrose.DiscreteOpt(length=7,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_5 = Part1(name='Flip Flop - 7', fitness=fitness,
                    problem=flipFlopProblem, init_state=init_state)
    part1_5.runAll(savePath)


    fitness = mlrose.FlipFlop()
    init_state = None
    flipFlopProblem = mlrose.DiscreteOpt(length=100,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_5 = Part1(name='Flip Flop - 100', fitness=fitness,
                    problem=flipFlopProblem, init_state=init_state)
    part1_5.runAll(savePath)

    fitness = mlrose.Queens()
    init_state = None
    eightQueensProblem = mlrose.DiscreteOpt(length=80,
        fitness_fn=fitness, maximize=False, max_val=8)
    part1_2 = Part1(name='Eighty Queens', fitness=fitness,
                    problem=eightQueensProblem, init_state=init_state)
    part1_2.runAll(savePath)

    fitness = mlrose.FlipFlop()
    init_state = None
    flipFlopProblem = mlrose.DiscreteOpt(length=15,
        fitness_fn=fitness, maximize=True, max_val=2)
    part1_5 = Part1(name='Flip Flop - 15', fitness=fitness,
                    problem=flipFlopProblem, init_state=init_state)
    part1_5.runAll(savePath)


    edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
    fitness = mlrose.MaxKColor(edges)
    init_state = None
    maxKColorsProblem = mlrose.DiscreteOpt(length=7,
        fitness_fn=fitness, maximize=False, max_val=2)
    part1_3 = Part1(name='Max-K Color', fitness=fitness,
                    problem=maxKColorsProblem, init_state=init_state)
    part1_3.runAll(savePath)

    # =============================================================
    #  Source - Tutorial from MLRose Docs
    #  https://mlrose.readthedocs.io/en/stable/source/tutorial2.html
    # 
    # =============================================================
    # Create list of city coordinates
    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3)]

    # Initialize fitness function object using coords_list
    fitness_coords = mlrose.TravellingSales(coords = coords_list)

    # Create list of distances between pairs of cities
    dist_list = [(0, 1, 3.1623), (0, 2, 4.1231), (0, 3, 5.8310), (0, 4, 4.2426), \
                (0, 5, 5.3852), (0, 6, 4.0000), (0, 7, 2.2361), (1, 2, 1.0000), \
                (1, 3, 2.8284), (1, 4, 2.0000), (1, 5, 4.1231), (1, 6, 4.2426), \
                (1, 7, 2.2361), (2, 3, 2.2361), (2, 4, 2.2361), (2, 5, 4.4721), \
                (2, 6, 5.0000), (2, 7, 3.1623), (3, 4, 2.0000), (3, 5, 3.6056), \
                (3, 6, 5.0990), (3, 7, 4.1231), (4, 5, 2.2361), (4, 6, 3.1623), \
                (4, 7, 2.2361), (5, 6, 2.2361), (5, 7, 3.1623), (6, 7, 2.2361)]

    # Initialize fitness function object using dist_list
    fitness_dists = mlrose.TravellingSales(distances = dist_list)

    # Define optimization problem object
    problem_fit = mlrose.TSPOpt(length = 8, fitness_fn = fitness_coords, maximize=False)

    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3)]

    # Define optimization problem object
    problem_no_fit = mlrose.TSPOpt(length = 8, coords = coords_list, maximize=False)

    part1_6 = Part1(name='TSP', fitness=coords_list,
                    problem=problem_no_fit, init_state=None)
    part1_6.runAll(savePath)

    # Knapsack
    weights = np.random.randint(2, high=20, size=50)
    values = np.random.randint(2, high=100, size=50)
    max_weight_pct = 0.8
    fitness = mlrose.Knapsack(weights, values, max_weight_pct)
    knapsackProblem = mlrose.DiscreteOpt(length=50,
        fitness_fn=fitness, maximize=False, max_val=2)

    part1_7 = Part1(name='Knapsack', fitness=fitness,
                    problem=knapsackProblem, init_state=None)
    part1_7.runAll(savePath)
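For reference, the mlrose tutorial that the TSP portion above is adapted from solves problem_fit directly with the genetic algorithm. A sketch along those lines (problem_fit is the coordinate-based problem built above; indexing the result keeps the call compatible with either mlrose variant):

# Sketch: solve the tutorial TSP problem with the genetic algorithm.
result = mlrose.genetic_alg(problem_fit, mutation_prob=0.2,
                            max_attempts=100, random_state=2)
print("Best tour:", result[0])
print("Tour length:", result[1])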
Example #12
    def run_complexity(self, fitness_fn, mode=None):
        if mode == 1:
            self.run_ga_hyper_params(fitness_fn)
        elif mode == 2:
            self.run_rhc_hyper_params(fitness_fn)
        elif mode == 3:
            self.run_sa_hyper_params(fitness_fn)
        elif mode == 4:
            self.run_mimic_hyper_params(fitness_fn)
        elif not mode:
            fitness_name = fitness_fn.__class__.__name__
            print("Running %s" % fitness_name)
            init_states = {}
            knap_fitnesses = {}
            tsp_fitnesses = {}
            tries = 1
            for x in 2**np.arange(3, 9):
                n = int(x)
                fitness_dists = mlrose.TravellingSales(distances=get_coords(n))
                tsp_fitnesses[n] = fitness_dists
                edges = []
                for _ in range(int(n * 0.75)):
                    a = r.randint(0, n - 1)
                    b = r.randint(0, n - 1)
                    while b == a:
                        b = r.randint(0, n - 1)
                    edges.append((a, b))

                fitness_fn_knap = mlrose.MaxKColor(edges=edges)
                init_states[n] = []
                knap_fitnesses[n] = fitness_fn_knap
                for y in range(tries):
                    init_states[n].append(get_init_state(n))

            for n, init_states_list in init_states.items():
                if fitness_name == 'MaxKColor':
                    fitness_fn = knap_fitnesses[n]
                if fitness_name == 'TravellingSales':
                    fitness_fn = tsp_fitnesses[n]
                print(n)
                print('%s: i=%d' % ('random_hill_climb', n))
                total_score = 0
                total_iter = 0
                start = time.time()
                for init_state in init_states_list:
                    problem = mlrose.DiscreteOpt(length=len(init_state),
                                                 fitness_fn=fitness_fn,
                                                 maximize=True)
                    if fitness_name == 'TravellingSales':
                        problem = mlrose.TSPOpt(length=n,
                                                fitness_fn=fitness_fn)
                    max_attempts = self.get_best_param(
                        problem=fitness_name,
                        algo='random_hill_climb',
                        param='max_attempts')
                    restarts = self.get_best_param(problem=fitness_name,
                                                   algo='random_hill_climb',
                                                   param='restarts')
                    best_state, best_fitness, curve = mlrose.random_hill_climb(
                        problem,
                        max_attempts=max_attempts,
                        max_iters=10000,
                        random_state=1,
                        curve=True,
                        restarts=restarts)
                    total_iter += len(curve)
                    total_score += np.mean(curve)
                end = time.time()
                print('The fitness at the best state is: ',
                      total_score / tries)
                self.track(problem=fitness_name,
                           algo='random_hill_climb',
                           i=n,
                           score=total_score / tries,
                           training_time=(end - start) / tries,
                           max_iter=total_iter / tries)

            for n, init_states_list in init_states.items():
                if fitness_name == 'MaxKColor':
                    fitness_fn = knap_fitnesses[n]
                if fitness_name == 'TravellingSales':
                    fitness_fn = tsp_fitnesses[n]
                print(n)
                print('%s: i=%d' % ('simulated_annealing', n))
                total_score = 0
                total_iter = 0
                start = time.time()
                for init_state in init_states_list:
                    problem = mlrose.DiscreteOpt(length=len(init_state),
                                                 fitness_fn=fitness_fn,
                                                 maximize=True)
                    if fitness_name == 'TravellingSales':
                        problem = mlrose.TSPOpt(length=n,
                                                fitness_fn=fitness_fn)
                    max_attempts = self.get_best_param(
                        problem=fitness_name,
                        algo='simulated_annealing',
                        param='max_attempts')
                    best_state, best_fitness, curve = mlrose.simulated_annealing(
                        problem,
                        max_attempts=max_attempts,
                        max_iters=10000,
                        random_state=1,
                        curve=True)
                    total_score += np.mean(curve)
                    total_iter += len(curve)
                end = time.time()
                print('The fitness at the best state is: ',
                      total_score / tries)
                self.track(problem=fitness_name,
                           algo='simulated_annealing',
                           i=n,
                           score=total_score / tries,
                           training_time=(end - start) / tries,
                           max_iter=total_iter / tries)

            for n, init_states_list in init_states.items():
                if fitness_name == 'MaxKColor':
                    fitness_fn = knap_fitnesses[n]
                if fitness_name == 'TravellingSales':
                    fitness_fn = tsp_fitnesses[n]
                print(n)
                print('%s: i=%d' % ('genetic_alg', n))
                total_score = 0
                total_iter = 0
                start = time.time()
                for init_state in init_states_list:
                    problem = mlrose.DiscreteOpt(length=len(init_state),
                                                 fitness_fn=fitness_fn,
                                                 maximize=True)
                    if fitness_name == 'TravellingSales':
                        problem = mlrose.TSPOpt(length=n,
                                                fitness_fn=fitness_fn)
                    mutation_prob = self.get_best_param(problem=fitness_name,
                                                        algo='genetic_alg',
                                                        param='mutation_prob')
                    pop_size = self.get_best_param(problem=fitness_name,
                                                   algo='genetic_alg',
                                                   param='pop_size')
                    best_state, best_fitness, curve = mlrose.genetic_alg(
                        problem,
                        pop_size=pop_size,
                        mutation_prob=mutation_prob,
                        max_iters=10000,
                        random_state=1,
                        curve=True)
                    total_score += np.mean(curve)
                    total_iter += len(curve)
                end = time.time()
                print('The fitness at the best state is: ',
                      total_score / tries)
                self.track(problem=fitness_name,
                           algo='genetic_alg',
                           i=n,
                           score=total_score / tries,
                           training_time=(end - start) / tries,
                           max_iter=total_iter / tries)

            for n, init_states_list in init_states.items():
                if fitness_name == 'MaxKColor':
                    fitness_fn = knap_fitnesses[n]
                if fitness_name == 'TravellingSales':
                    fitness_fn = tsp_fitnesses[n]
                print('%s: i=%d' % ('mimic', n))
                if n > 256:
                    break
                total_score = 0
                total_iter = 0
                start = time.time()
                for init_state in init_states_list:
                    problem = mlrose.DiscreteOpt(length=len(init_state),
                                                 fitness_fn=fitness_fn,
                                                 maximize=True)
                    if fitness_name == 'TravellingSales':
                        problem = mlrose.TSPOpt(length=n,
                                                fitness_fn=fitness_fn)
                    keep_pct = self.get_best_param(problem=fitness_name,
                                                   algo='mimic',
                                                   param='keep_pct')
                    pop_size = self.get_best_param(problem=fitness_name,
                                                   algo='mimic',
                                                   param='pop_size')
                    best_state, best_fitness, curve = mlrose.mimic(
                        problem,
                        max_iters=10000,
                        random_state=1,
                        curve=True,
                        pop_size=pop_size,
                        keep_pct=keep_pct,
                        max_attempts=10)
                    total_score += np.mean(curve)
                    total_iter += len(curve)
                end = time.time()
                print('The fitness at the best state is: ',
                      total_score / tries)
                self.track(problem=fitness_name,
                           algo='mimic',
                           i=n,
                           score=total_score / tries,
                           training_time=(end - start) / tries,
                           max_iter=total_iter / tries)
Example #13
    random.seed(seed)
    num_cities = 50
    random_list = list(itertools.product(range(0, 99), range(0, 99)))
    coords_list = random.sample(random_list, num_cities)

    # Initialize fitness function object using coords_list
    # fitness_coords = mlrose.TravellingSales(coords=coords_list)
    fitness = mlrose.CustomFitness(
        util.fit_eval_count(mlrose.TravellingSales, coords=coords_list))
    fitness.problem_type = 'tsp'

    SAschedule = mlrose.GeomDecay(init_temp=0.1, decay=0.5, min_temp=0.01)

    # Define optimization problem object
    problem_fit = mlrose.TSPOpt(length=num_cities,
                                fitness_fn=fitness,
                                maximize=True)

    run_ro_algos('TSP',
                 problem_fit,
                 gapop=600,
                 gamut=0.6,
                 mimpop=700,
                 mimpct=0.3,
                 seed=seed,
                 min=True,
                 sa_sched=SAschedule)

    # Knapsack - MIMIC's best
    print('Knapsack - MIMIC best')
    random.seed(seed)
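util.fit_eval_count is a project helper that is not shown in this excerpt. A plausible sketch, assuming it wraps an mlrose fitness class so every evaluation increments a counter before delegating to the real fitness function (the counter name and structure are hypothetical):

EVAL_COUNT = 0

def fit_eval_count(fitness_cls, **kwargs):
    # Assumed helper: build the fitness object and return a callable that
    # counts evaluations, suitable for mlrose.CustomFitness.
    fitness = fitness_cls(**kwargs)

    def evaluate(state):
        global EVAL_COUNT
        EVAL_COUNT += 1
        return fitness.evaluate(state)

    return evaluate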
Example #14
    plt.savefig(name)

# Problem definition
length = 35
eval_count = 0


# Initialize custom fitness function object.
# Intended as pairwise links among ten cities (in order 0 to 9: Rome, Florence,
# Barcelona, Paris, London, Amsterdam, Berlin, Prague, Budapest, Venice), but
# the list holds (i, j) index pairs, which TravellingSales below treats as 2-D
# coordinates rather than as (i, j, distance) triples.
distances = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3), (2, 3),
             (0, 4), (1, 4), (2, 4), (3, 4), (0, 5), (1, 5),
             (2, 5), (3, 5)]

fitness = mlrh.TravellingSales(coords=distances)
prob = mlrh.TSPOpt(length=14, fitness_fn=fitness, maximize=True)

experiment_name = "tsp_prob"
output_directory = "tsp"


# SA
sa = mlrh.SARunner(problem=prob,
                   experiment_name=experiment_name,
                   output_directory=output_directory,
                   seed=random_state,
                   max_attempts=200,
                   iteration_list=[2000],
                   temperature_list=[0.01, 0.1, 1, 10, 100, 1000],
                   decay_list=[mlrh.GeomDecay, mlrh.ExpDecay, mlrh.ArithDecay])
sa_stats, sa_curve = sa.run()
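To get a feel for the temperature and decay grid that the runner sweeps, the three schedule classes can be evaluated directly. The short sketch below is not part of the original experiment; it reuses the mlrh alias from the snippet above and simply prints how each schedule cools an initial temperature of 10 over the first hundred iterations.

for schedule in [mlrh.GeomDecay(init_temp=10), mlrh.ExpDecay(init_temp=10),
                 mlrh.ArithDecay(init_temp=10)]:
    # evaluate(t) returns the temperature at iteration t for the given schedule
    temps = [schedule.evaluate(t) for t in range(0, 101, 25)]
    print(type(schedule).__name__, [round(temp, 4) for temp in temps])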
Esempio n. 15
0
def TSP(loop=False):
    rs = 1  #set random state
    ma = 200  #max_attempts
    # A small hand-made coordinate list (immediately replaced by the random set below)
    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5),
                   (2, 3)]

    # Generate `cities` unique city coordinates at random on a 0-98 grid
    cities = 30
    random.seed(6)
    random_list = list(itertools.product(range(0, 99), range(0, 99)))
    coords_list = random.sample(random_list, cities)
    print(coords_list)

    print(len(coords_list))
    # Initialize fitness function object using coords_list
    fitness_coords = mlrose.TravellingSales(coords=coords_list)

    # Define optimization problem object (TSP is a minimization problem)
    problem_fit = mlrose.TSPOpt(length=len(coords_list),
                                fitness_fn=fitness_coords,
                                maximize=False)

    # Solve problem using the genetic algorithm
    best_state, best_fitness = mlrose.genetic_alg(problem=problem_fit,
                                                  mutation_prob=0.2,
                                                  max_attempts=200,
                                                  random_state=rs)

    print(best_state)
    print(best_fitness)

    best_state, best_fitness = mlrose.random_hill_climb(problem_fit,
                                                        random_state=rs)

    print(best_state)
    print(best_fitness)

    best_state, best_fitness = mlrose.simulated_annealing(problem_fit,
                                                          random_state=rs)

    print(best_state)
    print(best_fitness)

    best_state, best_fitness = mlrose.mimic(problem_fit,
                                            keep_pct=0.2,
                                            random_state=rs)

    print(best_state)
    print(best_fitness)
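    # Optional sanity check (not in the original code): recompute the tour length of
    # the most recent best_state directly from coords_list, assuming the Euclidean
    # objective that mlrose.TravellingSales(coords=...) uses (the tour returns to
    # its starting city).
    def tour_length(state, coords):
        total = 0.0
        for i in range(len(state)):
            x1, y1 = coords[state[i]]
            x2, y2 = coords[state[(i + 1) % len(state)]]
            total += ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
        return total

    print('Recomputed tour length:', tour_length(best_state, coords_list))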

    #iterations=[i*2 for i in range(0,50)]
    iterations = [5, 10, 30, 50, 100, 250, 500, 1000, 2000, 3000]
    GA_fitness = []
    RHC_fitness = []
    SA_fitness = []
    MIMIC_fitness = []

    GA_time = []
    RHC_time = []
    SA_time = []
    MIMIC_time = []

    if loop:

        import time  # used for the wall-clock timing below

        for max_it in iterations:
            start_time = time.time()
            best_state, best_fitness = mlrose.genetic_alg(problem=problem_fit,
                                                          pop_size=200,
                                                          mutation_prob=0.2,
                                                          max_iters=max_it,
                                                          max_attempts=ma,
                                                          random_state=rs)
            end_time = time.time()
            elapsed = end_time - start_time  # avoid shadowing the time module
            GA_time.append(elapsed)
            GA_fitness.append(1 / best_fitness)

            start_time = time.time()
            best_state, best_fitness = mlrose.random_hill_climb(
                problem_fit,
                max_iters=max_it,
                random_state=rs,
                max_attempts=ma)
            end_time = time.time()
            elapsed = end_time - start_time
            RHC_time.append(elapsed)
            RHC_fitness.append(1 / best_fitness)

            start_time = time.time()
            best_state, best_fitness = mlrose.simulated_annealing(
                problem_fit,
                max_iters=max_it,
                random_state=rs,
                max_attempts=ma)
            end_time = time.time()
            elapsed = end_time - start_time
            SA_time.append(elapsed)
            SA_fitness.append(1 / best_fitness)

            start_time = time.time()
            best_state, best_fitness = mlrose.mimic(problem_fit,
                                                    keep_pct=0.1,
                                                    max_iters=max_it,
                                                    random_state=rs,
                                                    max_attempts=ma)
            end_time = time.time()
            elapsed = end_time - start_time
            MIMIC_time.append(elapsed)
            MIMIC_fitness.append(1 / best_fitness)

        # plt.plot(nn_model.fitness_curve, 'b-')
        # plt.plot(iterations,train_accuracy,'bo-')
        # plt.plot(iterations,test_accuracy,'bo-',label='Accuracy')
        plt.title("GA Fitness loop")
        plt.plot(iterations, GA_fitness, 'r-', label='GA')
        plt.plot(iterations, RHC_fitness, 'b-', label='RHC')
        plt.plot(iterations, SA_fitness, '-', color='orange', label='SA')
        plt.plot(iterations, MIMIC_fitness, 'g-', label='MIMIC')
        plt.xlabel('Iterations')
        plt.ylabel('Fitness')
        plt.legend()
        plt.grid(True)
        plt.savefig("GA Fitness loop")
        plt.show()

        #Time
        plt.title("GA time loop")
        plt.plot(iterations, GA_time, 'r-', label='GA')
        plt.plot(iterations, RHC_time, 'b-', label='RHC')
        plt.plot(iterations, SA_time, '-', color='orange', label='SA')
        plt.plot(iterations, MIMIC_time, 'g-', label='MIMIC')
        plt.xlabel('Iterations')
        plt.ylabel('Time (s)')
        plt.legend()
        plt.grid(True)
        plt.savefig("GA time loop")
        plt.show()
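    # Illustrative sketch (not used by the code below): the repeated start/stop
    # timing around each solver call could be factored into a small helper.
    def timed(solver, *args, **kwargs):
        t0 = time.time()
        result = solver(*args, **kwargs)
        return result, time.time() - t0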

    timecompare = []
    #Fitness curve
    import time
    start_time = time.time()
    best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(
        problem=problem_fit,
        pop_size=200,
        mutation_prob=0.5,
        curve=True,
        max_attempts=ma,
        random_state=rs)
    end_time = time.time()
    timecompare.append((end_time - start_time))

    start_time = time.time()
    best_state, best_fitness, rhcfitness_curve = mlrose.random_hill_climb(
        problem=problem_fit, curve=True, max_attempts=ma, random_state=rs)
    end_time = time.time()
    timecompare.append((end_time - start_time))

    start_time = time.time()
    best_state, best_fitness, safitness_curve = mlrose.simulated_annealing(
        problem=problem_fit, curve=True, max_attempts=ma, random_state=rs)
    end_time = time.time()
    timecompare.append((end_time - start_time))

    start_time = time.time()
    best_state, best_fitness, mimicfitness_curve = mlrose.mimic(
        problem=problem_fit,
        pop_size=200,
        keep_pct=0.5,
        curve=True,
        max_attempts=ma,
        random_state=rs)
    end_time = time.time()
    timecompare.append((end_time - start_time))

    plt.figure()
    plt.title("TSP fitness vs iterations using 4 random algorithm")
    plt.plot(gafitness_curve, label='GA', color='r')
    plt.plot(rhcfitness_curve, label='RHC', color='b')
    plt.plot(safitness_curve, label='SA', color='orange')
    plt.plot(mimicfitness_curve, label='MIMIC', color='g')
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("TSP fitness curve")
    plt.show()

    # Plot time comparison
    plt.figure()
    algorithms = ['GA', 'RHC', 'SA', 'MIMIC']
    plt.bar(algorithms, timecompare)
    plt.title("Running time for TSP (seconds)")
    plt.ylabel('Time (s)')
    plt.xlabel('Random search algorithms')
    # plt.ylim(bottom=0,top=1.1)
    # plt.plot(train_size, score, 'o-', label='score')
    plt.tight_layout()
    i = 0
    for a in algorithms:
        plt.text(a,
                 timecompare[i] + 0.05,
                 '%.2f' % timecompare[i],
                 ha='center',
                 va='bottom',
                 fontsize=11)
        i += 1
    plt.savefig("Running time for TSP")
    plt.show()

    #MIMIC Fitness vs Iterations as cpt changes
    CPT = [0.1, 0.3, 0.5, 0.7, 0.9]
    plt.figure()
    for c in CPT:
        best_state, best_fitness, mimicfitness_curve = mlrose.mimic(
            problem_fit,
            keep_pct=c,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(mimicfitness_curve, label='keep_pct = ' + str(c))

    plt.title("TSP using MIMIC with different values of the keep_pct parameter")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("MIMIC parameter")
    plt.show()

    #GA Fitness vs Iterations as mutation prob changes
    Mutate = [0.1, 0.3, 0.5, 0.7, 0.9]
    plt.figure()
    for m in Mutate:
        best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(
            problem_fit,
            mutation_prob=m,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(gafitness_curve, label='mutation = ' + str(m))

    plt.title("TSP using GA with  different values of mutation probability")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("GA parameter")
    plt.show()

    #SA Fitness vs Iterations as schedule changes
    #schedule = mlrose.GeomDecay(init_temp=10, decay=0.95, min_temp=1)

    # Sweep the geometric decay rate with a fixed initial temperature of 10
    decay_r = [0.15, 0.35, 0.55, 0.75, 0.95]
    plt.figure()
    for d in decay_r:
        SAschedule = mlrose.GeomDecay(init_temp=10, decay=d, min_temp=1)
        best_state, best_fitness, safitness_curve = mlrose.simulated_annealing(
            problem_fit,
            schedule=SAschedule,
            max_attempts=ma,
            curve=True,
            random_state=rs)
        plt.plot(safitness_curve, label='decay rate = ' + str(d))

    plt.title("TSP using SA with different values of decay rate")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("SA parameter")
    plt.show()

    temps = [100000, 10000, 1000, 100, 10, 5]
    plt.figure()
    for t in temps:
        SAschedule = mlrose.GeomDecay(init_temp=t, decay=0.55, min_temp=1)
        best_state, best_fitness, safitness_curve = mlrose.simulated_annealing(
            problem_fit,
            schedule=SAschedule,
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(safitness_curve, label='Temperature = ' + str(t))

    plt.title("TSP using SA with different values of temperature")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("TSP SA temp")
    plt.show()

    # Grid of (pop_size, mutation_prob) combinations for the GA parameter sweep
    Mutatepop = [(100, 0.2), (100, 0.5), (100, 0.7), (200, 0.2), (200, 0.5),
                 (200, 0.7), (300, 0.2), (300, 0.5), (300, 0.7)]
    plt.figure()
    for m in Mutatepop:
        best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(
            problem_fit,
            pop_size=m[0],
            mutation_prob=m[1],
            curve=True,
            max_attempts=ma,
            random_state=rs)
        plt.plot(gafitness_curve,
                 label='pop size = ' + str(m[0]) + ', mutation = ' + str(m[1]))

    plt.title("TSP using GA with  different parameters")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("GA parameter mutate pop")
    plt.show()
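    # Sketch (not part of the original run): the hand-written Mutatepop grid above
    # could equivalently be generated with itertools.product, which scales better
    # when more population sizes or mutation probabilities are added.
    import itertools
    pop_mutation_grid = list(itertools.product([100, 200, 300], [0.2, 0.5, 0.7]))
    print(pop_mutation_grid)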