Example No. 1
def main():
    name_of_exp = "One Max"
    # Create list of city coordinates
    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3)]
    mimic = []
    # Initialize fitness function object using coords_list
    fitness_coords = mlrose.TravellingSales(coords=coords_list)
    problem = mlrose.TSPOpt(length=8, fitness_fn=fitness_coords,
                            maximize=False)
    z_s = ['RHC', 'SA', 'GA', 'MIMIC']
    for i in [0.1,0.2,0.3,0.4,0.5]:
        best_state, best_fitness, learning_curve, timing_curve = mlrose.genetic_alg(problem, pop_size=100,
                                                                              mutation_prob=i,
                                                                              max_attempts=100,
                                                                              max_iters=100, curve=True,
                                                                              random_state=1)
        mimic.append(learning_curve)
        print(i)
        print(best_fitness)
    for x, z in zip([0.1,0.2,0.3,0.4,0.5], mimic):
        plt.plot(z, label=str(x))
    plt.legend()
    plt.title('GA Randomized Optimization MutationProb vs Fitness Curve (TSP)')
    plt.xlabel('Function iteration count')
    plt.ylabel('Fitness function value')
    plt.show()
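The four-value unpacking above (learning_curve, timing_curve) relies on a locally modified fork of mlrose; the published library returns at most three values. A minimal sketch of the same mutation-probability sweep against the stock mlrose API (for minimization problems the recorded curve holds the internally maximized, i.e. negated, tour lengths):

import mlrose
import matplotlib.pyplot as plt

coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3)]
problem = mlrose.TSPOpt(length=8,
                        fitness_fn=mlrose.TravellingSales(coords=coords_list),
                        maximize=False)

curves = []
for prob in [0.1, 0.2, 0.3, 0.4, 0.5]:
    # With curve=True the stock library returns (best_state, best_fitness, fitness_curve).
    _, best_fitness, fitness_curve = mlrose.genetic_alg(
        problem, pop_size=100, mutation_prob=prob,
        max_attempts=100, max_iters=100, curve=True, random_state=1)
    curves.append(fitness_curve)
    print(prob, best_fitness)

for prob, curve in zip([0.1, 0.2, 0.3, 0.4, 0.5], curves):
    plt.plot(curve, label=str(prob))
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Fitness (internal, maximized)')
plt.show()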
Example No. 2
    def ga(self):
        print("Creating GA curve")
        problem, _ = self.get_prob(t_pct=0.15)
        plt.close()
        plt.figure()
        for pop_size in [100, 200, 500]:
            for mutation_prob in [0.1, 0.2, 0.5]:
                _, _, fitness_curve = mlrose.genetic_alg(
                    problem,
                    mutation_prob=mutation_prob,
                    pop_size=pop_size,
                    max_attempts=100,
                    max_iters=5000,
                    curve=True)
                plt.plot(fitness_curve,
                         label="mutation_prob={}, pop_size={}".format(
                             mutation_prob, pop_size))

        plt.title("{}: Genetic Algorithm".format(self.prob_name))
        plt.legend(loc="best")
        plt.xlabel('Number of Iterations')
        plt.ylabel('Fitness')
        plt.savefig(
            os.path.join(self.output_path,
                         "{}_GA Analysis.png".format(self.prob_name)))
Example No. 3
def gera_grafo(df) -> Grafo:
    global grafo

    grafo = Grafo(df)

    for i in range(len(colunas)):
        # Get the city name by its index
        cidade_origem = colunas[i]
        for j in range(len(colunas)):
            # Get the edge weight and destination, and add the edge to the Grafo object
            destino = df.index[j]
            peso = df.iloc[j][cidade_origem]
            #aresta = dict(rota=(cidade_origem, destino), peso=peso)
            if peso != '-':
                aresta = (int(cidade_origem), int(destino), int(peso))
                grafo.adiciona_arestas(aresta)

    fitness = mlrose.TravellingSales(distances=grafo.arestas)
    # Define optimization problem object
    problem_fit = mlrose.TSPOpt(length=8, fitness_fn=fitness, maximize=False)
    best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                  mutation_prob=0.2,
                                                  max_attempts=100,
                                                  random_state=2)
    print(best_fitness)
    return grafo
Example No. 4
    def test_genetic_alg_discrete_max():
        """Test genetic_alg function for a discrete maximization problem"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
        _, _, curve = genetic_alg(problem, max_attempts=50, timing=True)

        assert (curve.shape[1] == 2)
Example No. 5
def find_best_param_ga_pop_size(seed, problem):

    pop_size = [10, 20, 40, 60, 80]

    # convert int to str
    pop_size_string = []
    for size in pop_size:
        pop_size_string.append(str(size))

    fitness_list = []
    iterations = [1, 250, 500, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000]

    for size in pop_size:
        # Reset the scores for each population size so the mean is per-size,
        # not cumulative across sizes.
        score = []
        for iter in iterations:
            best_state, best_fitness = mlrose.genetic_alg(problem,
                                                                  max_attempts=10,
                                                                  max_iters=iter,
                                                                  random_state=seed,
                                                                  pop_size=size)
            score.append(best_fitness)

        fitness_list.append(np.mean(score))
        print("when size = ", size, "  the mean score is " , np.mean(score))

    ga_parameter_curve(pop_size_string, fitness_list, "Pop_size", 0.4, "FP")
Example No. 6
def rank(fnames, save=True):

    dist_mat = np.load(fnames['dmat'])
    dist_list = mat2tuples(dist_mat)

    # define fitness function object
    fitness_dists = mlrose.TravellingSales(distances=dist_list)

    # define optimization problem object
    n = dist_mat.shape[0]
    problem_fit = mlrose.TSPOpt(length=n,
                                fitness_fn=fitness_dists,
                                maximize=False)

    # solve problem using the genetic algorithm
    best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                  mutation_prob=0.2,
                                                  max_attempts=100,
                                                  random_state=2)

    # retrieve ranked list
    cand = load(fnames['cand'])
    ranked_cand = cand.loc[best_state]

    # save the output
    fname_ranked = None
    if save:
        fname, ext = os.path.splitext(fnames['cand'])
        fname_ranked = fname + '_ranked' + ext
        write(fname_ranked, ranked_cand)
        print('Ordered candidates saved to {}'.format(fname_ranked))

    return fname_ranked
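The mat2tuples helper used above is not shown. A minimal sketch of what such a converter might look like, assuming a symmetric NumPy distance matrix (the name and exact behavior are assumptions):

import numpy as np

def mat2tuples(dist_mat):
    # Convert a symmetric distance matrix into the (i, j, distance) triples
    # expected by mlrose.TravellingSales(distances=...); only the upper
    # triangle is needed since the distances are symmetric.
    n = dist_mat.shape[0]
    return [(i, j, float(dist_mat[i, j]))
            for i in range(n) for j in range(i + 1, n)]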
Example No. 7
def encuentra_circuito(ls_basicas):

    n_bas = [i for i in range(len(ls_basicas))]
    comb = itertools.combinations(n_bas, 2)

    dist_list = []
    for i in comb:
        costo = aptitud2(ls_basicas[i[0]], ls_basicas[i[1]])
        dist_list.append((i[0], i[1], costo))
        #print(ls_basicas[i[0]], ls_basicas[i[1]], costo)

    # Initialize fitness function object using dist_list
    fitness_dists = mlrose.TravellingSales(distances=dist_list)
    problem_fit = mlrose.TSPOpt(length=len(ls_basicas),
                                fitness_fn=fitness_dists,
                                maximize=True)
    best_state, best_fitness = mlrose.genetic_alg(problem_fit, random_state=2)
    #print(best_state)
    #print(best_fitness)

    # =============================================================================
    #     for i in best_state:
    #         print(ls_basicas[i])
    # =============================================================================
    return list(best_state), best_fitness / 2
Example No. 8
def gen_alg(prob, **kwargs):
    start = time.time()
    _, best_score, curve, fit_evals = mlrose.genetic_alg(prob(),
                                                         curve=True,
                                                         **kwargs)
    end = time.time()
    return np.array([best_score, len(curve), fit_evals, end - start])
Example No. 9
def find_route(n, matrix, time_limit):
    good = []
    for i in range(1, 2**(n - 1)):
        bin = binary(i, n - 1) + "1"
        iArr = []
        posArr = []
        for j in range(n - 1):
            if (bin[j] == '1'):
                iArr.append(j)
                posArr.append(locations[j])

        if (len(iArr) > 1):
            new_mat = getMat(iArr, matrix)
            dist_list = []
            for j in range(len(iArr)):
                for k in range(j + 1, len(iArr)):
                    dist_list.append((j, k, new_mat[j][k]))

            fitness_dists = mlrose.TravellingSales(distances=dist_list)
            problem_fit = mlrose.TSPOpt(length=len(iArr),
                                        fitness_fn=fitness_dists,
                                        maximize=False)
            best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                          random_state=2)

            best_state_fr = []
            for k in range(len(iArr)):
                best_state_fr.append(iArr[best_state[k]])

            if (best_fitness < time_limit):
                good.append(best_state_fr)

    return good
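The binary and getMat helpers referenced above are not defined in this snippet. Minimal sketches of what they might look like, assuming binary returns a zero-padded bit string and getMat extracts the sub-matrix for the selected location indices (both names and behaviors are assumptions):

import numpy as np

def binary(i, width):
    # Zero-padded binary string of i, e.g. binary(5, 4) -> "0101".
    return format(i, 'b').zfill(width)

def getMat(idx, matrix):
    # Sub-matrix restricted to the rows and columns in idx.
    return np.asarray(matrix)[np.ix_(idx, idx)]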
Example No. 10
    def __compute_shortest_path(self, starting_zone_id, zones_ids):
        """
        Compute the shortest path within a sequence of geo-localized zones by solving the
        Travelling Salesperson Problem (TSP) optimization problem with a genetic algorithm.
        """
        zones = [starting_zone_id] + zones_ids
        coords_list = []

        # Extract zones coordinates
        grid_h = self.sim_input.grid_matrix.shape[0]

        for zone in zones:
            c = int(np.floor(zone / grid_h))
            r = int(zone - c * grid_h)

            coords_list.append((c, r))

        # Solve the TSP optimization problem
        tsp_problem = TSPOpt(length=len(coords_list),
                             coords=coords_list,
                             maximize=False)

        best_path, _ = genetic_alg(tsp_problem, max_iters=10, random_state=2)
        best_path = deque(best_path)

        # Rotate the path until it starts at position 0
        # (i.e. at the starting zone)
        P = best_path[0]

        while P != 0:
            best_path.rotate(1)
            P = best_path[0]

        return [zones[i] for i in list(best_path)]
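A standalone sketch of the rotation step above: the genetic algorithm returns the tour as a permutation of indices, and rotating it until index 0 comes first makes the route start at the starting zone without changing the cycle (the zone ids below are made up for illustration):

from collections import deque

zones = [42, 7, 13, 28]          # starting zone first, then the zones to visit
best_path = deque([1, 3, 0, 2])  # example permutation returned by the solver

while best_path[0] != 0:
    best_path.rotate(1)

route = [zones[i] for i in best_path]
print(route)  # [42, 13, 7, 28] -- the same cycle, starting at zone 42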
Example No. 11
def max_iterations(n,cords,fit,temp,mint):
    best_fit1=[]
    best_fit2=[]
    best_fit3=[]
    max_iter=[]
    for i in range(100,n,10):
        best_state,best_fitness1 = mlrose.genetic_alg(problem_fit, mutation_prob = 0.2, max_attempts = 100,max_iters=i)
        best_state,best_fitness2 = mlrose.mimic(problem_fit,pop_size=200,keep_pct=0.3,max_attempts=10,max_iters=i)
        best_state,best_fitness3 = mlrose.simulated_annealing(problem_fit,schedule=mlrose.GeomDecay(init_temp=temp,decay=0.9,min_temp=mint),max_attempts=100,max_iters=i,init_state=None)
        max_iter.append(i)
        best_fit1.append(best_fitness1)
        best_fit2.append(best_fitness2)
        best_fit3.append(best_fitness3)
        print(best_fit1,best_fit2,best_fit3)
    max_iter=np.asarray(max_iter)
    best_fit1=np.asarray(best_fit1)
    best_fit2=np.asarray(best_fit2)
    best_fit3=np.asarray(best_fit3)
    line1, = plt.plot(max_iter, best_fit1, color='r', label='GA')
    line2, = plt.plot(max_iter, best_fit2, color='g', label='MIMIC')
    line3, = plt.plot(max_iter, best_fit3, color='b', label='SA')
    plt.legend()
    plt.ylabel('Fitness score')
    plt.xlabel('Number of iterations')
    plt.show()
    return None
Example No. 12
def calculate_route(super_matrix, shopping_list):
    shopping_list.append("0")  # go through the entrance
    bfs, routes = calc_distance_between_all_list(super_matrix, shopping_list)
    bfs = sorted(bfs)
    (new_input, input_converter_dict) = input_converter(bfs)

    # Initialize fitness function object using dist_list
    fitness_dists = mlrose.TravellingSales(distances=new_input)
    # Define optimization problem object
    problem_fit = mlrose.TSPOpt(length=len(shopping_list),
                                fitness_fn=fitness_dists,
                                maximize=False)
    # Solve problem using the genetic algorithm
    # best_state, best_fitness = mlrose.genetic_alg(problem_fit, random_state = 2)
    best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                  mutation_prob=0.2,
                                                  max_attempts=200,
                                                  pop_size=200,
                                                  random_state=0)

    output_converter_dict = dict(
        (v, k) for k, v in input_converter_dict.items())
    final_result = [output_converter_dict[x] for x in best_state]
    register_index = final_result.index("0")
    final_result = final_result[register_index:] + final_result[:register_index]
    final_route = []

    for (k, v) in zip(final_result, final_result[1:] + final_result[:1]):
        for (x, y, z) in routes:
            if k == x and v == y:
                final_route += z
            elif k == y and v == x:
                final_route += z[::-1]

    return final_route, items_coor(super_matrix, shopping_list)
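The input_converter helper is not shown above. mlrose.TravellingSales expects node labels to be consecutive integers starting at 0, so the converter presumably remaps the original node names onto such indices and remembers the mapping; a minimal sketch of that idea (the name and signature are assumptions):

def input_converter(dist_triples):
    # Remap arbitrary node labels in (a, b, distance) triples onto 0..n-1
    # indices, as required by mlrose, and return the mapping that was used.
    labels = sorted({node for a, b, _ in dist_triples for node in (a, b)})
    label_to_index = {label: i for i, label in enumerate(labels)}
    converted = [(label_to_index[a], label_to_index[b], d)
                 for a, b, d in dist_triples]
    return converted, label_to_index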
Example No. 13
def find_best_param_ga_pop_size(seed, problem):

    pop_size = [10, 20, 45, 65, 85, 100]

    # convert int to str
    pop_size_string = []
    for size in pop_size:
        pop_size_string.append(str(size))

    fitness_list = []
    iters = [1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]

    for size in pop_size:
        # Reset the scores for each population size so the mean is per-size,
        # not cumulative across sizes.
        score = []
        for iter in iters:
            best_state, best_fitness = mlrose.genetic_alg(problem,
                                                          max_attempts=10,
                                                          max_iters=iter,
                                                          random_state=seed,
                                                          pop_size=size)
            score.append(best_fitness)
            print(
                "Travel Salesman Genetic Algorithm pop_size Fitness {}".format(
                    best_fitness))

        fitness_list.append(np.mean(score))
        print("when size = ", size, "  the mean score is ", np.mean(score))

    ga_parameter_curve(pop_size_string, fitness_list, "Pop_size", 0.4, "TSP")
Example No. 14
def run_ga(prob, value_range, num_runs):
    avgs = []
    times = []

    for value in value_range:
        problem = prob(value)
        print("\t\tValue: " + str(value))

        run_vals = []
        run_times = []

        for run in range(0, num_runs):
            print("\t\t\tRun " + str(run))
            start = timeit.default_timer()

            best_state, best_fitness = mlrose.genetic_alg(problem,
                                                          max_attempts=20,
                                                          mutation_prob=0.25)

            stop = timeit.default_timer()
            total_time = stop - start

            run_vals.append(best_fitness)
            run_times.append(total_time)

        avgs.append(np.mean(run_vals))
        times.append(np.mean(run_times))

    return avgs, times
Example No. 15
def tsp(dd, l):
    state = []
    for i in range(0, len(dd)):
        fitness_dists = mlrose.TravellingSales(distances=dd[i])
        problem_fit = mlrose.TSPOpt(length=len(l[i]), fitness_fn=fitness_dists, maximize=False)
        best_state, best_fitness = mlrose.genetic_alg(problem_fit, random_state=2)
        state.append(best_state.tolist())
    return state
Example No. 16
    def run_ga(self):
        iters = max(self.iterations)
        self._setup()

        for pc in self.population_sizes:
            for mr in self.mutation_rates:
                np.random.seed(self.seed)
                start = time.perf_counter()
                mlrose.genetic_alg(
                    self.problem,
                    mutation_prob=mr,
                    max_attempts=self.max_attempts,
                    pop_size=pc,
                    max_iters=int(iters),
                    curve=self.generate_curves,
                    random_state=self.seed,
                    state_fitness_callback=self.iteration_callback_,
                    callback_user_info=(pc, mr, start))
Example No. 17
    def test_genetic_alg_continuous_min():
        """Test genetic_alg function for a continuous minimization problem"""

        problem = ContinuousOpt(5, OneMax(), maximize=False)
        best_state, best_fitness, _ = genetic_alg(problem, max_attempts=200)

        x = np.array([0, 0, 0, 0, 0])

        assert (np.allclose(best_state, x, atol=0.5) and best_fitness < 1)
Example No. 18
    def test_genetic_alg_discrete_min():
        """Test genetic_alg function for a discrete minimization problem"""

        problem = DiscreteOpt(5, OneMax(), maximize=False)
        best_state, best_fitness, _ = genetic_alg(problem, max_attempts=50)

        x = np.array([0, 0, 0, 0, 0])

        assert (np.array_equal(best_state, x) and best_fitness == 0)
Example No. 19
def api_tsp():
    # Check whether a list of places was provided as part of the URL.
    # If it was, assign it to a variable.
    # If not, display an error in the browser.
    places_to_visit = []
    if 'places' in request.args:
        temp = request.args['places']
        splitItems = temp.split(",")
        # listTemp = [int(i) for i in splitItems]
        print(request.args['places'])
        places_to_visit = splitItems
        # id = int(request.args['id'])
    else:
        return "Error: No places field provided. Please specify places."

    # Create an empty list for our results
    results = []

    distance_matrix=[]
    for i in range(len(places_to_visit)):
        for j in range(i+1,len(places_to_visit)):
            origin=places_to_visit[i]
            dest=places_to_visit[j]
            key=(origin,dest)

            req='https://maps.googleapis.com/maps/api/distancematrix/json?origins=place_id:'+origin+'&destinations=place_id:'+dest+'&key='+my_API_key
            Response = requests.get(req)
            Response=Response.json()

            #pprint.pprint(Response["rows"][0]["elements"][0]["duration"]["value"])
            value=Response["rows"][0]["elements"][0]["duration"]["value"]  # travel time in seconds
            new_element=(i,j,value)
            distance_matrix.append(new_element)
    fitness = mlrose.TravellingSales(distances = distance_matrix)
    # We want to visit all the places of our list "places_to_visit"
    problem = mlrose.TSPOpt(length = len(places_to_visit), fitness_fn = fitness,
                                maximize=False)

    best_state, best_fitness = mlrose.genetic_alg(problem, random_state = 0)
    ordered_places_to_visit=[]
    for i in best_state:
      ordered_places_to_visit.append(places_to_visit[i])
    print(places_to_visit)

    temp = {}
    temp['results'] = ordered_places_to_visit
    results.append(temp)
    # Loop through the data and match results that fit the requested ID.
    # IDs are unique, but other fields might return many results
    # for book in books:
    #     if book['id'] in listTemp:
    #         results.append(book)

    # Use the jsonify function from Flask to convert our list of
    # Python dictionaries to the JSON format.
    return jsonify(results)
Example No. 20
def genetic(problem, init_state, max_attempts, iterations):
    genA_best_state, genA_best_fitness = mlrose.genetic_alg(
        problem,
        pop_size=200,
        mutation_prob=0.1,
        max_attempts=max_attempts,
        max_iters=iterations,
        curve=False,
        random_state=1)
    return genA_best_fitness
Example No. 21
    def genetic(self):
        problem_no_fit = mlrose.TSPOpt(length=len(self.coord),
                                       coords=self.coord,
                                       maximize=False)
        best_state, best_fitness = mlrose.genetic_alg(problem_no_fit,
                                                      mutation_prob=0.2,
                                                      max_attempts=100,
                                                      random_state=2)

        return best_state
Example No. 22
def tsp_problem():
    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5),
                   (2, 3)]
    fitness_coords = mlrose.TravellingSales(coords=coords_list)

    # Define optimization problem object
    problem_fit = mlrose.TSPOpt(length=8,
                                fitness_fn=fitness_coords,
                                maximize=True)

    best_state, best_fitness = mlrose.genetic_alg(problem_fit, random_state=2)

    print(best_state)
    print(best_fitness)

    best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                  mutation_prob=0.2,
                                                  max_attempts=100,
                                                  random_state=2)
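Note that this snippet builds the TSP with maximize=True, which rewards longer tours. The mlrose TSP tutorial solves the same eight-city instance as a minimization; a minimal sketch of that variant (assuming the original mlrose release, which returns two values when no curve is requested):

import mlrose

coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3)]
fitness_coords = mlrose.TravellingSales(coords=coords_list)

# maximize=False makes the genetic algorithm search for the shortest tour.
problem_fit = mlrose.TSPOpt(length=8, fitness_fn=fitness_coords, maximize=False)

best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                              mutation_prob=0.2,
                                              max_attempts=100,
                                              random_state=2)
print(best_state, best_fitness)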
Example No. 23
    def GeneticAlgorithm(self):
        start = time.time()
        KSbest_state, KSbest_fitness, KScurve = genetic_alg(self.knapsack,
                                                            curve=True)
        gaKST = time.time() - start

        start = time.time()
        FPbest_state, FPbest_fitness, FPcurve = genetic_alg(self.fourpeaks,
                                                            curve=True,
                                                            max_attempts=20,
                                                            pop_size=400)
        gaFPT = time.time() - start

        start = time.time()
        CObest_state, CObest_fitness, COcurve = genetic_alg(self.countones,
                                                            curve=True)
        gaCOT = time.time() - start

        return KSbest_fitness, FPbest_fitness, CObest_fitness, gaKST, gaFPT, gaCOT, KScurve, FPcurve, COcurve
Example No. 24
def fit(length, fitness):
    problem = mlrose.DiscreteOpt(length = length, fitness_fn = fitness, maximize = True, max_val = 2)

    iterations = [10,50,100,200,400,800,1600,3200]
    RHC, SA, GA, MM = ([],[],[],[])
    time_RHC, time_SA, time_GA, time_MM = ([],[],[],[])

    for iter in iterations:
        print ("max iterations = " + str(iter))
        start_time = time.time()
        best_overall = 0
        for times in range(10):
          best_state, best_fitness = mlrose.random_hill_climb(problem, max_attempts = 10, max_iters = iter, restarts = 0, init_state = np.random.randint(2, size=(length,)))
          # keep the best fitness found across the 10 random starts
          best_overall = max(best_overall, best_fitness)
          #print(best_state)
        RHC.append(best_overall)
        print(best_overall)
        time_RHC.append((time.time() - start_time)/10)
        
        start_time = time.time()
        best_overall = 0
        for times in range(10):
          best_state, best_fitness = mlrose.simulated_annealing(problem, schedule = mlrose.GeomDecay(), max_attempts = 10, max_iters = iter, init_state = np.random.randint(2, size=(length,)))
          # keep the best fitness found across the 10 random starts
          best_overall = max(best_overall, best_fitness)
          #print(best_state)
        SA.append(best_overall)
        print(best_overall)
        time_SA.append((time.time() - start_time)/10)

        start_time = time.time()
        best_fitness = 0
        best_state, best_fitness = mlrose.genetic_alg(problem, pop_size = 200, mutation_prob = 0.1, max_attempts = 10, max_iters = iter)
        #print(best_state)
        GA.append(best_fitness)
        print(best_fitness)
        time_GA.append((time.time() - start_time))

        start_time = time.time()
        best_fitness = 0
        best_state, best_fitness = mlrose.mimic(problem, pop_size = 200, keep_pct = 0.2, max_attempts = 10, max_iters = iter)
        #print(best_state)
        MM.append(best_fitness)
        print(best_fitness)
        time_MM.append((time.time() - start_time))
    
    plot(RHC, SA, GA, MM, time_RHC, time_SA, time_GA, time_MM, iterations)
    filewrite_array("iterations:", iterations)
    filewrite_array("Fitness(RHC):", RHC)
    filewrite_array("Fitness(SA):", SA)
    filewrite_array("Fitness(GA):", GA)
    filewrite_array("Fitness(MM):", MM)
    filewrite_array("Fitness(time_RHC):", time_RHC)
    filewrite_array("Fitness(time_SA):", time_SA)
    filewrite_array("Fitness(time_GA):", time_GA)
    filewrite_array("Fitness(time_MM):", time_MM)
Example No. 25
def optimization(problem):
    df_optim = pd.DataFrame(columns=algos)
    df_iter = pd.DataFrame(columns=algos)
    df_time = pd.DataFrame(columns=algos)

    for rs in random_states:
        # RHC
        tic = time.process_time()
        best_state_rhc, best_fitness_rhc, curve_rhc = \
            mlrose.random_hill_climb(problem, max_attempts=20, max_iters=100000,
                                     restarts=0, init_state=None, curve=True, random_state=rs)
        toc = time.process_time()
        time_rhc = toc - tic

        # SA
        tic = time.process_time()
        best_state_sa, best_fitness_sa, curve_sa = \
            mlrose.simulated_annealing(problem, schedule=mlrose.ExpDecay(init_temp=1.0, exp_const=0.005, min_temp=0.001),
                                       max_attempts = 20, max_iters = 100000,
                                       init_state = None, curve = True, random_state = rs)
        toc = time.process_time()
        time_sa = toc - tic

        # GA
        tic = time.process_time()
        best_state_ga, best_fitness_ga, curve_ga = \
            mlrose.genetic_alg(problem, pop_size=200, mutation_prob=0.1, max_attempts=20, max_iters=100000,
                               curve=True, random_state=rs)
        toc = time.process_time()
        time_ga = toc - tic

        # MIMIC
        tic = time.process_time()
        best_state_m, best_fitness_m, curve_m = \
            mlrose.mimic(problem, pop_size=20, keep_pct=0.2, max_attempts=20, max_iters=100000,
                         curve=True, random_state=rs, fast_mimic=False)
        toc = time.process_time()
        time_m = toc - tic

        # df
        df_optim.loc[len(df_optim)] = [
            best_fitness_rhc, best_fitness_sa, best_fitness_ga, best_fitness_m
        ]
        df_iter.loc[len(df_iter)] = [
            len(curve_rhc),
            len(curve_sa),
            len(curve_ga),
            len(curve_m)
        ]
        df_time.loc[len(df_time)] = [time_rhc, time_sa, time_ga, time_m]
        print(rs)

    return (df_optim.mean(axis=0), df_iter.mean(axis=0), df_time.mean(axis=0))
Example No. 26
    def _run_with_ga(self, problem):
        fitness_curve = []
        if self.curve:
            fitted_weights, loss, fitness_curve = genetic_alg(
                problem,
                pop_size=self.pop_size,
                mutation_prob=self.mutation_prob,
                max_attempts=self.max_attempts
                if self.early_stopping else self.max_iters,
                max_iters=self.max_iters,
                curve=self.curve)
        else:
            fitted_weights, loss, _ = genetic_alg(
                problem,
                pop_size=self.pop_size,
                mutation_prob=self.mutation_prob,
                max_attempts=self.max_attempts
                if self.early_stopping else self.max_iters,
                max_iters=self.max_iters,
                curve=self.curve)
        return fitness_curve, fitted_weights, loss
Example No. 27
def calc_shortest_route_mlrose(
        systems: Iterable[Dict]) -> Tuple[List[Dict], float]:
    # See https://mlrose.readthedocs.io/en/stable/source/tutorial2.html#
    fitness_distances = mlrose.TravellingSales(
        distances=calc_distances(systems))
    problem_fit = mlrose.TSPOpt(length=len(systems),
                                fitness_fn=fitness_distances,
                                maximize=False)
    best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                  random_state=20,
                                                  max_attempts=20)
    return ([systems[index] for index in best_state], best_fitness)
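calc_distances is not defined in this snippet; it must produce the (index_a, index_b, distance) triples that mlrose.TravellingSales expects. A minimal sketch under the assumption that each system dict carries x/y/z coordinates (the keys are hypothetical):

from itertools import combinations
from math import dist
from typing import Dict, Iterable, List, Tuple

def calc_distances(systems: Iterable[Dict]) -> List[Tuple[int, int, float]]:
    # Pairwise Euclidean distances between systems, keyed by list index.
    systems = list(systems)
    return [(i, j, dist((systems[i]['x'], systems[i]['y'], systems[i]['z']),
                        (systems[j]['x'], systems[j]['y'], systems[j]['z'])))
            for i, j in combinations(range(len(systems)), 2)]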
Example No. 28
    def _cal_route(self):
        """ based on TSP solver """

        dist_list = self._build_dist_list()
        fitness_dists = mlrose.TravellingSales(distances=dist_list)
        problem_fit = mlrose.TSPOpt(length=len(self._places),
                                    fitness_fn=fitness_dists,
                                    maximize=False)
        best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                      random_state=2)

        return best_state
Example No. 29
def solve_ga(problem_fit):
    """ Solving using Genetic Algorithm """

    fitness_min = np.inf
    best_params_dict = {}

    # Parameter lists
    population_list = [100, 500, 1000]
    mutation_prob_list = [0.01, 0.1, 0.5]

    # Solve
    for p in population_list:
        for m in mutation_prob_list:
            # Start timer
            t_start = time.time()
            best_state, best_fitness, fitness_curve = mlrose.genetic_alg(
                problem=problem_fit,
                pop_size=p,
                mutation_prob=m,
                max_attempts=100,
                max_iters=5000,
                random_state=7,
                curve=True)
            # End timer
            t_end = time.time()
            # Storing the best result
            if best_fitness < fitness_min:
                fitness_min = best_fitness
                best_params_dict['Fitness'] = best_fitness
                best_params_dict['Solution'] = best_state
                best_params_dict['Population'] = p
                best_params_dict['Mutation'] = m
                best_params_dict['Fitness_curve'] = fitness_curve
                best_params_dict['Time'] = round(t_end - t_start, 2)

    # Printing the best result
    print("-- Parameters --")
    print("\tPopulation: ", best_params_dict['Population'])
    print("\tMutation probability: ", best_params_dict['Mutation'])
    print("\tMax Attempts at each step: 100")
    print("\tStopping Criterion = Max Iterations of the algorithm: 5000")
    print("Results:")
    print("\tSolution (Order of city traversal by index):  ",
          best_params_dict['Solution'])
    print("\tFitness: ", round(best_params_dict['Fitness'], 2))
    print("Computational Time: ", best_params_dict['Time'], " seconds\n\n")

    # Plotting (mlrose stores minimization curves as negated, internally
    # maximized values, so flip the sign to plot tour lengths)
    plt.plot(-(best_params_dict['Fitness_curve']))
    plt.title("Convergence curve: TSP-Qatar using Genetic Algorithm")
    plt.xlabel("Iterations")
    plt.ylabel("Fitness")
    plt.savefig("tsp_qatar_ga.png")
Example No. 30
def final_test(problem, ga, sa, rhc, mimic, trials=50):
    ga_samples = []
    sa_samples = []
    rhc_samples = []
    mimic_samples = []
    ga_time_samples = []
    sa_time_samples = []
    rhc_time_samples = []
    mimic_time_samples = []

    for _ in range(trials):
        start = time.time()
        _, ga_fitness, _ = mlrose.genetic_alg(problem,
                                              pop_size=ga[0],
                                              mutation_prob=ga[1])
        ga_time_samples.append(time.time() - start)
        start = time.time()
        _, sa_fitness, _ = mlrose.simulated_annealing(problem, sa)
        sa_time_samples.append(time.time() - start)
        start = time.time()
        _, rhc_fitness, _ = mlrose.random_hill_climb(problem, rhc)
        rhc_time_samples.append(time.time() - start)
        start = time.time()
        _, mimic_fitness, _ = mlrose.mimic(problem,
                                           pop_size=mimic[0],
                                           keep_pct=mimic[1])
        mimic_time_samples.append(time.time() - start)

        ga_samples.append(ga_fitness)
        sa_samples.append(sa_fitness)
        rhc_samples.append(rhc_fitness)
        mimic_samples.append(mimic_fitness)

    fitness_name = repr(problem.fitness_fn).split('.')[-1].split(' ')[0]
    if fitness_name == 'CustomFitness':
        fitness_name = 'Saw'
    print(f'Final results on {fitness_name}')
    print()
    print(f'GA max: {np.max(ga_samples)}')
    print(f'SA max: {np.max(sa_samples)}')
    print(f'RHC max: {np.max(rhc_samples)}')
    print(f'MIMIC max: {np.max(mimic_samples)}')
    print()
    print(f'GA mean: {np.mean(ga_samples)}')
    print(f'SA mean: {np.mean(sa_samples)}')
    print(f'RHC mean: {np.mean(rhc_samples)}')
    print(f'MIMIC mean: {np.mean(mimic_samples)}')
    print()
    print(f'GA mean execution time: {np.mean(ga_time_samples)}')
    print(f'SA mean execution time: {np.mean(sa_time_samples)}')
    print(f'RHC mean execution time: {np.mean(rhc_time_samples)}')
    print(f'MIMIC mean execution time: {np.mean(mimic_time_samples)}')