def call_mlrose_curve(algorithm_keyword, problem, pop_size=200, mutation_prob=0.1, max_attempts=10, max_iters=np.inf,
                      curve=False, random_state=None, schedule=mlrose.GeomDecay(), init_state=None, restarts=0,
                      keep_pct=0.2, fast_mimic=True):
    if algorithm_keyword == 'RHC':
        best_state, best_fitness, curve_output = mlrose.random_hill_climb(problem, max_attempts=max_attempts,
                                                                          max_iters=max_iters, restarts=restarts,
                                                                          init_state=init_state, curve=curve,
                                                                          random_state=random_state)
    elif algorithm_keyword == 'GA':
        best_state, best_fitness, curve_output = mlrose.genetic_alg(problem, pop_size=pop_size,
                                                                    mutation_prob=mutation_prob,
                                                                    max_attempts=max_attempts, max_iters=max_iters,
                                                                    curve=curve, random_state=random_state)
    elif algorithm_keyword == 'SA':
        best_state, best_fitness, curve_output = mlrose.simulated_annealing(problem, schedule=schedule,
                                                                            max_attempts=max_attempts,
                                                                            max_iters=max_iters,
                                                                            init_state=init_state, curve=curve,
                                                                            random_state=random_state)
    elif algorithm_keyword == 'MIMIC':
        print("problem: ", problem, "\npop_size: ", pop_size, "\nkeep_pct: ", keep_pct)
        print("max_attempts: ", max_attempts, "\nmax_iters: ", max_iters,
              "\nrandom_state: ", random_state, "\nfast_mimic: ", fast_mimic)
        best_state, best_fitness, curve_output = mlrose.mimic(problem, pop_size=pop_size, keep_pct=keep_pct,
                                                              max_attempts=max_attempts, max_iters=max_iters,
                                                              curve=curve, random_state=random_state)
        print("best_fitness: ", best_fitness)
    else:
        print("\n\nIncorrect 'algorithm_keyword'. Please check the input to the 'call_mlrose_curve' function.\n\n")
        best_state, best_fitness, curve_output = 'incorrect keyword', 'incorrect keyword', 'incorrect keyword'
    return best_state, best_fitness, curve_output
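A minimal usage sketch for the dispatcher above (not part of the original snippet): it assumes the same `import numpy as np` and `import mlrose` seen in these examples, and the OneMax problem and parameter values are illustrative only.

def _demo_call_mlrose_curve():
    # Hypothetical demo: exercise the 'SA' branch on a small OneMax problem.
    demo_problem = mlrose.DiscreteOpt(length=20, fitness_fn=mlrose.OneMax(),
                                      maximize=True, max_val=2)
    state, fitness, fitness_curve = call_mlrose_curve('SA', demo_problem,
                                                      max_attempts=20,
                                                      curve=True,  # curve=True so three values come back
                                                      random_state=1)
    print(state, fitness)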
Example #2
File: order.py  Project: pn51/FOQUS
def rank(fnames, ga_max_attempts=25):
    """return fnames ranked"""
    dist_mat = np.load(fnames["dmat"])
    dist_list = mat2tuples(dist_mat)

    # define fitness function object
    fitness_dists = mlrose.TravellingSales(distances=dist_list)

    # define optimization problem object
    n_len = dist_mat.shape[0]
    problem_fit = mlrose.TSPOpt(length=n_len, fitness_fn=fitness_dists, maximize=False)

    # solve problem using the genetic algorithm
    best_state = mlrose.genetic_alg(
        problem_fit, mutation_prob=0.2, max_attempts=ga_max_attempts, random_state=2
    )[0]

    # retrieve ranked list
    cand = load(fnames["cand"])
    ranked_cand = cand.loc[best_state]

    # save the output
    fname, ext = os.path.splitext(fnames["cand"])
    fname_ranked = fname + "_ranked" + ext
    write(fname_ranked, ranked_cand)
    _log.info("Ordered candidates saved to %s", fname_ranked)

    return fname_ranked
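`mat2tuples` is a project helper defined elsewhere in order.py. Purely as a sketch of the shape of data it must return (the (i, j, distance) triples that `mlrose.TravellingSales` expects), something like the following would do; treat the name and details as assumptions, not the FOQUS implementation.

def mat2tuples_sketch(dist_mat):
    # Hypothetical stand-in: flatten the upper triangle of a square distance
    # matrix into (i, j, distance) triples for mlrose.TravellingSales.
    n = dist_mat.shape[0]
    return [(i, j, float(dist_mat[i, j]))
            for i in range(n) for j in range(i + 1, n)]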
Example #3
def runComplexityGenetic(pType, problem):
    if pType == 'One Max':
        neighbor = 50
        populationSize = 800
        iterations = 1000
        mutation = 0.001
    elif pType == 'Flip Flop':
        neighbor = 36
        populationSize = 1000
        iterations = 1000
        mutation = 0.001
    else:
        neighbor = 50
        populationSize = 250
        iterations = 10000
        mutation = 0.15
    s = time()
    # best_state, best_fitness = mlrose.genetic_alg(problem,
    #                                                 pop_size=populationSize,
    #                                                 mutation_prob=mutation,
    #                                                 max_attempts=neighbor,
    #                                                 max_iters= iterations,
    #                                                 random_state=1)
    best_state, best_fitness, c = hive.genetic_alg(problem,
                                                   pop_size=populationSize,
                                                   mutation_prob=mutation,
                                                   max_attempts=neighbor,
                                                   max_iters=iterations,
                                                   random_state=1)
    timeTaken = time() - s
    return best_fitness, timeTaken
def gen_alg(opt):
    global count
    count = 0
    gen_loss_list = []
    gen_train_acc_list = []
    gen_test_acc_list = []
    for population in range(200, 4001, 200):
        best_state_spam, best_fitness_spam, _ = mlrose.genetic_alg(
            opt, pop_size=population, curve=True)
        train_predict_gen = predict(best_state_spam, train_features_spam_norm)
        test_predict_gen = predict(best_state_spam, test_features_spam_norm)
        train_accuracy_gen = accuracy_score(train_labels_spam,
                                            train_predict_gen)
        test_accuracy_gen = accuracy_score(test_labels_spam, test_predict_gen)
        gen_loss_list.append(best_fitness_spam)
        gen_train_acc_list.append(train_accuracy_gen)
        gen_test_acc_list.append(test_accuracy_gen)
    plt.figure(figsize=(10, 6))
    plt.subplot(121)
    plt.plot(np.arange(200, 4001, 200), gen_loss_list, label='-1*loss')
    plt.xlabel('population num')
    plt.ylabel('-1 * loss')
    plt.title('loss versus population num')
    plt.legend(loc='lower right')
    plt.subplot(122)
    plt.plot(np.arange(200, 4001, 200), gen_train_acc_list, label='train')
    plt.plot(np.arange(200, 4001, 200), gen_test_acc_list, label='test')
    plt.xlabel('population num')
    plt.ylabel('accuracy')
    plt.title('accuracy versus population num')
    plt.legend(loc='lower right')
    plt.show()
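`predict` and the spam train/test arrays used above are defined elsewhere in this example. Purely as an assumption about what such a helper could look like when the optimized state is a flat weight vector, here is a logistic-regression-style sketch (the real project helper may differ).

def predict_sketch(weights, features):
    # Hypothetical stand-in for the predict helper: treat the optimized state
    # as linear weights plus a trailing bias term and threshold the sigmoid.
    scores = features @ weights[:-1] + weights[-1]
    return (1.0 / (1.0 + np.exp(-scores)) > 0.5).astype(int)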
Example #5
def compare_methods(num_char):
    global count
    count=0
    fitness_obj=mlrose.CustomFitness(heter_string_fn)
    opt=mlrose.DiscreteOpt(num_char,fitness_obj,maximize=True,max_val=num_char)
    best_state_climb,best_fitness_climb,fitness_curve_climb=mlrose.random_hill_climb(opt,curve=True)
    print('---------------------random hill climb-------------------------')
    print('hill climbing best state for heter-string problem:',best_state_climb)
    print('hill climbing best fitness for heter-string problem:',best_fitness_climb)
    print('hill climbing fitness curve for heter-string problem:',fitness_curve_climb)
    print('number of fitness calls used:',count)
    count=0
    print('-------------------simulated annealing-------------------------')
    best_state_ann,best_fitness_ann,fitness_curve_ann=mlrose.simulated_annealing(opt,schedule=mlrose.ExpDecay(),curve=True)
    print('simulated annealing best state for heter-string problem:',best_state_ann)
    print('simulated annealing best fitness for heter-string problem:',best_fitness_ann)
    print('simulated annealing fitness curve for heter-string problem:',fitness_curve_ann)
    print('number of fitness calls used:',count)
    count=0
    best_state_ga,best_fitness_ga,fitness_curve_ga=mlrose.genetic_alg(opt,pop_size=200, mutation_prob=0.5,curve=True)
    print('---------------------genetic alg----------------------------')
    print('genetic algorithm best state for heter-string problem:',best_state_ga)
    print('genetic algorithm best fitness for heter-string problem:',best_fitness_ga)
    print('genetic algorithm fitness curve for heter-string problem:',fitness_curve_ga)
    print('number of fitness calls used:',count)
    count=0
    best_state_mimic,best_fitness_mimic,fitness_curve_mimic=mlrose.mimic(opt,pop_size=200,curve=True)
    print('------------------------mimic-------------------------------')
    print('mimic best state for heter-string problem:',best_state_mimic)
    print('mimic best fitness value for heter-string problem:',best_fitness_mimic)
    print('mimic curve for heter-string problem:',fitness_curve_mimic)
    print('number of fitness calls used:',count)
    count=0
    plt.figure(figsize=(10,10))
    plt.subplot(221)
    plt.plot(fitness_curve_climb)
    plt.ylabel('fitness')
    plt.xlabel('num_iter')
    plt.ylim(20,50)
    plt.title('random hill climb')
    plt.subplot(222)
    plt.plot(fitness_curve_ann)
    plt.ylabel('fitness')
    plt.xlabel('num_iter')
    plt.ylim(20,50)
    plt.title('simulated annealing')
    plt.subplot(223)
    plt.plot(fitness_curve_ga)
    plt.ylim(20,50)
    plt.ylabel('fitness')
    plt.xlabel('num_iter')
    plt.title('genetic algorithm')
    plt.subplot(224)
    plt.plot(fitness_curve_mimic)
    plt.ylim(20,50)
    plt.title('mimic')
    plt.ylabel('fitness')
    plt.xlabel('num_iter')
    plt.show()
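`heter_string_fn` and the global fitness-call counter `count` are defined outside this excerpt. A hedged sketch of the counting pattern the snippet appears to rely on (reward distinct entries, bump `count` on every evaluation); the real fitness function may differ.

def heter_string_fn_sketch(state):
    # Hypothetical stand-in for heter_string_fn: bump the module-level call
    # counter and reward states whose entries are all distinct.
    global count
    count += 1
    return float(len(set(state)))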
Example #6
def gen(problem, init_state, max_attempts, max_iters):
    best_state, best_fitness, fitness_curve = mlrose_hiive.genetic_alg(problem, pop_size=200, mutation_prob=0.1,
                                                                       max_attempts=max_attempts, max_iters=max_iters,
                                                                       curve=True, random_state=1)
    print('genetic')
    print(best_state)
    print(best_fitness)
#     print(fitness_curve)
    return best_state, best_fitness, fitness_curve
Example #7
    def test_genetic_alg_discrete_min():
        """Test genetic_alg function for a discrete minimization problem"""

        problem = DiscreteOpt(5, OneMax(), maximize=False)
        best_state, best_fitness, _ = genetic_alg(problem, max_attempts=50)

        x = np.array([0, 0, 0, 0, 0])

        assert (np.array_equal(best_state, x) and best_fitness == 0)
Example #8
    def test_genetic_alg_continuous_min():
        """Test genetic_alg function for a continuous minimization problem"""

        problem = ContinuousOpt(5, OneMax(), maximize=False)
        best_state, best_fitness, _ = genetic_alg(problem, max_attempts=200)

        x = np.array([0, 0, 0, 0, 0])

        assert (np.allclose(best_state, x, atol=0.5) and best_fitness < 1)
Example #9
def tsp(distances=None, num_of_nodes=None):
    '''https://mlrose.readthedocs.io/en/stable/source/tutorial2.html'''

    fitness_coordinates = TravellingSales(distances=distances)
    problem_fit = TSPOpt(length=num_of_nodes,
                         fitness_fn=fitness_coordinates,
                         maximize=False)
    best_route, _ = genetic_alg(problem_fit, random_state=2)
    return best_route
Example #10
def ga(problem, iterations, random_seed, graph_file, graph_title):
    mutation_prob = [0.1, 0.2, 0.3, 0.4, 0.5]
    best_score = []
    time_taken = []
    fn_evals_taken = []
    global eval_count
    for m in mutation_prob:
        fitness = []
        fit_time = []
        fn_evals = []
        for i in iterations:
            eval_count = 0
            start = datetime.datetime.now()
            best_state, best_fitness, _ = mlrose_hiive.genetic_alg(problem, mutation_prob=m,
                                                                max_iters=i, random_state=random_seed)
            finish = datetime.datetime.now()
            fitness.append(best_fitness)
            fit_time.append((finish - start).total_seconds())
            fn_evals.append(eval_count)
        # Find the best score achieved in that mutation prob
        best_score.append(max(fitness))
        index = fitness.index(max(fitness))
        # find the time that was taken to achieve that
        time_taken.append(fit_time[index])
        fn_evals_taken.append(fn_evals[index])
        plt.plot(iterations, fitness, label="Mutation = " + str(m))

    plt.legend(loc="best", title='Mutation Probability')
    plt.grid()
    generate_graph(graph_file + "ga", graph_title + "Genetic Algorithm", "Iterations", "Fitness")

    # Plot best_score against mutation probability
    plt.plot(mutation_prob, best_score)
    plt.grid()
    generate_graph(graph_file + "ga_mut", graph_title + "Genetic Algorithm",
                   "Mutation Probability", "Best Score Achieved")

    """
    plt.plot(mutation_prob, time_taken)
    plt.grid()
    generate_graph("cp_sa_decay_time", "Continuous Peaks - Genetic Algorithm", "Mutation Probability",
                   "Time taken to achieve that")
    """

    plt.scatter(time_taken, best_score)
    for i, txt in enumerate(mutation_prob):
        plt.annotate(str(txt), xy=(time_taken[i], best_score[i]))
    plt.legend(loc='best', title='Mutation Probability')
    plt.grid()
    generate_graph(graph_file + "ga_scatter", graph_title + "Genetic Algorithm",
                   "Time Taken", "Best Score achieved")

    print('Mutation prob: ', mutation_prob)
    print('Best scores reached: ', best_score)
    print('Time taken to do that: ', time_taken)
    print('Function evaluations taken: ', fn_evals_taken)
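`generate_graph` is a plotting helper defined elsewhere in this project; a plausible sketch of what it does (label, save, and clear the current figure) is given below, offered only as an assumption.

def generate_graph_sketch(filename, title, xlabel, ylabel):
    # Hypothetical stand-in for generate_graph: finish the current matplotlib
    # figure, write it to disk, and clear it for the next plot.
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig(filename + ".png")
    plt.clf()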
def genetic_algorithm(problem_fit, vectorLength, data_dir, iterlist):
    directory = "./" + data_dir + "/curves/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    path1 = './' + data_dir
    path2 = "./" + data_dir + "/curves/"

    prl = []

    beststate = []
    bestfit = []
    curve = []
    time = []
    iterlistn = []

    for pr in np.linspace(0.1, 1, 5):
        for iters in iterlist:
            start = clock()
            best_state, best_fitness, train_curve = mlrose.genetic_alg(problem_fit,
                                                                       mutation_prob=pr,
                                                                       max_iters=int(iters),
                                                                       curve=True,
                                                                       random_state=randomSeed)
            end = clock()
            iterlistn.append(int(iters))
            time.append(end - start)
            beststate.append(best_state)
            bestfit.append(best_fitness)
            prl.append(pr)
            curve.append(train_curve)
            if verbose:
                print(pr)
                print(int(iters))
                print(best_state)
                print(best_fitness)

    ffga = pd.DataFrame({
        'Mutation Probability': prl,
        'Best Fitness': bestfit,
        'Iterations': iterlistn,
        'Time': time
    })
    beststatedf = pd.DataFrame(0.0,
                               index=range(1, vectorLength + 1),
                               columns=range(len(beststate)))
    for i in range(len(curve)):
        curvedf = pd.DataFrame(curve[i])
        curvedf.to_csv(
            os.path.join(path2,
                         'gacurve{}_{}.csv'.format(prl[i], iterlistn[i])))

    for i in range(1, len(beststate) + 1):
        beststatedf.loc[:, i] = beststate[i - 1]

    ffga.to_csv(os.path.join(path1, 'ga.csv'))
    beststatedf.to_csv(os.path.join(path1, 'gastates.csv'))
Example #12
def compare_multi_round_k_color():
    global count
    count = 0
    fitness_obj = mlrose.CustomFitness(k_color_fit)
    opt = mlrose.DiscreteOpt(50, fitness_obj, maximize=True, max_val=8)
    fitness_list_rhc = []
    fitness_list_ann = []
    fitness_list_genetic = []
    fitness_list_mimic = []
    num_sample_rhc = []
    num_sample_ann = []
    num_sample_genetic = []
    num_sample_mimic = []
    for i in range(20):
        best_state_climb, best_fitness_climb, fitness_curve_climb = mlrose.random_hill_climb(
            opt, curve=True)
        fitness_list_rhc.append(best_fitness_climb)
        num_sample_rhc.append(count)
        count = 0
        best_state_ann, best_fitness_ann, fitness_curve_ann = mlrose.simulated_annealing(
            opt, schedule=mlrose.ExpDecay(), curve=True)
        fitness_list_ann.append(best_fitness_ann)
        num_sample_ann.append(count)
        count = 0
        best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(
            opt, pop_size=500, mutation_prob=0.5, curve=True)
        fitness_list_genetic.append(best_fitness_ga)
        num_sample_genetic.append(count)
        count = 0
        best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(
            opt, pop_size=500, curve=True)
        fitness_list_mimic.append(best_fitness_mimic)
        num_sample_mimic.append(count)
        count = 0
    plt.figure(figsize=(10, 6))
    plt.subplot(121)
    plt.plot(fitness_list_rhc, label='rhc')
    plt.plot(fitness_list_ann, label='ann')
    plt.plot(fitness_list_genetic, label='ga')
    plt.plot(fitness_list_mimic, label='mimic')
    plt.xlabel('rounds')
    plt.ylabel('fitness value')
    plt.title('fitness value comparison')
    plt.legend(loc='lower right')
    plt.subplot(122)
    plt.plot(num_sample_rhc, label='rhc')
    plt.plot(num_sample_ann, label='ann')
    plt.plot(num_sample_genetic, label='ga')
    plt.plot(num_sample_mimic, label='mimic')
    plt.xlabel('rounds')
    plt.ylabel('fitness calls')
    plt.title('fitness call number comparison')
    plt.legend(loc='upper right')
    plt.show()
Example #13
def get_genetic(problem, pop_size=300, mutation_prob=0.4):

    best_state, best_fitness, fitness_curve = mlrose.genetic_alg(
        problem,
        pop_size=pop_size,
        mutation_prob=mutation_prob,
        max_attempts=100,
        max_iters=np.inf,
        curve=True,
        random_state=23)

    return best_state, best_fitness, fitness_curve
Example #14
def plot_optimization_problem_fitness(fitness_function, iterations, random_state, title):

    start_rhc = timeit.default_timer()
    rhc_best_state, rhc_best_fitness, rhc_fitness_curve =  mlrose.random_hill_climb(fitness_function, max_iters=iterations, random_state= random_state, restarts=10, curve=True)
    rhc_elapsed = timeit.default_timer() - start_rhc

    start_sa = timeit.default_timer()
    sa_best_state, sa_best_fitness, sa_fitness_curve =  mlrose.simulated_annealing(fitness_function, max_iters=iterations, random_state= random_state, curve=True)
    sa_elapsed = timeit.default_timer() - start_sa

    start_ga = timeit.default_timer()
    ga_best_state, ga_best_fitness, ga_fitness_curve =  mlrose.genetic_alg(fitness_function, max_iters=iterations, random_state= random_state, curve=True)
    ga_elapsed = timeit.default_timer() - start_ga

    start_mimic = timeit.default_timer()
    mimic_best_state, mimic_best_fitness, mimic_fitness_curve =  mlrose.mimic(fitness_function, max_iters=iterations, random_state= random_state, curve=True)
    mimic_elapsed = timeit.default_timer() - start_mimic

    # Fill in arrays.
    rhc_fitness_curve_bf = np.full(iterations, rhc_best_fitness)
    rhc_fitness_curve_bf[:rhc_fitness_curve.shape[0]] = rhc_fitness_curve

    sa_fitness_curve_bf = np.full(iterations, sa_best_fitness)
    sa_fitness_curve_bf[:sa_fitness_curve.shape[0]] = sa_fitness_curve

    ga_fitness_curve_bf = np.full(iterations, ga_best_fitness)
    ga_fitness_curve_bf[:ga_fitness_curve.shape[0]] = ga_fitness_curve

    mimic_fitness_curve_bf = np.full(iterations, mimic_best_fitness)
    mimic_fitness_curve_bf[:mimic_fitness_curve.shape[0]] = mimic_fitness_curve

    # Plot the convergence times.
    plot_ro_algo_times(rhc_elapsed, ga_elapsed, sa_elapsed, mimic_elapsed, title)

    # Plot the fitness over iterations.
    fig = plt.figure(figsize=(8,6))

    plt.plot(rhc_fitness_curve_bf, label="RHC")
    plt.plot(sa_fitness_curve_bf, label="SA")
    plt.plot(ga_fitness_curve_bf, label="GA")
    plt.plot(mimic_fitness_curve_bf, label="MIMIC")

    plt.xlabel("Number of Iterations")
    plt.xticks(np.arange(0.0, iterations, step=iterations / 10))

    plt.ylabel("Fitness Function")

    plt.title(title)
    plt.legend(prop={'size':13}, loc='lower right')

    plt.savefig('Charts/OptimizationProblems/' + title + '.png')
    plt.clf()
Example #15
    def get_order(self, distance_matrix) -> Tuple[np.ndarray, float]:
        n_locations = len(distance_matrix)
        distance_triples = _create_distance_triples(distance_matrix)
        fitness_dists = mlrose.TravellingSales(distances=distance_triples)
        problem_fit = mlrose.TSPOpt(length=n_locations,
                                    fitness_fn=fitness_dists,
                                    maximize=False)
        best_state, best_fitness, iterations = mlrose.genetic_alg(
            problem_fit, mutation_prob=0.2, max_attempts=100, random_state=2)

        best_state = super()._close_circle(
            super()._roll_state_in_order(best_state))
        return best_state, best_fitness
Example #16
    def find_best(self, pois: list, **kwargs):
        logging.debug('init bucket')
        bucket = Bucket(self.routing)
        logging.debug('compute distance matrix')
        self.compute_distances(pois, bucket)
        logging.debug('define fitness function')
        fitness_dists = mlrose.TravellingSales(distances=bucket.distances)
        logging.debug('define optimization problem')
        problem_fit = mlrose.TSPOpt(
            length=len(pois),
            fitness_fn=fitness_dists,
            maximize=False
        )
        logging.debug('run randomized optimization algorithm')
        best_state, best_fitness, fitness_curve = mlrose.genetic_alg(problem_fit, random_state=2, **kwargs)
        segments = bucket.segments(self._generate_keys(best_state))
        meters = sum([segment.path.distance for segment in segments])
        return Tour(segments, meters)
Example #17
def plot_GAmutpop(problem, problem_fit, max_attempts, gamut, maxIter, seed, min=False):
    Mutatepop = [200, 300, 400, 500, 600, 700, 800, 900, 1000]
    plt.figure()
    for m in Mutatepop:
        best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(problem_fit, pop_size=m, mutation_prob=gamut,
                                                                       curve=True, max_attempts=max_attempts,
                                                                       max_iters=maxIter, random_state=seed)
        if min:
            gafitness_curve = np.array(gafitness_curve) * -1
        plt.plot(gafitness_curve, label='mut pop = ' + str(m))
        print(m, best_fitness)

    plt.title(problem + " - GA - Mutation Populations")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Images\\" + problem + " - GA - Mutation Populations")
    plt.show()
Example #18
def plot_GAmut(problem, problem_fit, max_attempts, gapop, maxIter, seed, min=False):
    Mutate = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    plt.figure()
    for m in Mutate:
        best_state, best_fitness, gafitness_curve = mlrose.genetic_alg(problem_fit, mutation_prob=m, curve=True,
                                                                       max_iters=maxIter, max_attempts=max_attempts,
                                                                       pop_size=gapop, random_state=seed)
        if min:
            gafitness_curve = np.array(gafitness_curve) * -1
        plt.plot(gafitness_curve, label='mut prob =' + str(m))
        print(m, best_fitness)

    plt.title(problem + " - GA - Mutation Probabilities")
    plt.xlabel('Iterations')
    plt.ylabel('Fitness')
    plt.grid(True)
    plt.legend()
    plt.savefig("Images\\" + problem + " - GA - Mutation Probabilities")
    plt.show()
Example #19
File: main.py  Project: aarushig14/OMSCS
def plot_ga_graph(prob_type):
    try:
        os.mkdir("./GA")
    except FileExistsError:
        pass
    except OSError as error:
        print("Error creating directory results.")

    print('\n plot_ga_graph - Progress: ', end="")

    fitness, problem = get_fitness_function(prob_type)
    process_time = []
    fitness_score = []
    population = [100, 200, 300, 500, 700, 1000, 1500]
    for i in population:
        print(".", end="")
        start = time.time()
        best_state, best_fitness, fitness_curve = mlrose.genetic_alg(
            problem,
            pop_size=i,
            mutation_prob=__MUTATION_PROBABILITY,
            max_attempts=__MAX_ATTEMPTS[prob_type],
            max_iters=__MAX_ITERS,
            random_state=10)
        fitness_score.append(best_fitness)
        process_time.append(time.time() - start)

    plt.figure(300)
    plt.plot(fitness_score)
    plt.xticks(np.arange(len(population)), [str(p) for p in population])
    plt.xlabel("pop_size constant")
    plt.ylabel("fitness score")
    plt.title('Genetic Algorithm')
    plt.savefig("GA/" + "Fitness.png")

    plt.figure(301)
    plt.plot(process_time)
    plt.xticks(np.arange(len(population)), [str(p) for p in population])
    plt.xlabel("pop_size constant")
    plt.ylabel("time")
    plt.title('GA')
    plt.savefig("GA/" + "Time.png")
Example #20
def genetic_algorithm(problem, init_state, max_attempts, max_iters):
    start_time = time.time()
    # Does the kind of mutation/algo matter?
    best_state, best_fitness, fitness_curve = mlr.genetic_alg(
        problem,
        pop_size=200,
        mutation_prob=0.1,
        max_attempts=max_attempts,
        max_iters=max_iters,
        curve=True,
        random_state=RANDOM_STATE)
    end_time = time.time()
    total_time = end_time - start_time

    print('Genetic Algorithm')
    print("Elapsed Time", total_time)
    #print(best_state)
    print(best_fitness)
    #     print(fitness_curve)
    return best_state, best_fitness, fitness_curve, total_time
Example #21
def run_GA_1(problem, init_state, **kwargs):
    fit_vals = []
    fit_curves = []
    times = []
    fevals = []

    # run multiple times to get average
    for random_state in random_states:
        start = time.time()
        _, best_fit, fit_curve, evals = genetic_alg(problem,
                                                    random_state=random_state,
                                                    curve=True,
                                                    fevals=True,
                                                    pop_size=120,
                                                    mutation_prob=0.12,
                                                    **kwargs)

        fit_vals.append(best_fit)
        fit_curves.append(fit_curve)
        times.append(time.time() - start)
        fevals.append(sum(evals.values()))

    # plot average fitness value
    # now = datetime.now()
    # dt_string = now.strftime("%Y-%m-%d-%H-%M-%S")
    # hack for ease of naming
    problem_name = str(problem.fitness_fn).split('.')[-1].split(' ')[0]
    # chart_name = f"charts/ga_{problem_name}_{len(init_state)}_{dt_string}"

    # plt.plot(average_curves(fit_curves), label="ga")
    # plt.title(f"GA {problem_name} ({len(init_state)})")
    # plt.xlabel("step")
    # plt.ylabel("fitness")
    # plt.savefig(chart_name)
    # plt.show()

    avg_fit = np.average(fit_vals)
    avg_time = round(np.mean(times), 2)
    avg_evals = np.mean(fevals)
    print(f"GA {problem_name}: {avg_fit}: {avg_time}: {avg_evals}")
    return avg_fit, avg_time, avg_evals
Example #22
def compare_gen_mimic():
    fitness_obj = mlrose.CustomFitness(n_peak_fit)
    opt = mlrose.DiscreteOpt(1, fitness_obj, maximize=True, max_val=10000)
    global count
    count = 0
    iter_num_counter_ga = []
    fitness_list_ga = []
    iter_num_counter_mimic = []
    fitness_list_mimic = []
    for i in range(20):
        best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(
            opt, pop_size=20, mutation_prob=0.5, curve=True)
        iter_num_counter_ga.append(count)
        fitness_list_ga.append(best_fitness_ga)
        count = 0
        best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(
            opt, pop_size=20, curve=True)
        iter_num_counter_mimic.append(count)
        fitness_list_mimic.append(best_fitness_mimic)
        count = 0
    plt.figure(figsize=(8, 6))
    plt.subplot(121)
    plt.plot(fitness_list_ga, label='ga')
    plt.plot(fitness_list_mimic, label='mimic')
    plt.xlabel('rounds')
    plt.ylabel('fitness value')
    plt.title('fitness value comparison')
    plt.legend(loc='lower right')
    plt.subplot(122)
    plt.plot(iter_num_counter_ga, label='ga')
    plt.plot(iter_num_counter_mimic, label='mimic')
    plt.xlabel('rounds')
    plt.ylabel('fitness call no.')
    plt.title('fitness call number comparison')
    plt.legend(loc='upper right')
    plt.show()
Example #23
        schedule=schedule,
        max_attempts=attempts,
        random_state=seed,
        curve=True,
        init_state=init)
    print(f"        best SA State: {best_sa_state}")
    sub_end = time.time()
    sa_fitnesses.append(best_sa_fitness)
    sa_times.append(sub_end - sub_start)

    ## Genetic Algorithm
    sub_start = time.time()
    best_ga_state, best_ga_fitness, ga_curve = mlr.genetic_alg(
        problem,
        pop_size=200,
        max_attempts=attempts,
        random_state=seed,
        curve=True,
        elite_dreg_ratio=0.5,
        mutation_prob=0.4)
    print(f"        best GA State: {best_ga_state}")
    sub_end = time.time()
    ga_fitnesses.append(best_ga_fitness)
    ga_times.append(sub_end - sub_start)

    ## MIMIC
    sub_start = time.time()
    best_m_state, best_m_fitness, m_curve = mlr.mimic(problem,
                                                      pop_size=200,
                                                      max_attempts=attempts,
                                                      random_state=seed,
                                                      curve=True)
    # generate N random 3d vectors whose values lie in the range [1, 10)
    coords = np.random.randint(1, 10, size=(N, 3))
    print(coords)
    if k == 2:
        coords[:, 0] *= -1
    elif k == 3:
        coords[:, 0] *= -1
        coords[:, 1] *= -1
    elif k == 4:
        coords[:, 1] *= -1

    # compute the distance between each and every point
    dist_list = [(i, j, round(np.linalg.norm(coords[i] - coords[j]), 3))
                 for i in range(N) for j in range(i + 1, N)]

    # define the fitness object and run the solver
    fitness_coords = mlrose.TravellingSales(distances=dist_list)
    problem_fit = mlrose.TSPOpt(length=len(coords),
                                fitness_fn=fitness_coords,
                                maximize=False)

    best_state, _, _ = mlrose.genetic_alg(problem_fit, random_state=2)

    with open('drone' + str(k) + '_points.txt', 'w') as out:
        # quick hack to avoid regex when parsing coords in the controller script
        sys.stdout = out
        for i in coords:
            print(*i, sep=' ', end=';')
        print(*best_state, sep=' ', end='')
    sys.stdout = oldout
Example #25
File: tsp.py  Project: xiangxa/CS7641
plt.ylabel("Fitness")
plt.title("TSP: Fitness curve vs decay rate in SA")
plt.savefig("output/tsp_sa_decay.png")
plt.close()

print(df.max())

#%% tuning for GA
curve_list = []
pop_sizes = [100, 200, 300]
for p in pop_sizes:
    _, _, curve = mlrose.genetic_alg(
        problem,
        max_attempts=MAX_ATTEMPTS,
        max_iters=3000,
        pop_size=p,
        elite_dreg_ratio=1,
        curve=True,
        random_state=RANDOM_SEED,
    )
    curve_list.append(curve)

df = 1 / pd.DataFrame(curve_list).transpose()
df.columns = pop_sizes
df.plot()
plt.xlabel("Iteration")
plt.ylabel("Fitness")
plt.title("TSP: Fitness curve vs population size in GA")
plt.savefig("output/tsp_ga_pop.png")
plt.close()
Example #26
File: queens.py  Project: abagde93/CS7641

columns = ['Time', 'Fitness', 'Population Size', 'Mutation Rate']
df=pd.read_csv("./queen/queen_prob/ga__queen_prob__run_stats_df.csv")
print(df[columns].sort_values(by=['Fitness'], ascending=False))


max_attempts = 20
max_iters = 25
mutation_prob=0.5
pop_size = 10
eval_count = 0
best_state, best_fitness, gen_curve = mlrh.genetic_alg(prob,
                                                   max_attempts=max_attempts,
                                                   max_iters=max_iters,
                                                   random_state=random_state,
                                                   pop_size=pop_size,
                                                   mutation_prob=mutation_prob,
                                                   curve=True)
print("Genetic Alg - Total Function Evaluations:", eval_count)
plot_fitness_iteration('fitness_iteration_ga_queens.png',gen_curve,
                       "Queens - Genetic Alg: mutation_prob: {}, pop_size: {}".format(mutation_prob, pop_size))


# MIMIC
mim = mlrh.MIMICRunner(problem=prob,
                       experiment_name=experiment_name,
                       output_directory=output_directory,
                       seed=random_state,
                       population_sizes=[50, 100, 200],
                       keep_percent_list=[0.1, 0.25, 0.5, 0.75],
Example #27
    'onemax': mlrose.OneMax(),
    #    'path': mlrose.CustomFitness(path, problem_type='discrete'),
    'flipflop': mlrose.FlipFlop(),
    #    'cliffs': mlrose.CustomFitness(cf1, problem_type='discrete'),
    #    'cliffs': mlrose.CustomFitness(is_larger, problem_type='discrete'),
    #    'max2color': mlrose.MaxKColorGenerator.generate(seed=42, number_of_nodes=PROBLEM_LENGTH, max_colors=2),
    #    'mod': mlrose.CustomFitness(cf2, problem_type='discrete')
}

RANDOM_STATE = 42
DEFAULTS = {'random_state': RANDOM_STATE, 'curve': True, 'max_attempts': 10}

ALGORITHMS = {
    'rhc': lambda p: mlrose.random_hill_climb(p, **DEFAULTS),
    'sa': lambda p: mlrose.simulated_annealing(p, **DEFAULTS),
    'ga': lambda p: mlrose.genetic_alg(p, **DEFAULTS),
    'mimic': lambda p: mlrose.mimic(p, **DEFAULTS)
}

results = []

PART_1 = True
PART_2 = True

if PART_1:
    for f_name, fitness in FITNESS_FUNCS.items():
        evaluate_fitness(f_name, fitness, f_name == 'max2color')
        alg2curve = {}
        overall_best_fitness = -1
        for alg_name, alg in ALGORITHMS.items():
            if f_name == 'max2color':
Example #28
def method_compare(opt, train_features_spam_norm, train_labels_spam,
                   test_features_spam_norm, test_labels_spam):
    #best_state_spam,best_fitness_spam,fitness_curve=mlrose.simulated_annealing(opt,schedule=mlrose.ExpDecay(),curve=True)
    #best_state_spam,best_fitness_spam,fitness_curve=mlrose.genetic_alg(opt,pop_size=2000,curve=True)

    global count
    count = 0
    loss_list_rhc = []
    loss_list_ann = []
    loss_list_ga = []

    train_acc_list_rhc = []
    train_acc_list_ann = []
    train_acc_list_ga = []
    test_acc_list_rhc = []
    test_acc_list_ann = []
    test_acc_list_ga = []

    fitness_call_list_rhc = []
    fitness_call_list_ann = []
    fitness_call_list_ga = []
    # ten rounds of rhc
    for i in range(10):
        count = 0
        best_state_spam, best_fitness_spam, fitness_curve = mlrose.random_hill_climb(
            opt, restarts=70, curve=True)
        loss_list_rhc.append(best_fitness_spam)
        train_predict_rhc = predict(best_state_spam, train_features_spam_norm)
        test_predict_rhc = predict(best_state_spam, test_features_spam_norm)
        train_acc_list_rhc.append(
            accuracy_score(train_labels_spam, train_predict_rhc))
        test_acc_list_rhc.append(
            accuracy_score(test_labels_spam, test_predict_rhc))
        fitness_call_list_rhc.append(count)
    #ten rounds of simulated annealing
    for i in range(10):
        count = 0
        best_state_spam, best_fitness_spam, _ = mlrose.simulated_annealing(
            opt, schedule=mlrose.ExpDecay(exp_const=0.003), curve=True)
        loss_list_ann.append(best_fitness_spam)
        train_predict_ann = predict(best_state_spam, train_features_spam_norm)
        test_predict_ann = predict(best_state_spam, test_features_spam_norm)
        train_acc_list_ann.append(
            accuracy_score(train_labels_spam, train_predict_ann))
        test_acc_list_ann.append(
            accuracy_score(test_labels_spam, test_predict_ann))
        fitness_call_list_ann.append(count)
    #ten rounds of genetic algorithm
    for i in range(10):
        count = 0
        best_state_spam, best_fitness_spam, _ = mlrose.genetic_alg(
            opt, pop_size=1000, curve=True)
        loss_list_ga.append(best_fitness_spam)
        train_predict_ga = predict(best_state_spam, train_features_spam_norm)
        test_predict_ga = predict(best_state_spam, test_features_spam_norm)
        train_acc_list_ga.append(
            accuracy_score(train_labels_spam, train_predict_ga))
        test_acc_list_ga.append(
            accuracy_score(test_labels_spam, test_predict_ga))
        fitness_call_list_ga.append(count)

    #plot loss curve
    plt.figure(figsize=(6, 6))
    plt.plot(np.arange(1, 11), loss_list_rhc, label='rhc')
    plt.plot(np.arange(1, 11), loss_list_ann, label='s_ann')
    plt.plot(np.arange(1, 11), loss_list_ga, label='ga')
    plt.xlabel('rounds')
    plt.ylabel('-1*loss')
    plt.title('loss versus different algorithm')
    plt.legend()
    plt.show()

    #plot acc curve
    plt.figure(figsize=(15, 6))
    plt.subplot(131)
    plt.plot(np.arange(1, 11), train_acc_list_rhc, label='train')
    plt.plot(np.arange(1, 11), test_acc_list_rhc, label='test')
    plt.xlabel('rounds')
    plt.ylabel('accuracy')
    plt.title('rhc')
    plt.legend()
    plt.subplot(132)
    plt.plot(np.arange(1, 11), train_acc_list_ann, label='train')
    plt.plot(np.arange(1, 11), test_acc_list_ann, label='test')
    plt.xlabel('rounds')
    plt.ylabel('accuracy')
    plt.title('simulated annealing')
    plt.legend()
    plt.subplot(133)
    plt.plot(np.arange(1, 11), train_acc_list_ga, label='train')
    plt.plot(np.arange(1, 11), test_acc_list_ga, label='test')
    plt.xlabel('rounds')
    plt.ylabel('accuracy')
    plt.title('genetic algorithm')
    plt.legend()

    #plot fitness call
    plt.figure(figsize=(6, 6))
    plt.plot(np.arange(1, 11), fitness_call_list_rhc, label='rhc')
    plt.plot(np.arange(1, 11), fitness_call_list_ann, label='s_ann')
    plt.plot(np.arange(1, 11), fitness_call_list_ga, label='ga')
    plt.xlabel('rounds')
    plt.ylabel('fitness call number')
    plt.title('fitness call num versus different algorithm')
    plt.legend()
    plt.show()
Example #29
def n_queens_ga(nq_problem, max_iters=np.inf, num_runs=20, verbose=False):
    # HP to vary
    hp_name = 'pop_mate_pct'
    hp_values = [0.25, 0.50, 0.75]

    # other hyperparameters for genetic algorithm
    population_size = 200
    elite_dreg_ratio = 0.95
    mutation_prob = 0.1

    # run for each hp value and append results to list

    fitness_dfs = []
    runs = np.arange(num_runs)

    for hp_value in hp_values:
        pop_mate_pct = hp_value  # set varied HP at beginning of loop

        run_times = np.zeros(num_runs)
        fitness_data = pd.DataFrame()

        for run in runs:
            run_t0 = time()
            best_state, best_fitness, fitness_curve = mlrose.genetic_alg(
                problem=nq_problem,
                pop_size=population_size,
                pop_breed_percent=pop_mate_pct,
                elite_dreg_ratio=elite_dreg_ratio,
                mutation_prob=mutation_prob,
                max_attempts=10,
                max_iters=max_iters,
                curve=True,
            )
            run_time = time() - run_t0
            run_times[run] = run_time

            fitness_data = pd.concat(
                [fitness_data, pd.DataFrame(fitness_curve)],
                axis=1,
                sort=False)

        fitness_data.columns = runs
        fitness_data = fitness_data.ffill()
        fitness_dfs.append(fitness_data)

        # calculate and print avg time per run
        avg_run_time = np.average(run_times)
        print("N-Queens - GA avg run time,", hp_value, hp_name, ":",
              avg_run_time)

    # generate plots
    plot_title = "N-Queens GA - " \
        + str(population_size) + " pop, " \
        + str(mutation_prob) + " mut prob, " \
        + ": fit vs iter"
    plotting.plot_fitness_curves(
        fitness_dfs=fitness_dfs,
        hp_values=hp_values,
        hp_name=hp_name,
        title=plot_title,
    )
    plt.savefig('graphs/n_queens_ga_fitness.png')
    plt.clf()

    return fitness_dfs
Example #30
def ga_optimization(size):
    algo = 'ga'
    problem = get_problem(size)

    # Gridsearch params
    max_iters = 500
    max_attempts_values = [10, 50, 100, 200]
    pop_size_values = [50, 100, 150, 200]
    pop_breed_percent_values = [0.5, 0.75]
    mutation_prob_values = [0.1, 0.3]
    n_runs = len(max_attempts_values) * len(pop_size_values) * len(pop_breed_percent_values) * len(mutation_prob_values)

    # Best vals
    pop_size, pop_breed_percent, mutation_prob, max_attempts = None, None, None, None
    fitness, curves, n_invocations, time = float('-inf'), [], 0, 0

    # Gridsearch
    global eval_count
    run_counter = 0
    for run_pop_size in pop_size_values:
        for run_pop_breed_percent in pop_breed_percent_values:
            for run_mutation_prob in mutation_prob_values:
                for run_max_attempts in max_attempts_values:
                    # Print status
                    run_counter += 1
                    print(f'RUN {run_counter} of {n_runs} [pop_size: {run_pop_size}] [pop_breed_percent: {run_pop_breed_percent}] [mutation_prob: {run_mutation_prob}] [max_attempts: {run_max_attempts}]')

                    # Run problem
                    eval_count = 0
                    start = timer()
                    run_state, run_fitness, run_curves = mlrose_hiive.genetic_alg(problem,
                                                                                  pop_size=run_pop_size,
                                                                                  pop_breed_percent=run_pop_breed_percent,
                                                                                  mutation_prob=run_mutation_prob,
                                                                                  max_attempts=run_max_attempts,
                                                                                  max_iters=max_iters,
                                                                                  random_state=42,
                                                                                  curve=True)
                    end = timer()

                    # Save curves and params
                    if run_fitness > fitness:
                        pop_size = run_pop_size
                        pop_breed_percent = run_pop_breed_percent
                        mutation_prob = run_mutation_prob
                        max_attempts = run_max_attempts
                        fitness = run_fitness
                        curves = run_curves
                        n_invocations = eval_count
                        time = end - start

    df = pandas.DataFrame(curves, columns=['fitness'])
    df['pop_size'] = pop_size
    df['pop_breed_percent'] = pop_breed_percent
    df['mutation_prob'] = mutation_prob
    df['max_attempts'] = max_attempts
    df['max_iters'] = max_iters
    df['n_invocations'] = n_invocations
    df['time'] = time
    df.to_csv(f'{STATS_FOLDER}/{algo}_{size}_stats.csv', index=False)

    print(f'{algo}_{size} run.')
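A hedged sketch of how this gridsearch might be driven; the size values are illustrative, and `get_problem` and `STATS_FOLDER` come from elsewhere in the script.

if __name__ == '__main__':
    # Hypothetical driver: run the GA gridsearch for a few problem sizes.
    for problem_size in (20, 50, 100):
        ga_optimization(problem_size)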