Example #1
0
def compare_methods(num_char):
    """Run RHC, SA, GA and MIMIC on the heterogeneous-string problem and
    plot each algorithm's fitness curve in a 2x2 grid.

    Relies on the module-level ``count`` global, which the fitness function
    ``heter_string_fn`` is expected to increment on every evaluation; the
    counter is reset to 0 before/after each algorithm so the printed value
    is the number of fitness calls that algorithm used.

    Parameters
    ----------
    num_char : int
        Length of the string and also the alphabet size (max_val).
    """
    global count
    count = 0
    fitness_obj = mlrose.CustomFitness(heter_string_fn)
    opt = mlrose.DiscreteOpt(num_char, fitness_obj, maximize=True,
                             max_val=num_char)

    best_state_climb, best_fitness_climb, fitness_curve_climb = \
        mlrose.random_hill_climb(opt, curve=True)
    print('---------------------random hill climb-------------------------')
    print('hill climbing best state for heter-string problem:', best_state_climb)
    print('hill climbing best fitness for heter-string problem:', best_fitness_climb)
    print('hill climbing fitting curve for heter-string problem:', fitness_curve_climb)
    print('number of fitness call used:', count)
    count = 0

    print('-------------------simulated annealing-------------------------')
    best_state_ann, best_fitness_ann, fitness_curve_ann = \
        mlrose.simulated_annealing(opt, schedule=mlrose.ExpDecay(), curve=True)
    print('simulated annealing best state for heter-string problem:', best_state_ann)
    print('simulated annealing best fitness for heter-string problem:', best_fitness_ann)
    print('simulated annealing fitting curve for heter-string problem:', fitness_curve_ann)
    print('number of fitness call used:', count)
    count = 0

    best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(
        opt, pop_size=200, mutation_prob=0.5, curve=True)
    print('---------------------genetic alg----------------------------')
    print('genetic algorithm best state for heter-string problem:', best_state_ga)
    # Fixed typo in the label below: 'fitnees' -> 'fitness'.
    print('genetic algorithm best fitness for heter-string problem:', best_fitness_ga)
    print('genetic algorithm fitness curve for heter-string problem:', fitness_curve_ga)
    print('number of fitness call used:', count)
    count = 0

    best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(
        opt, pop_size=200, curve=True)
    print('------------------------mimic-------------------------------')
    print('mimic best state for heter-string problem:', best_state_mimic)
    print('mimic best fitness value for heter-string problem:', best_fitness_mimic)
    print('mimic curve for heter-string problem:', fitness_curve_mimic)
    print('number of fitness call used:', count)
    count = 0

    # One subplot per algorithm; identical axes make the curves comparable.
    curves = [
        (fitness_curve_climb, 'random hill climb'),
        (fitness_curve_ann, 'simulated annealing'),
        (fitness_curve_ga, 'genetic algorithm'),
        (fitness_curve_mimic, 'mimic'),
    ]
    plt.figure(figsize=(10, 10))
    for idx, (curve, title) in enumerate(curves, start=1):
        plt.subplot(2, 2, idx)
        plt.plot(curve)
        plt.ylabel('fitness')
        plt.xlabel('num_iter')
        plt.ylim(20, 50)
        plt.title(title)
    plt.show()
Example #2
0
def queens_problem(n=8):
    """Build a maximization form of the n-queens problem.

    The standard ``mlrose.Queens`` fitness counts attacking pairs (lower is
    better); here it is flipped so that sum(0..n-1) minus the attack count
    is maximized instead.
    """
    def queens_max(state):
        # Total pair budget minus attacking pairs, per the original lambda.
        return sum(np.arange(len(state))) - mlrose.Queens().evaluate(state)

    fitness_queens = mlrose.CustomFitness(queens_max)
    return mlrose.DiscreteOpt(length=n,
                              fitness_fn=fitness_queens,
                              maximize=True,
                              max_val=n)
Example #3
0
def get_problem(size=100, t_pct=0.06):
    """Build a ContinuousPeaks optimization problem of the given length.

    Stores the raw ContinuousPeaks fitness in the module-level
    ``orig_fitness_func`` so the (module-level) ``fitness_func`` wrapper can
    delegate to it, then wraps that for mlrose-hiive.
    """
    global orig_fitness_func
    orig_fitness_func = mlrose_hiive.ContinuousPeaks(t_pct)

    wrapped = mlrose_hiive.CustomFitness(fitness_func)
    return mlrose_hiive.DiscreteOpt(length=size,
                                    fitness_fn=wrapped,
                                    maximize=True)
Example #4
0
def compare_multi_round_k_color():
    """Run 20 independent rounds of RHC, SA, GA and MIMIC on a 50-node,
    8-color graph-coloring problem and plot per-round best fitness plus the
    number of fitness evaluations each algorithm used.

    ``count`` is a module-level global that ``k_color_fit`` is expected to
    increment on every evaluation; it is reset after each algorithm so each
    recorded value covers exactly one run.
    """
    global count
    count = 0
    fitness_obj = mlrose.CustomFitness(k_color_fit)
    opt = mlrose.DiscreteOpt(50, fitness_obj, maximize=True, max_val=8)
    fitness_list_rhc = []
    fitness_list_ann = []
    fitness_list_genetic = []
    fitness_list_mimic = []
    num_sample_rhc = []
    num_sample_ann = []
    num_sample_genetic = []
    num_sample_mimic = []
    for i in range(20):
        best_state_climb, best_fitness_climb, fitness_curve_climb = mlrose.random_hill_climb(
            opt, curve=True)
        fitness_list_rhc.append(best_fitness_climb)
        num_sample_rhc.append(count)
        count = 0
        best_state_ann, best_fitness_ann, fitness_curve_ann = mlrose.simulated_annealing(
            opt, schedule=mlrose.ExpDecay(), curve=True)
        fitness_list_ann.append(best_fitness_ann)
        num_sample_ann.append(count)
        count = 0
        best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(
            opt, pop_size=500, mutation_prob=0.5, curve=True)
        fitness_list_genetic.append(best_fitness_ga)
        num_sample_genetic.append(count)
        count = 0
        best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(
            opt, pop_size=500, curve=True)
        fitness_list_mimic.append(best_fitness_mimic)
        num_sample_mimic.append(count)
        count = 0
    plt.figure(figsize=(10, 6))
    plt.subplot(121)
    plt.plot(fitness_list_rhc, label='rhc')
    plt.plot(fitness_list_ann, label='ann')
    plt.plot(fitness_list_genetic, label='ga')
    plt.plot(fitness_list_mimic, label='mimic')
    plt.xlabel('rounds')
    # Typo fixes in the labels: 'finess' -> 'fitness', 'comparision' -> 'comparison'.
    plt.ylabel('fitness value')
    plt.title('fitness value comparison')
    plt.legend(loc='lower right')
    plt.subplot(122)
    plt.plot(num_sample_rhc, label='rhc')
    plt.plot(num_sample_ann, label='ann')
    plt.plot(num_sample_genetic, label='ga')
    plt.plot(num_sample_mimic, label='mimic')
    plt.xlabel('rounds')
    plt.ylabel('fitness calls')
    plt.title('fitness call number comparison')
    plt.legend(loc='upper right')
    plt.show()
def main():
    """Compare RHC, SA, GA and MIMIC on a 16-queens problem and plot results."""
    show_progress = True
    runs = 20
    iteration_cap = 500

    # Custom fitness turns n-queens into a maximization problem instead of
    # the default minimizing Queens fitness.
    fitness_fn = mlrose.CustomFitness(queens_max)

    board_size = 16
    nq_problem = mlrose.QueensOpt(
        length=board_size,
        fitness_fn=fitness_fn,
        maximize=True,
    )

    # Enable MIMIC's fast sampling mode.
    nq_problem.set_mimic_fast_mode(True)

    # Random starting board: one row index per column.
    initial_state = np.random.randint(0, board_size, size=board_size)

    # Randomized hill climbing.
    rhc_fitness_dfs = n_queens_rhc(nq_problem, initial_state, iteration_cap,
                                   runs, show_progress)
    print('---')

    # Simulated annealing.
    sa_fitness_dfs = n_queens_sa(nq_problem, initial_state, iteration_cap,
                                 runs, show_progress)
    print('---')

    # Genetic algorithm.
    ga_fitness_dfs = n_queens_ga(nq_problem, iteration_cap, runs, show_progress)
    print('---')

    # MIMIC.
    mimic_fitness_dfs = n_queens_mimic(nq_problem, iteration_cap, runs,
                                       show_progress)
    print('---')

    # Side-by-side comparison plot of all four algorithms.
    plotting.compare_algos(
        problem_name='n_queens',
        rhc_dfs=rhc_fitness_dfs,
        sa_dfs=sa_fitness_dfs,
        ga_dfs=ga_fitness_dfs,
        mimic_dfs=mimic_fitness_dfs,
    )
def scale_ann(train_features_spam_norm, train_labels_spam,
              test_features_spam_norm, test_labels_spam):
    """Train the 237-weight spam network with simulated annealing at
    increasing training-set sizes and plot loss, accuracy and the number of
    fitness calls (tracked via the module-level ``count`` global).
    """
    global count
    train_acc_list = []
    test_acc_list = []
    fitness_call_list = []
    loss_list = []
    # 400, 800, ..., 4000 — hoisted once and reused for the x-axes below.
    training_sizes = np.arange(400, 4001, 400)
    for size in training_sizes:
        count = 0
        features_sub = train_features_spam_norm[:size, :]
        labels_sub = train_labels_spam[:size]
        fitness_obj = mlrose.CustomFitness(spam_nn_fit,
                                           train_features=features_sub,
                                           train_labels=labels_sub)
        opt = mlrose.DiscreteOpt(237, fitness_obj, maximize=True, max_val=1001)
        best_state, best_fitness, _ = mlrose.simulated_annealing(
            opt, schedule=mlrose.ExpDecay(exp_const=0.003), curve=True)
        loss_list.append(best_fitness)
        train_predict = predict(best_state, features_sub)
        test_predict = predict(best_state, test_features_spam_norm)
        fitness_call_list.append(count)
        train_acc_list.append(accuracy_score(labels_sub, train_predict))
        test_acc_list.append(accuracy_score(test_labels_spam, test_predict))
    plt.figure(figsize=(10, 6))
    plt.subplot(121)
    plt.plot(training_sizes, loss_list, label='-1*loss')
    plt.xlabel('training size')
    plt.ylabel('-1*loss')
    plt.title('loss versus training size')
    plt.legend()
    plt.subplot(122)
    plt.plot(training_sizes, train_acc_list, label='train')
    plt.plot(training_sizes, test_acc_list, label='test')
    plt.xlabel('training size')
    plt.ylabel('accuracy')
    plt.title('accuracy versus training size')
    plt.legend()
    plt.show()
    # fitness calls versus training size
    plt.figure(figsize=(6, 6))
    plt.plot(training_sizes, fitness_call_list, label='#.calls')
    plt.xlabel('training size')
    plt.ylabel('fitness calls')
    plt.legend()
    plt.show()
Example #7
0
def get_problem(size=100):
    """Build a knapsack optimization problem with ``size`` item types.

    Weights and values are drawn reproducibly (seed 42); the raw Knapsack
    fitness is stored in the module-level ``orig_fitness_func`` so the
    (module-level) ``fitness_func`` wrapper can delegate to it.
    """
    global orig_fitness_func

    seed = 42
    number_of_items_types = size
    max_weight_per_item = 25
    max_value_per_item = 10
    max_weight_pct = 0.35
    max_item_count = 10
    multiply_by_max_item_count = True

    # Reproducible random weights/values in [1, max_*].
    np.random.seed(seed)
    weights = 1 + np.random.randint(max_weight_per_item, size=number_of_items_types)
    values = 1 + np.random.randint(max_value_per_item, size=number_of_items_types)
    orig_fitness_func = mlrose_hiive.Knapsack(
        weights, values, max_weight_pct=max_weight_pct,
        max_item_count=max_item_count,
        multiply_by_max_item_count=multiply_by_max_item_count)

    wrapped = mlrose_hiive.CustomFitness(fitness_func)
    return mlrose_hiive.DiscreteOpt(length=number_of_items_types,
                                    fitness_fn=wrapped, maximize=True)
Example #8
0
def compare_gen_mimic():
    """Compare GA and MIMIC over 20 rounds on a single-variable n-peak
    problem (max_val=10000) and plot best fitness and fitness-call counts.

    ``count`` is a module-level global that ``n_peak_fit`` is expected to
    increment per evaluation; it is reset after each algorithm run.

    Bug fix: removed a stray block of plotting code (copied from
    ``scale_ann``) that followed the final ``plt.show()`` and referenced
    undefined names (``train_acc_list``, ``test_acc_list``,
    ``fitness_call_list``), which would always raise NameError.
    """
    fitness_obj = mlrose.CustomFitness(n_peak_fit)
    opt = mlrose.DiscreteOpt(1, fitness_obj, maximize=True, max_val=10000)
    global count
    count = 0
    iter_num_counter_ga = []
    fitness_list_ga = []
    iter_num_counter_mimic = []
    fitness_list_mimic = []
    for i in range(20):
        best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(
            opt, pop_size=20, mutation_prob=0.5, curve=True)
        iter_num_counter_ga.append(count)
        fitness_list_ga.append(best_fitness_ga)
        count = 0
        best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(
            opt, pop_size=20, curve=True)
        iter_num_counter_mimic.append(count)
        fitness_list_mimic.append(best_fitness_mimic)
        count = 0
    plt.figure(figsize=(8, 6))
    plt.subplot(121)
    plt.plot(fitness_list_ga, label='ga')
    plt.plot(fitness_list_mimic, label='mimic')
    plt.xlabel('rounds')
    # Typo fixes in the labels: 'finess' -> 'fitness', 'comparision' -> 'comparison'.
    plt.ylabel('fitness value')
    plt.title('fitness value comparison')
    plt.legend(loc='lower right')
    plt.subplot(122)
    plt.plot(iter_num_counter_ga, label='ga')
    plt.plot(iter_num_counter_mimic, label='mimic')
    plt.xlabel('rounds')
    plt.ylabel('fitness call no.')
    plt.title('fitness call number comparison')
    plt.legend(loc='upper right')
    plt.show()


if __name__ == "__main__":
    # Load the spam train/test split once and reuse it for every experiment.
    X_train, y_train, X_test, y_test = split_train_test_spam()
    # 237 integer weights in [0, 1000] encode the small neural network.
    nn_fitness = mlrose.CustomFitness(spam_nn_fit,
                                      train_features=X_train,
                                      train_labels=y_train)
    nn_problem = mlrose.DiscreteOpt(237, nn_fitness, maximize=True, max_val=1001)
    rhc(nn_problem)
    s_ann(nn_problem)
    gen_alg(nn_problem)
    method_compare(nn_problem, X_train, y_train, X_test, y_test)
    scale_ann(X_train, y_train, X_test, y_test)
Example #10
0
# Iteration budget for each optimizer run.
max_it = 5000
# NOTE(review): true division makes this a float (3750.0) — confirm the
# downstream max_attempts parameter accepts a non-integer value.
attempts = max_it * 3 / 4

# Per-algorithm accumulators: best fitness found and wall-clock time.
rhc_fitnesses = []
sa_fitnesses = []
ga_fitnesses = []
m_fitnesses = []
rhc_times = []
sa_times = []
ga_times = []
m_times = []

# Subproblem sizes to sweep, plus the final full-size problem.
test_range = [3, 4, 5, 6]
final_prob_len = 7

# Custom fitness (mults_of_2 is defined elsewhere in this script).
fitness_cust = mlr.CustomFitness(mults_of_2)
# Geometric cooling schedule for simulated annealing.
schedule = mlr.GeomDecay(init_temp=10, decay=0.95, min_temp=0.01)

print(f"Attempts:            {attempts}")
print(f"Max Iterations:      {max_it}")
print(f"Problem Sizes:       {test_range}")
print(f"Last Problem Size:   {final_prob_len}\n\n")

part2_time = 0.0

print(f"\n######### PART 2 #########\n")

for i in test_range:
    start = time.time()
    # Random initial state: i distinct values drawn from [0, 2**(i+1)).
    init = np.random.choice(2**(i + 1), size=i, replace=False)
    print(f"Running for subproblem size: {i}\n        Initialization: {init}")
Example #11
0
    plt.scatter(time_taken, best_score)
    for i, txt in enumerate(keep_pct):
        plt.annotate(s=str(txt), xy=(time_taken[i], best_score[i]))
    plt.legend(loc='best', title='Proportion of samples kept')
    plt.grid()
    generate_graph(graph_file + "mimic_scatter", graph_title + "MIMIC",
                   "Time Taken", "Best Score achieved")

    print('Proportion of samples kept: ', keep_pct)
    print('Best scores reached: ', best_score)
    print('Time taken to do that: ', time_taken)
    print('Function evaluations taken: ', fn_evals_taken)

if __name__ == "__main__":
    # Wrap the custom (call-counting) knapsack fitness for mlrose-hiive.
    fitness_fn = mlrose_hiive.CustomFitness(ks_fitness_fn)
    # Bit-string knapsack instance: one bit per item.
    problem_length = 10
    problem = mlrose_hiive.DiscreteOpt(length=problem_length,
                                       fitness_fn=fitness_fn,
                                       maximize=True, max_val=2)
    max_iters = 1500
    iterations = range(0, max_iters, 50)
    random_seed = 1
    graph_file = 'ks_'
    graph_title = 'Knapsack Problem - '
    print('***************Knapsack Optimization Problem*****************')
    # Random hill climbing.
    print('--------------Random Hill Climbing---------------')
    rhc(problem, iterations, random_seed, graph_file, graph_title)
    # Simulated annealing.
    print('--------------Simulated Annealing---------------')
    sa(problem, iterations, random_seed, graph_file, graph_title)
# Register one graph-coloring problem per node count in cN.
for node_count in cN:
    color_problem = colorGen.generate(RANDOM_STATE, number_of_nodes=node_count,
                                      max_connections_per_node=r, max_colors=k)
    problems_list.append((color_problem, f'color_{node_count}'))

# N-Queens - Custom fitness to maximize
# qN = 8
# queens_Prob = mlr.QueensOpt(length=qN)
def QMaxFit(state):
    """Count the pairs of queens that do NOT attack each other.

    ``state[i]`` is the row of the queen in column ``i``. A pair attacks if
    the queens share a row or sit on the same diagonal; the maximum return
    value is C(n, 2) when no queen attacks another.
    """
    n = len(state)
    safe_pairs = 0
    for col in range(n - 1):
        for other in range(col + 1, n):
            offset = other - col
            attacks = (state[other] == state[col]
                       or state[other] == state[col] + offset
                       or state[other] == state[col] - offset)
            if not attacks:
                safe_pairs += 1
    return safe_pairs

# Custom maximization fitness for n-queens; the theoretical maximum is
# C(n, 2), i.e. len(list(itertools.combinations(range(len(state)), 2))).
QMF_cust = mlr.CustomFitness(QMaxFit)
qN = [20, 35, 50, 65, 80, 95]
for board_size in qN:
    queens_prob = mlr.QueensOpt(length=board_size, fitness_fn=QMF_cust,
                                maximize=True)
    problems_list.append((queens_prob, f'queens_{board_size}'))

# Continuous Peaks
# pN = 12

t_pct = 0.15
peaksGen = mlr.ContinuousPeaksGenerator()
pN = [5, 10, 15, 30, 55, 100]
for peaks_len in pN:
    problems_list.append((peaksGen.generate(RANDOM_STATE, peaks_len, t_pct),
                          f'peaks_{peaks_len}'))

print('Building experiments...')
## Algorithms
Example #13
0
    # For all pairs of queens
    for i in range(len(state) - 1):
        for j in range(i + 1, len(state)):
            # Check for horizontal, diagonal-up and diagonal-down attacks
            if (state[j] != state[i]) \
                and (state[j] != state[i] + (j - i)) \
                and (state[j] != state[i] - (j - i)):

                # If no attacks, then increment counter
                fitness_cnt += 1

    return fitness_cnt

# Initialize custom fitness function object
# queens_max counts non-attacking queen pairs, so higher is better.
q_fitness = mlrh.CustomFitness(queens_max)
# q_fitness = mlrose.Queens()
# `length` is defined earlier in the script (not visible here).
prob = mlrh.QueensOpt(length=length,
                      fitness_fn=q_fitness,
                      maximize=True)
# Labels used by the mlrose-hiive runners for their output files/directory.
experiment_name = "queen_prob"
output_directory = "queen"
# SA
sa = mlrh.SARunner(problem=prob,
                   experiment_name=experiment_name,
                   output_directory=output_directory,
                   seed=random_state,
                   max_attempts=200,
                   iteration_list=[2000],
                   temperature_list=[0.01, 0.1, 1, 10, 100, 1000],
Example #14
0
                and (state[j] != state[i] - (j - i)):

                # If no attacks, then increment counter
                fitness += 1

    return fitness


# Check function is working correctly
state = np.array([1, 4, 1, 3, 5, 5, 2, 7])

# The fitness of this state should be 22
# (28 total pairs minus the 6 attacking pairs).
queens_max(state)

# Initialize custom fitness function object
fitness_cust = mlrose.CustomFitness(queens_max)

# Define optimization problem object
# 8 queens, one per column; each state value in [0, 7] is a row index.
problem_cust = mlrose.DiscreteOpt(length=8,
                                  fitness_fn=fitness_cust,
                                  maximize=True,
                                  max_val=8)
# Solve using simulated annealing - attempt 1
# NOTE(review): `schedule` and `init_state` are defined earlier in the
# script (not visible here) — confirm they exist before this point.
best_state, best_fitness, fitness_curve = mlrose.simulated_annealing(
    problem_cust,
    schedule=schedule,
    max_attempts=10,
    max_iters=1000,
    init_state=init_state,
    random_state=1)
print('Example 2 - Attempt 1')
Example #15
0
        util.plot_MIMpop(algo, problem_fit, max_attempts, mimpct, maxIter,
                         seed, min)

        # MIMIC - Keep Percent
        util.plot_MIMICpct(algo, problem_fit, max_attempts, mimpop, maxIter,
                           seed)


if __name__ == "__main__":
    print('Continuous Peaks - SA best')
    seed = 1
    random.seed(seed)
    # 100-bit continuous-peaks instance.
    bit_length = 100
    # Geometric cooling schedule for simulated annealing.
    SAschedule = mlrose.GeomDecay(init_temp=10000, decay=0.95, min_temp=0.01)
    # fitness = mlrose.ContinuousPeaks(t_pct=0.02)
    # util.fit_eval_count presumably wraps the fitness class so evaluations
    # can be counted — confirm against util's implementation.
    fitness = mlrose.CustomFitness(
        util.fit_eval_count(mlrose.ContinuousPeaks, t_pct=0.02))
    problem_fit = mlrose.DiscreteOpt(length=bit_length,
                                     fitness_fn=fitness,
                                     maximize=True,
                                     max_val=2)

    run_ro_algos('CP-testingran',
                 problem_fit,
                 sa_sched=SAschedule,
                 init_temp=1,
                 gapop=900,
                 gamut=0.7,
                 mimpop=800,
                 mimpct=0.3,
                 seed=seed,
                 restarts=100,
Example #16
0
# Iteration budget for each optimizer run.
max_it = 2000
# NOTE(review): true division makes this a float (1000.0) — confirm the
# downstream max_attempts parameter accepts a non-integer value.
attempts = max_it / 2

# Per-algorithm accumulators: best fitness found and wall-clock time.
rhc_fitnesses = []
sa_fitnesses = []
ga_fitnesses = []
m_fitnesses = []
rhc_times = []
sa_times = []
ga_times = []
m_times = []

# Subproblem sizes to sweep, plus the final full-size problem.
test_range = [4, 6, 8, 10, 15, 20, 30]
final_prob_len = 60

# Custom fitness (orderedmax is defined elsewhere in this script).
fitness_cust = mlr.CustomFitness(orderedmax)
# Arithmetic cooling schedule for simulated annealing.
schedule = mlr.ArithDecay()

print(f"Attempts:            {attempts}")
print(f"Max Iterations:      {max_it}")
print(f"Problem Sizes:       {test_range}")
print(f"Last Problem Size:   {final_prob_len}\n\n")

print(f"\n######### PART 2 #########\n")

part2_time = 0.0

for i in test_range:
    start = time.time()
    print(f"Running for subproblem size: {i}")
Example #17
0
# 15-queens problem using the `fitness` object defined earlier in the
# script. NOTE(review): mlrose's DiscreteOpt documents defaults of
# maximize=True and max_val=2 — confirm max_val=2 is intended for 15 queens.
problem_fit = mlrose.DiscreteOpt(length = 15, fitness_fn= fitness)

# Solve the problem with genetic algorithm
plotting.plot_optimization_problem_fitness(problem_fit, 100, 2, 'N-Queens')

# CODE SOURCED FROM
# https://mlrose.readthedocs.io/en/stable/source/tutorial1.html
# Define alternative N-Queens fitness function for maximization problem.
def queens_max(state):
    """Return the number of non-attacking queen pairs on the board.

    ``state[i]`` is the row of the queen in column ``i``; a pair attacks if
    the two queens share a row or a diagonal. The maximum is C(n, 2) for a
    fully solved board, making this a maximization fitness.
    """
    n = len(state)
    non_attacking = 0

    for i in range(n - 1):
        row = state[i]
        for j in range(i + 1, n):
            gap = j - i
            # Safe only if queen j is off queen i's row and both diagonals.
            if state[j] not in (row, row + gap, row - gap):
                non_attacking += 1

    return non_attacking

# Initialize custom fitness function object.
# Wraps queens_max so mlrose treats it as a maximization fitness.
cust_fitness = mlrose.CustomFitness(queens_max)
Example #18
0
# Global counter incremented by fitness_counter on every evaluation.
eval_count = 0

# Number of repeated runs (one per seed) for each algorithm.
rhc_repeats = 1
ga_repeats = 200
sa_repeats = 1
mimic_repeats = 1


def fitness_counter(state, _fitness=mlrose.FourPeaks(t_pct=0.25)):
    """Four Peaks (t_pct=0.25) fitness that counts evaluations.

    Increments the module-level ``eval_count`` on every call so the number
    of fitness evaluations an optimizer performs can be reported.

    Improvement: the original rebuilt the FourPeaks object on every call;
    since t_pct is fixed it is now constructed once at definition time via
    a defaulted keyword argument (assumed stateless for a fixed t_pct —
    interface stays backward-compatible).
    """
    global eval_count
    eval_count += 1
    return _fitness.evaluate(state)


# Wrap the counting fitness and define a 40-bit Four Peaks problem.
fitness = mlrose.CustomFitness(fitness_counter)
problem = mlrose.DiscreteOpt(length=40,
                             fitness_fn=fitness,
                             maximize=True,
                             max_val=2)

# Per-seed GA result accumulators: eval counts, run stats and fitness curves.
ga_evals_list = []
df_ga_stats_list = []
df_ga_curves_list = []
for r in range(ga_repeats):
    ga = GARunner(problem=problem,
                  experiment_name="ga_test",
                  output_directory="./results/",
                  seed=r,
                  iteration_list=2 ** np.arange(18),
                  max_attempts=50,
Example #19
0
def get_problem(size=50):
    """Build a discrete optimization problem of the given length, scored by
    the module-level ``fitness_func`` (wrapped so mlrose-hiive can call it).
    """
    wrapped_fitness = mlrose_hiive.CustomFitness(fitness_func)
    return mlrose_hiive.DiscreteOpt(length=size,
                                    fitness_fn=wrapped_fitness,
                                    maximize=True)