Example n. 1
import pandas as pd
import mlrose  # assumption: could equally be `import mlrose_hiive as mlrose`


def get_fitness_functions():
    df = pd.read_csv("./houston2008_order.csv")
    coord_list = list(df[['lat', 'long']].apply(tuple, axis=1))
    coord_list = coord_list[0:30]
    fitness_tsp = mlrose.TravellingSales(coords=coord_list)
    problem_tsp = mlrose.TSPOpt(length=len(coord_list),
                                fitness_fn=fitness_tsp,
                                maximize=False)

    fitness_fourpeak = mlrose.FourPeaks(t_pct=.3)
    problem_fourpeak = mlrose.DiscreteOpt(length=20,
                                          fitness_fn=fitness_fourpeak)

    fitness_flipflop = mlrose.FlipFlop()
    problem_flipflop = mlrose.DiscreteOpt(length=30,
                                          fitness_fn=fitness_flipflop)

    fitness_one_max = mlrose.OneMax()
    problem_one_max = mlrose.DiscreteOpt(
        length=35,
        fitness_fn=fitness_one_max,
    )

    weights = [10, 5, 2, 8, 15]
    values = [1, 2, 3, 4, 5]
    max_weight_pct = 0.6
    fitness_knapsack = mlrose.Knapsack(weights, values, max_weight_pct)
    problem_knapsack = mlrose.DiscreteOpt(length=5,
                                          fitness_fn=fitness_knapsack)

    # Return all of the problems built above (the original snippet constructed
    # the flip-flop and knapsack problems but never returned them).
    return {
        "tsp": problem_tsp,
        "four_peaks": problem_fourpeak,
        "flip_flop": problem_flipflop,
        "one_max": problem_one_max,
        "knapsack": problem_knapsack,
    }
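A short usage sketch (illustrative only; the optimizer call and its parameters are assumptions, not part of the original snippet):

problems = get_fitness_functions()
for name, problem in problems.items():
    best_state, best_fitness, _ = mlrose.simulated_annealing(
        problem, max_attempts=10, max_iters=1000, curve=True, random_state=42)
    print(name, best_fitness)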
Example n. 2
import numpy as np
import mlrose  # assumption: could equally be `import mlrose_hiive as mlrose`


def get_one_max(size):
    one_max = mlrose.OneMax()
    # Sanity-check evaluation on a fixed bit string; the result is discarded.
    state = np.array([0, 1, 0, 1, 1, 1, 1])
    one_max.evaluate(state)
    problem = mlrose.DiscreteOpt(
        length=size,
        fitness_fn=one_max,
        maximize=True,
        max_val=2  # binary values only, i.e. a bit string
    )
    return problem
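Illustrative only: one way the helper could be exercised (the random_hill_climb call and its parameters are assumptions, not part of the original):

problem = get_one_max(50)
best_state, best_fitness, _ = mlrose.random_hill_climb(
    problem, max_attempts=20, max_iters=1000, restarts=5,
    curve=True, random_state=42)
print(best_fitness)  # equals the number of ones in best_state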
    print("starting up")
    run_ga = (args.ga == 'y')
    run_sa = (args.sa == 'y')
    run_rh = (args.rh == 'y')
    run_mi = (args.mi == 'y')
    run_plots = (args.plot == 'y')

    vLength = 50

    iterlist = list(range(vLength * 2))
    galist = list(range(vLength * 2))

    mimiciterlist = list(range(vLength))

    max_one = mlrose.OneMax()
    run_toy_data(max_one, vLength, "max_one", run_ga, run_sa, run_rh, run_mi,
                 iterlist, galist, mimiciterlist)

    four_peaks = mlrose.FourPeaks(t_pct=.2)
    run_toy_data(four_peaks, vLength, "four_peaks", run_ga, run_sa, run_rh,
                 run_mi, iterlist, galist, mimiciterlist)

    weights = [random.randint(5, 30) for i in range(vLength)]
    values = [random.randint(1, 5) for i in range(vLength)]
    max_weight_pct = 0.6
    knapsack = mlrose.Knapsack(weights, values, max_weight_pct)
    run_toy_data(knapsack, vLength, "knapsack", run_ga, run_sa, run_rh, run_mi,
                 iterlist, galist, mimiciterlist)

    if run_plots:
Example n. 4
gen_times = []
gen_evals = []
mimic_times = []
mimic_evals = []
sa_times = []      # assumed to be initialised earlier in the original script;
sa_evals = []      # added here so this excerpt runs on its own
input_sizes = []
for i in range(10, 51, 10):  # input sizes 10, 20, 30, 40, 50 (i is never 0)
#     ex = {"weights": [random.randint(1, 20) for i in range(i)], "values": [random.randint(1, 10) for i in range(i)], "state": np.array([random.randint(0, 2) for i in range(i)])}
    input_sizes.append(i)
#     weights = ex['weights']
#     values = ex['values']
#     state = ex['state']
#     edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
#     state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])
    state = np.array([random.randint(0, 2) for _ in range(i)])  # values in {0, 1, 2}
    fitness = mlrose_hiive.OneMax()
    problem = mlrose_hiive.DiscreteOpt(length=len(state), fitness_fn=fitness,
                                       maximize=True, max_val=int(max(state)) + 1)
#     problem = mlrose_hiive.DiscreteOpt(length = 5, fitness_fn = fitness, maximize = False, max_val = 2)
#     problem = mlrose_hiive.DiscreteOpt(length = len(state), fitness_fn = fitness, maximize = True, max_val = int(max(state))+1)
    times = []
    best_scores = []

    start_time = time.time()
    # `sa` is a project-specific wrapper (defined elsewhere) around simulated annealing.
    best_state, best_fitness, fitness_curve = sa(problem, state, 30, 1000)
    elapsed_time = time.time() - start_time
    print(elapsed_time)
    times.append(elapsed_time*1000)
    best_scores.append(best_fitness)
    sa_times.append(elapsed_time*1000)
    sa_evals.append(len(fitness_curve))
    plt.close()
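A possible follow-up plot for the timing data gathered above (illustrative; it assumes matplotlib.pyplot is imported as plt, as the plt.close() call above implies, and the file name is an assumption):

plt.plot(input_sizes, sa_times, marker="o", label="SA wall-clock time (ms)")
plt.xlabel("Problem size")
plt.ylabel("Time (ms)")
plt.legend()
plt.savefig("onemax_sa_times.png")
plt.close()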
Example n. 5
# -*- coding: utf-8 -*-

import mlrose_hiive as mlrose
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from time import process_time

print("Running OneMax...")

fitness = mlrose.OneMax()
problem = mlrose.DiscreteOpt(100, fitness)

RANDOM_SEED = 42
MAX_ATTEMPTS = 100

#%% tuning for SA
curve_list = []
decays = [0.999, 0.99, 0.9]
for d in decays:
    schedule = mlrose.GeomDecay(decay=d)
    _, _, curve = mlrose.simulated_annealing(
        problem,
        schedule=schedule,
        max_attempts=MAX_ATTEMPTS,
        max_iters=500,
        curve=True,
        random_state=RANDOM_SEED,
    )
    curve_list.append(curve)
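A possible comparison plot for the three decay schedules above (a sketch; it assumes mlrose_hiive's fitness_curve layout, where column 0 of each curve holds the per-iteration fitness, and the file name is an assumption):

for d, curve in zip(decays, curve_list):
    plt.plot(curve[:, 0], label=f"GeomDecay(decay={d})")
plt.xlabel("Iteration")
plt.ylabel("Fitness")
plt.title("OneMax: SA decay tuning")
plt.legend()
plt.savefig("sa_decay_tuning.png")
plt.close()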
Example n. 6
def one_max():
    print('One Max')
    sa = []
    rhc = []
    ga = []
    mim = []

    input_sizes = [100]

    for i in input_sizes:
        state = np.array([np.random.randint(0, 2) for _ in range(i)])
        # state = np.zeros(i)
        fitness = mlr.OneMax()
        problem = mlr.DiscreteOpt(length=i,
                                  fitness_fn=fitness,
                                  maximize=True,
                                  max_val=2)

        best_state, best_fitness, fitness_curve, time = randomized_hill_climb(
            problem, state, 100, 600)
        rhc.append((best_fitness, fitness_curve, time))

        best_state, best_fitness, fitness_curve, time = simulated_annealing(
            problem, state, 100, 600)
        sa.append((best_fitness, fitness_curve, time))

        best_state, best_fitness, fitness_curve, time = genetic_algorithm(
            problem, state, 100, 600)
        ga.append((best_fitness, fitness_curve, time))

        best_state, best_fitness, fitness_curve, time = mimic(
            problem, state, 100, 600)
        mim.append((best_fitness, fitness_curve, time))

    # Plot each algorithm's fitness curve with the same call, colour-coded.
    for data, color, label in [(rhc, "blue", "RHC"), (sa, "orange", "SA"),
                               (ga, "green", "GA"), (mim, "red", "MIMIC")]:
        curve = data[0][1]
        plot_data([i + 1 for i in range(len(curve))],
                  curve,
                  title="OneMax (Input Size = " + str(len(state)) + ")",
                  x_label="Iterations",
                  y_label="Fitness Score",
                  color=color,
                  label=label)

    title = 'One Max'

    plt.savefig('output/' + title + '.png')
    plt.close()
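The four calls above go through project-specific wrappers (randomized_hill_climb, simulated_annealing, genetic_algorithm, mimic) that are not shown. A hypothetical sketch of one of them, assuming mlr aliases mlrose_hiive (whose fitness_curve stores fitness in column 0), only to illustrate the expected (state, fitness, curve, time) return shape:

import time as time_module

def randomized_hill_climb(problem, init_state, max_attempts, max_iters):
    # Hypothetical wrapper: run RHC and also report wall-clock time.
    start = time_module.time()
    best_state, best_fitness, curve = mlr.random_hill_climb(
        problem, init_state=init_state, max_attempts=max_attempts,
        max_iters=max_iters, curve=True, random_state=42)
    return best_state, best_fitness, curve[:, 0], time_module.time() - start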
Example n. 7
def path(state):
    # `foo` is a helper defined elsewhere in the original module.
    if foo(state) > (2 ** (len(state) - 1)):
        return 100
    return 0


def cf2(state):
    score = 0
    for i in range(len(state)):
        score += (state[i] * (i + 1) % 7)
    return score


FITNESS_FUNCS = {
    'fourpeaks': mlrose.FourPeaks(),
    'onemax': mlrose.OneMax(),
    #    'path': mlrose.CustomFitness(path, problem_type='discrete'),
    'flipflop': mlrose.FlipFlop(),
    #    'cliffs': mlrose.CustomFitness(cf1, problem_type='discrete'),
    #    'cliffs': mlrose.CustomFitness(is_larger, problem_type='discrete'),
    #    'max2color': mlrose.MaxKColorGenerator.generate(seed=42, number_of_nodes=PROBLEM_LENGTH, max_colors=2),
    #    'mod': mlrose.CustomFitness(cf2, problem_type='discrete')
}

RANDOM_STATE = 42
DEFAULTS = {'random_state': RANDOM_STATE, 'curve': True, 'max_attempts': 10}

ALGORITHMS = {
    'rhc': lambda p: mlrose.random_hill_climb(p, **DEFAULTS),
    'sa': lambda p: mlrose.simulated_annealing(p, **DEFAULTS),
    'ga': lambda p: mlrose.genetic_alg(p, **DEFAULTS),
Example n. 8
import mlrose_hiive

eval_count = 0  # global counter of fitness evaluations


def om_fitness_fn(state):
    # Count every OneMax evaluation via the module-level counter.
    global eval_count
    fitness = mlrose_hiive.OneMax()
    eval_count += 1
    return fitness.evaluate(state)
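A minimal sketch of how this counting wrapper might be wired into a problem (the CustomFitness/DiscreteOpt setup and the problem size are illustrative assumptions):

fitness = mlrose_hiive.CustomFitness(om_fitness_fn, problem_type='discrete')
problem = mlrose_hiive.DiscreteOpt(length=50, fitness_fn=fitness,
                                   maximize=True, max_val=2)
best_state, best_fitness, _ = mlrose_hiive.simulated_annealing(
    problem, max_attempts=10, max_iters=1000, random_state=42)
print("fitness evaluations:", eval_count)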
Example n. 9
    def run_OneMax(self, mode=None):
        fitness_fn = mlrose.OneMax()
        self.run_complexity(fitness_fn, mode)