# ---- Beispiel #1 (Example 1) ----
 def create_problem(self):
     """Build the Six Peaks optimization problem for this experiment.

     Returns a bit-string ``mlrose.DiscreteOpt`` instance of
     ``self.length`` positions (max_val=2) whose fitness is SixPeaks
     with threshold ``self.t_pct``, set up for maximization.
     """
     six_peaks = mlrose.SixPeaks(t_pct=self.t_pct)
     return mlrose.DiscreteOpt(length=self.length,
                               fitness_fn=six_peaks,
                               maximize=True,
                               max_val=2)
#Michael Groff

import numpy as np
import matplotlib.pyplot as plt
import time
import mlrose

if __name__ == "__main__":
    # NOTE(review): the banner says "Four Peaks" but the fitness object
    # below is SixPeaks -- confirm which problem was intended before
    # renaming either.
    print("Four Peaks Problem")
    fitness = mlrose.SixPeaks(t_pct=0.15)
    bits = range(10, 105, 5)  # problem sizes: 10, 15, ..., 100 bits

    # Per-algorithm results, appended once per problem size:
    # the *t lists hold elapsed wall-clock seconds, the *o lists hold the
    # best fitness value returned by the solver.
    rhct = []  # random hill climbing: time
    rhco = []  # random hill climbing: best fitness
    sat = []   # simulated annealing: time
    sao = []   # simulated annealing: best fitness
    gat = []   # genetic algorithm: time
    gao = []   # genetic algorithm: best fitness
    mmt = []   # MIMIC: time
    mmo = []   # MIMIC: best fitness

    for i in bits:
        print(i)
        popt = mlrose.DiscreteOpt(length=i, fitness_fn=fitness)

        # FIX: time.clock() was deprecated since Python 3.3 and removed in
        # Python 3.8; time.perf_counter() is the documented replacement for
        # timing short durations.
        t = time.perf_counter()
        state, opt = mlrose.random_hill_climb(problem=popt, max_attempts=10)
        s = time.perf_counter()
        rhct.append(s - t)
        rhco.append(opt)

        t = time.perf_counter()
# ---- Beispiel #3 (Example 3) ----
    # (Fragment: the enclosing plotting function's header is not visible in
    # this chunk; `axs`, `stats`, and `fig` are defined above it.)
    axs[1].set_title("Best Fitness")
    # Pivot the per-run "best_fitness" series into one column per run.
    best_fitness = stats["best_fitness"].unstack()
    if (best_fitness.max().max() > 1000000):
        # Very large fitness values: rescale each row by 10^(name/10) so the
        # curves fit on a shared axis.  Presumably `x.name` is the problem
        # size N, matching the 10^N ylabel -- TODO confirm against caller.
        axs[1].set_ylabel(r"$\frac{fitness}{10^{N}}$")
        best_fitness.apply(lambda x: x / np.power(10, x.name / 10),
                           axis=1).plot(ax=axs[1])
    else:
        best_fitness.plot(ax=axs[1])
        axs[1].set_ylabel("fitness")
    axs[1].legend()

    return fig


# Fitness functions exercised by the experiment runner below.
continuous_peaks = mlrose.ContinuousPeaks(t_pct=0.1)
six_peaks = mlrose.SixPeaks(t_pct=0.1)
flip_flop = mlrose.FlipFlop()
# NOTE: prod_consec_one and func_convert_bin_swap are defined elsewhere in
# the original file (not visible in this chunk).
product_consec_ones = mlrose.CustomFitness(fitness_fn=prod_consec_one,
                                           problem_type="discrete")
# Counts the number of 1-bits in the state vector.
count_ones = mlrose.CustomFitness(fitness_fn=lambda state: sum(state),
                                  problem_type="discrete")
convert_bin_swap = mlrose.CustomFitness(fitness_fn=func_convert_bin_swap,
                                        problem_type="discrete")

if __name__ == "__main__":
    # One result list per problem; keys are the fitness objects themselves
    # (hashable via default object identity).
    records_by_prob = {six_peaks: [], flip_flop: [], convert_bin_swap: []}

    # Bit-string lengths to sweep: 10, 20, ..., 100.
    nbits = range(10, 101, 10)
    for fitness_fn, records in records_by_prob.items():
        print(fitness_fn)
        # (Loop body truncated here in this chunk.)
        for nbit in nbits:
# ---- Beispiel #4 (Example 4) ----
def main():
    """Run four randomized-optimization algorithms on a 50-bit Six Peaks
    problem and plot their fitness and timing curves.

    Algorithms: random hill climbing, simulated annealing (exponential
    decay schedule), a genetic algorithm, and MIMIC.  All runs use
    random_state=1 for reproducibility.
    """
    np.random.seed(0)
    init_state = np.random.randint(2, size=50)

    problem = mlrose.DiscreteOpt(length=len(init_state),
                                 fitness_fn=mlrose.SixPeaks(),
                                 maximize=True,
                                 max_val=2)
    # Exponential temperature decay for simulated annealing.
    schedule = mlrose.ExpDecay()

    # (progress banner, solver callable) pairs, executed in this order.
    solvers = [
        ('begin hill climb',
         lambda: mlrose.random_hill_climb(
             problem, max_attempts=10000, curve=True, random_state=1)),
        ('begin SA',
         lambda: mlrose.simulated_annealing(
             problem, max_attempts=250, max_iters=250, schedule=schedule,
             init_state=init_state, curve=True, random_state=1)),
        ('begin GA',
         lambda: mlrose.genetic_alg(
             problem, pop_size=200, mutation_prob=0.1, max_attempts=250,
             max_iters=250, curve=True, random_state=1)),
        ('begin MIMIC',
         lambda: mlrose.mimic(
             problem, pop_size=250, keep_pct=0.2, max_attempts=250,
             max_iters=250, curve=True, random_state=1, fast_mimic=True)),
    ]
    labels = ['RHC', 'SA', 'GA', 'MIMIC']

    iteration_axes = []
    fitness_curves = []
    timing_curves = []
    for banner, solve in solvers:
        print(banner)
        # NOTE: this 4-tuple unpack matches a modified mlrose that also
        # returns a timing curve alongside the fitness curve.
        best_state, best_fitness, fitness_curve, timing_curve = solve()
        print(best_state)
        print(best_fitness)
        iteration_axes.append(np.arange(0, len(fitness_curve)))
        fitness_curves.append(fitness_curve)
        timing_curves.append(timing_curve)

    # Fitness vs. iteration, one line per algorithm.
    for xs, ys, label in zip(iteration_axes, fitness_curves, labels):
        plt.plot(xs, ys, label=label)
    plt.legend()
    plt.title('Randomized Optimization Iterations vs Fitness Function Value')
    plt.xlabel('Function iteration count')
    plt.ylabel('Fitness function value')
    plt.show()
    plt.clf()

    # Elapsed time vs. iteration, one line per algorithm.
    for xs, ws, label in zip(iteration_axes, timing_curves, labels):
        plt.plot(xs, ws, label=label)
    plt.legend()
    plt.title('Randomized Optimization Time vs Fitness Function Value')
    plt.xlabel('Function iteration count')
    plt.ylabel('Time in Second')
    plt.show()