def OptKnapsack():
    fitness = mlrose.Knapsack(weights=[
        10, 5, 2, 8, 15, 4, 3, 6, 7, 20, 21, 25, 22, 28, 25, 24, 23, 26, 27, 30
    ],
                              values=[
                                  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                                  14, 15, 16, 17, 18, 19, 20
                              ],
                              max_weight_pct=0.6)
    validate(fitness, size, 'Knapsack')


def knapsack(n_items=5):
    max_val = 5
    weights = np.random.choice(range(1, 10), n_items)
    values = np.random.choice(range(1, max_val), n_items)

    fitness_fn = mlrose.Knapsack(weights, values)

    problem = mlrose.DiscreteOpt(length=n_items,
                                 fitness_fn=fitness_fn,
                                 max_val=max_val)

    return problem
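
The helper above only constructs the problem object. As a minimal sketch of solving it (assuming `numpy` and `mlrose` are imported under the names the helper uses; the solver settings below are purely illustrative):

import numpy as np  # the helper above uses np.random.choice
import mlrose

problem = knapsack(n_items=5)  # helper defined above

# Purely illustrative solver settings; any of the other mlrose solvers would work too.
best_state, best_fitness = mlrose.simulated_annealing(problem,
                                                      schedule=mlrose.ExpDecay(),
                                                      max_attempts=20,
                                                      max_iters=1000,
                                                      random_state=42)
print(best_state, best_fitness)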
Example #3
    def create_problem(self):
        """weights = [10, 5, 2, 8, 15, 4, 12, 9, 7]
        values =  [1, 2, 3, 4, 5, 6, 7, 8, 9]"""
        weights = [10, 5, 2, 8, 15]
        values = [1, 2, 3, 4, 5]
        max_weight_pct = 0.6
        fitness = mlrose.Knapsack(weights, values, max_weight_pct)
        problem = mlrose.DiscreteOpt(length=5,
                                     fitness_fn=fitness,
                                     maximize=True,
                                     max_val=6)
        return problem
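
For reference, the fitness object built above can be checked directly on hand-picked states. A small sketch, assuming mlrose's standard Knapsack rule that a state whose total weight exceeds the cap (max_weight_pct times the total weight, here 0.6 * 40 = 24) scores 0:

import numpy as np
import mlrose

# Same instance as create_problem above; the weight cap is 0.6 * (10+5+2+8+15) = 24.
fitness = mlrose.Knapsack([10, 5, 2, 8, 15], [1, 2, 3, 4, 5], 0.6)

print(fitness.evaluate(np.array([1, 1, 0, 1, 0])))  # weight 23 <= 24, so this should score 1 + 2 + 4 = 7
print(fitness.evaluate(np.array([1, 1, 1, 1, 0])))  # weight 25 > 24, so this should score 0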
Example #4
    def Knapsack(self, length=10, max_weight_pct=0.2, verbose=False):
        def gen_data(length):
            weights = []
            values = []
            max_weight = 50
            max_val = 50
            for i in range(length):
                weights.append(np.random.randint(1, max_weight))
                values.append(np.random.randint(1, max_val))
            return [weights, values]

        self.problem = 'knapsack{l}'.format(l=length)
        self.verbose = verbose
        weights, values = gen_data(length)
        fitness_fn = mlrose.Knapsack(weights, values, max_weight_pct)
        # define optimization problem object
        self.problem_fit = mlrose.DiscreteOpt(length=len(weights),
                                              fitness_fn=fitness_fn,
                                              maximize=True)
Example #5
File: a2.py Project: rkaufholz3/a2
def fitness_function(f, bits, rs, verbose):

    if verbose:
        print('\n\n----------', f, ':', bits, 'bits ----------')

    if f == 'Four Peaks':
        fitness_fn = mlrose.FourPeaks(
            t_pct=0.15
        )  # Note: T= np.ceil(t_pct * n), per source code for FourPeaks.evaluate

    elif f == 'MaxKColor':
        # fitness_fn = mlrose.MaxKColor(edges)  # default mlrose fitness function
        # edges = [(0, 1), (0, 2), (1, 3), (2, 3)]  # 4 nodes, 2 by 2 grid, no diagonals
        # edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
        edges = generate_graph(bits)
        kwargs = {'edges': edges}
        fitness_fn = mlrose.CustomFitness(
            kcolors_max,
            **kwargs)  # custom fitness function for maximization problem

    elif f == 'Knapsack':
        # weights = [10, 5, 2, 8, 15]
        # values = [1, 2, 3, 4, 5]
        weights, values = generate_knapsack(bits, rs)
        if verbose:
            print('\nKnapsack\n', weights, values)
        max_weight_pct = 0.6
        fitness_fn = mlrose.Knapsack(weights, values, max_weight_pct)

    elif f == 'FlipFlop':
        fitness_fn = mlrose.FlipFlop()

    # Check fitness for ad-hoc states
    # test_state = np.array([1, 0, 1, 1, 0])
    # print("Fitness for test_state", test_state, ":", fitness_fn.evaluate(test_state))

    return fitness_fn
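
The function above only returns a fitness object; presumably the project wraps it in a `DiscreteOpt` problem elsewhere. A hedged sketch of that wrapping for the Knapsack case, with hand-written weights/values standing in for the project's `generate_knapsack` helper (not shown here):

import mlrose

bits = 8
weights = [12, 7, 3, 9, 5, 14, 6, 8]  # stand-in data for generate_knapsack(bits, rs)
values = [4, 2, 1, 6, 3, 8, 5, 7]

fitness_fn = mlrose.Knapsack(weights, values, max_weight_pct=0.6)
problem = mlrose.DiscreteOpt(length=bits, fitness_fn=fitness_fn,
                             maximize=True, max_val=2)
best_state, best_fitness = mlrose.genetic_alg(problem, pop_size=200,
                                              mutation_prob=0.1,
                                              max_attempts=20, random_state=1)
print(best_state, best_fitness)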
Example #6
    y = state[10] + 2 * state[11] + 4 * state[12] + 8 * state[13] + 16 * state[
        14] + 32 * state[15] + 64 * state[16] + 128 * state[17] + 256 * state[
            18] + 512 * state[19]
    z = max(
        0, 10 * np.sin(x / 20 + 2.3) + 4 * (x % 10) + 10 * np.sin(y / 25 + 1) +
        4 * (y % 15) + 20)
    return z


multipeak_fn = mlrose.CustomFitness(multipeak_eq)

# Knapsack
knapsack_weights = [5, 10, 15, 20, 25, 30, 35, 40, 45]
knapsack_values = np.arange(1, len(knapsack_weights) + 1)
knapsack_max_weight = 0.7
knapsack_fn = mlrose.Knapsack(knapsack_weights, knapsack_values,
                              knapsack_max_weight)

# K-colors
kcolor_edges = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),
                (0, 9), (1, 4), (1, 5), (1, 6), (1, 7), (2, 3), (2, 5), (2, 7),
                (3, 5), (3, 6), (3, 7), (3, 9), (4, 5), (5, 6), (5, 7), (5, 8),
                (6, 7), (7, 8), (8, 9)]
kcolor_edges = list({tuple(sorted(edge)) for edge in kcolor_edges})
kcolor_fn = mlrose.MaxKColor(kcolor_edges)


def multipeak_prob_wrapper():
    return mlrose.DiscreteOpt(length=20, fitness_fn=multipeak_fn)


def knapsack_prob_wrapper():
    print('Genetic Algorithm: ')
    print("Accuracy: ", aveAccuracyScoreGA)
    print(
        '///////////////////////////////////////////////////////////////////////////////'
    )

    edges = [(0, 1), (1, 4), (1, 3), (2, 4), (3, 7), (4, 5), (4, 6), (5, 6),
             (5, 7), (6, 7), (5, 3), (0, 3), (0, 2), (1, 7), (1, 6), (0, 4),
             (1, 2), (3, 4), (8, 0), (8, 4), (8, 2), (8, 1)]
    weights = [3, 4, 5, 7, 9, 6, 10, 11]
    values = [1, 2, 3, 4, 5, 6, 7, 8]
    maxWeightPct = 2
    fitnessArray = [
        mlrose.Queens(),
        mlrose.MaxKColor(edges),
        mlrose.Knapsack(weights, values, maxWeightPct)
    ]
    titleArray = ['Queens', 'Max Color', 'Knapsack']
    for x in range(len(fitnessArray)):
        aveFitnessRHC = 0
        aveFitnessSA = 0
        aveFitnessGA = 0
        aveFitnessM = 0
        aveTimeRHC = 0
        aveTimeSA = 0
        aveTimeGA = 0
        aveTimeM = 0
        for y in range(0, 10):
            print('Results for: ', titleArray[x])
            bestFitnessRHC, timeRHC = radomHillClimb(fitnessArray[x], x)
            bestFitnessSA, timeSA = simulatedAnnealing(fitnessArray[x], x)
Example #8
import mlrose
import numpy as np  # needed for np.array(init_state) below
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D

weights = [
    10, 4, 3, 2, 7, 9, 6, 8, 12, 14, 15, 18, 43, 23, 67, 15, 27, 18, 14, 12,
    27, 45, 20
]
values = [
    500, 25, 45, 900, 345, 456, 768, 980, 234, 564, 432, 568, 43, 11, 10, 11,
    34, 23, 459, 23, 65, 32, 6889
]
print(len(weights), len(values))

max_weight_pct = 0.5
fitness = mlrose.Knapsack(weights, values, max_weight_pct)
problem_fit = mlrose.DiscreteOpt(length=19,
                                 fitness_fn=fitness,
                                 maximize=True,
                                 max_val=3)
schedule = mlrose.ExpDecay()
init_state = np.array(
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
fitness1 = mlrose.Knapsack(weights[0:4], values[0:4], max_weight_pct)
fitness2 = mlrose.Knapsack(weights[0:7], values[0:7], max_weight_pct)
fitness3 = mlrose.Knapsack(weights[0:9], values[0:9], max_weight_pct)
fitness4 = mlrose.Knapsack(weights[0:11], values[0:11], max_weight_pct)
fitness5 = mlrose.Knapsack(weights[0:13], values[0:13], max_weight_pct)
fitness6 = mlrose.Knapsack(weights[0:16], values[0:16], max_weight_pct)
fitness7 = mlrose.Knapsack(weights[0:19], values[0:19], max_weight_pct)
fitness8 = mlrose.Knapsack(weights[0:20], values[0:20], max_weight_pct)
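
The snippet is cut off before `fitness1`..`fitness8` are used, but they look like a problem-size sweep. A sketch of how such a loop might continue from the code above (the choice of simulated annealing and its settings are illustrative, not taken from the original project):

# Pair each truncated-weight fitness function with a problem of matching length
# and record the best fitness simulated annealing finds at each size.
sizes = [4, 7, 9, 11, 13, 16, 19, 20]
fitnesses = [fitness1, fitness2, fitness3, fitness4,
             fitness5, fitness6, fitness7, fitness8]

results = []
for n, fit in zip(sizes, fitnesses):
    prob = mlrose.DiscreteOpt(length=n, fitness_fn=fit, maximize=True, max_val=2)
    _, best = mlrose.simulated_annealing(prob, schedule=schedule,
                                         max_attempts=50, max_iters=2000,
                                         random_state=1)
    results.append(best)

plt.plot(sizes, results, marker='o')
plt.xlabel('Number of items')
plt.ylabel('Best fitness found')
plt.show()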
Example #9
import matplotlib.style as style
import random
import pandas as pd

lengths = [10, 20, 40, 60, 80, 100]
avgAcross = 10

length = 50
print("SOLVING KnapSack")
print("Get fitness for 100 iters on all algos")

itersList = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
problem = mlrose.DiscreteOpt(length=50,
                             maximize=True,
                             fitness_fn=mlrose.Knapsack(
                                 weights=np.random.uniform(size=length),
                                 values=np.arange(1, length + 1, 1),
                                 max_weight_pct=0.6))

fitnessRHCAll = []
fitnessSAAll = []
fitnessGAAll = []
fitnessMIMICAll = []

fitnessRHCMean = []
fitnessSAMean = []
fitnessGAMean = []
fitnessMIMICMean = []

fitnessRHCFilter = []
fitnessSAFilter = []
fitnessGAFilter = []
Example #10
import mlrose as ml  # the code below refers to mlrose via the 'ml' alias
import random
import time

sample_sizes = []

weights = []
values = []
fitness = []
for x in range(10):
    random.seed(x)
    tempw = random.sample(range(1, 61), (x*6)+6)
    random.seed(x)
    tempv = random.sample(range(2, 62), (x*6)+6)
    weights.append(tempw)
    values.append(tempv)
    fitness.append(ml.Knapsack(weights=weights[x], values=values[x]))





main_best_fit_gene = []
main_execution_times_gene = []
main_total_iterations_gene = []
main_opt_iterations_gene = []


for y in range(10):
    best_fit_gene = []
    execution_times_gene = []
    total_iterations_gene = []
Example #11
ga_statistics_fn_evals = []
ga_statistics_time = []
ga_statistics_fitness = []

mimic_statistics = []
mimic_statistics_fn_evals = []
mimic_statistics_time = []
mimic_statistics_fitness = []
mimic_state = []
mimic_fitness = []

index = 0

#Random Hill Climbing
for each in problem_size:
    fitness_fn = mlrose.Knapsack(weights_list[index], values_list[index],
                                 max_weight_pct)
    problem = mlrose.DiscreteOpt(length=each,
                                 fitness_fn=fitness_fn,
                                 maximize=True,
                                 max_val=2)
    best_state, best_fitness, statistics = mlrose.random_hill_climb(
        problem=problem,
        max_attempts=max_attempts,
        max_iters=max_iters,
        restarts=10,
        return_statistics=True)
    rhc_state.append(best_state)
    rhc_fitness.append(best_fitness)
    rhc_statistics_fn_evals.append(statistics['fitness_evals'])
    rhc_statistics_time.append(statistics['time'])
    rhc_statistics_fitness.append(best_fitness)
Example #12
File: knapsack.py Project: zparnold/cs7641
def main():
    name_of_exp = "Knapsack"
    weights = [10, 5, 2, 8, 15]
    values = [1, 2, 3, 4, 5]
    max_weight_pct = 0.6
    fitness = mlrose.Knapsack(weights, values, max_weight_pct)
    problem = mlrose.DiscreteOpt(length=5,
                                 fitness_fn=fitness,
                                 maximize=True,
                                 max_val=5)

    # Define initial state
    init_state = np.zeros(5)
    x_s = []
    y_s = []
    z_s = ['RHC', 'SA', 'GA', 'MIMIC']
    w_s = []
    max_val = 18.0
    found_flag = False
    for restarts in np.arange(0, 5):
        if found_flag:
            break
        for max_iter_atts in np.arange(10, 1000, 10):
            if found_flag:
                break
            # Solve problem using randomized hill climbing
            best_state, best_fitness, learning_curve, timing_curve = mlrose.random_hill_climb(
                problem,
                max_attempts=int(max_iter_atts),
                max_iters=int(max_iter_atts),
                restarts=int(restarts),
                init_state=init_state,
                curve=True,
                random_state=1)
            if best_fitness >= max_val:
                x_s.append(np.arange(0, len(learning_curve)))
                y_s.append(learning_curve)
                w_s.append(timing_curve)
                print(best_state)
                print(best_fitness)
                print(max_iter_atts)
                print(restarts)
                found_flag = True

    found_flag = False
    for sched in [mlrose.ExpDecay(), mlrose.GeomDecay(), mlrose.ArithDecay()]:
        if found_flag:
            break
        for max_iter_atts in np.arange(10, 1000, 10):
            if found_flag:
                break
            best_state, best_fitness, learning_curve, timing_curve = mlrose.simulated_annealing(
                problem,
                max_attempts=int(max_iter_atts),
                max_iters=int(max_iter_atts),
                schedule=sched,
                init_state=init_state,
                curve=True,
                random_state=1)
            if best_fitness >= max_val:
                x_s.append(np.arange(0, len(learning_curve)))
                y_s.append(learning_curve)
                w_s.append(timing_curve)
                print(best_state)
                print(best_fitness)
                print(max_iter_atts)
                print(sched)
                found_flag = True

    found_flag = False
    for prob in np.arange(0.1, 1.1, 0.1):
        if found_flag:
            break
        for pop_size in np.arange(100, 5000, 100):
            if found_flag:
                break
            for max_iter_atts in np.arange(10, 1000, 10):
                if found_flag:
                    break
                best_state, best_fitness, learning_curve, timing_curve = mlrose.genetic_alg(
                    problem,
                    pop_size=int(pop_size),
                    mutation_prob=prob,
                    max_attempts=int(max_iter_atts),
                    max_iters=int(max_iter_atts),
                    curve=True,
                    random_state=1)
                if best_fitness >= max_val:
                    x_s.append(np.arange(0, len(learning_curve)))
                    y_s.append(learning_curve)
                    w_s.append(timing_curve)
                    print(best_state)
                    print(best_fitness)
                    print(max_iter_atts)
                    print(prob)
                    print(pop_size)
                    found_flag = True

    found_flag = False
    for prob in np.arange(0.1, 1.1, 0.1):
        if found_flag:
            break
        for pop_size in np.arange(100, 5000, 100):
            if found_flag:
                break
            for max_iter_atts in np.arange(10, 1000, 10):
                if found_flag:
                    break
                best_state, best_fitness, learning_curve, timing_curve = mlrose.mimic(
                    problem,
                    pop_size=int(pop_size),
                    keep_pct=prob,
                    max_attempts=int(max_iter_atts),
                    max_iters=int(max_iter_atts),
                    curve=True,
                    random_state=1,
                    fast_mimic=True)
                if best_fitness >= max_val:
                    x_s.append(np.arange(0, len(learning_curve)))
                    y_s.append(learning_curve)
                    w_s.append(timing_curve)
                    print(best_state)
                    print(best_fitness)
                    print(max_iter_atts)
                    print(prob)
                    print(pop_size)
                    found_flag = True

    for x, y, z in zip(x_s, y_s, z_s):
        plt.plot(x, y, label=z)
    plt.legend()
    plt.title(
        'Randomized Optimization Iterations vs Fitness Function Value for {}'.
        format(name_of_exp))
    plt.xlabel('Function iteration count')
    plt.ylabel('Fitness function value')
    plt.show()
    plt.clf()
    for x, w, z in zip(x_s, w_s, z_s):
        plt.plot(x, w, label=z)
    plt.legend()
    plt.title(
        'Randomized Optimization Time vs Fitness Function Value for {}'.format(
            name_of_exp))
    plt.xlabel('Function iteration count')
    plt.ylabel('Time in Seconds')
    plt.show()
Example #13
def optimizationFunction(n, iMin, iMax, iStep, jMin, jMax, jStep):

    np.random.seed(100)
    optScenarios = ['Knap Sack', 'Four Peaks', 'K - Colors']
    for x in range(3):
        if x == 0:
            weights = np.random.randint(1,10,size=n)
            #values = np.random.randint(1,50,size=n)
            values = [i for i in range(1,n+1)]
            max_weight_pct = 0.5
            fitnessFunction = mlrose.Knapsack(weights, values, max_weight_pct)
            optModel = mlrose.DiscreteOpt(len(values), fitness_fn = fitnessFunction, maximize=True)
        elif x == 1:
            inp = [0] * int(n/2) + [1]*int(n - int(n/2))
            np.random.shuffle(inp)
            fitnessFunction = mlrose.FourPeaks(t_pct = 0.15)
            optModel = mlrose.DiscreteOpt(len(inp), fitness_fn = fitnessFunction, maximize =True)
        elif x == 2:
            edges = [(np.random.randint(0,n), np.random.randint(0,n)) for ab in range(n)]
            fitnessFunction = mlrose.MaxKColor(edges)
            optModel = mlrose.DiscreteOpt(len(edges), fitness_fn = fitnessFunction, maximize =True)
                
        decay = mlrose.ExpDecay()
    

        optResults = {'iterations':[],'attempts':[],'fitness':[],'time':[], 'optimization':[]}
        for i in range(iMin,iMax,iStep):

            for j in range(jMin,jMax,jStep):
                start_time = timer()
                best_state, best_fitness = mlrose.random_hill_climb(optModel, max_attempts = j, max_iters = i, random_state=100)
                opt_time = timer() - start_time
           
                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('Random Hill')
                start_time = timer()
                best_state, best_fitness = mlrose.simulated_annealing(optModel, schedule=decay, max_attempts = j,max_iters = i,random_state=1000)
                opt_time = timer() - start_time
                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('Simulated Annealing')
           
                start_time = timer()
                best_state, best_fitness = mlrose.genetic_alg(optModel, pop_size=200, mutation_prob = 0.25, max_attempts = j, max_iters = i, random_state=5000)
                opt_time = timer() - start_time
               
                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('Genetic Algorithm')
                start_time = timer()
                best_state, best_fitness = mlrose.mimic(optModel, pop_size = 200, keep_pct = 0.3, max_attempts = j, max_iters = i, random_state=150)
                opt_time = timer() - start_time
                optResults['iterations'].append(i)
                optResults['attempts'].append(j)
                optResults['fitness'].append(best_fitness)
                optResults['time'].append(opt_time)
                optResults['optimization'].append('MIMIC')
       
        optResults = pd.DataFrame(optResults)
 
        plotGraphs(optResults,optScenarios[x])
Example #14
def TravelingSalesman():
    # Create list of city coordinates
    """
    coords_list = [(1, 1), (4, 2), (5, 2), (6, 4), (4, 4), (3, 6), (1, 5), (2, 3), (10, 10), (15, 22), (2, 7), (19, 15), (11, 13),
                    (33, 24), (12, 17), (29, 2), (2, 5), (5, 19), (11, 36), (21, 37), (57, 22), (36, 12), (19, 20), (13, 19), (13, 54), (0, 5),
                    (44, 14), (45, 45), (23, 20), (16, 2), (3, 29), (21, 59), (18, 29), (2, 2), (19, 17), (39, 14), (9, 9), (48, 14), (59, 59), (29, 1)]
    """
    weights = [
        10, 5, 2, 8, 15, 20, 5, 2, 1, 20, 8, 6, 14, 22, 50, 5, 10, 12, 12, 18,
        26, 32, 4, 8, 10, 5, 22, 10, 5, 2, 8, 15, 20, 5, 2, 1, 20, 8, 6, 14,
        22, 50, 5, 10, 12, 12, 18, 26, 32, 4, 8, 10, 5, 22, 10, 5, 2, 8, 15,
        20, 5, 2, 1, 20, 8, 6, 14, 22, 50, 5, 10, 12, 12, 18, 26, 32, 4, 8, 10,
        5, 22
    ]
    values = [
        1, 2, 3, 4, 5, 2, 5, 10, 1, 4, 10, 2, 2, 8, 100, 5, 15, 24, 8, 14, 36,
        10, 5, 2, 120, 4, 8, 1, 2, 3, 4, 5, 2, 5, 10, 1, 4, 10, 2, 2, 8, 100,
        5, 15, 24, 8, 14, 36, 10, 5, 2, 120, 4, 8, 1, 2, 3, 4, 5, 2, 5, 10, 1,
        4, 10, 2, 2, 8, 100, 5, 15, 24, 8, 14, 36, 10, 5, 2, 120, 4, 8
    ]
    max_weight_pct = 0.6

    #n = len(coords_list)
    n = len(weights)

    # Initialize fitness function object using coords_list
    #fitness_coords = mlrose.TravellingSales(coords = coords_list)
    fitness = mlrose.Knapsack(weights, values, max_weight_pct)

    # Define optimization problem object
    problem = mlrose.DiscreteOpt(length=n, fitness_fn=fitness, maximize=True)

    best_state, best_fitness, score_curves, runtime = algorithm(
        problem=problem, max_attempts=500, max_iters=100)

    def plot_solution(algorithm_type: str, solution: list) -> None:
        """Given a solution, plot it and save the result to disk."""
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        ax.set_xlim((0, n))
        ax.set_ylim((0, n))

        count = 0
        for queen in solution:
            ax.add_patch(patches.Rectangle((queen, count), 1, 1))
            count += 1
        fig.savefig(algorithm_type + '_' +
                    ''.join([str(a) for a in solution]) + '.png',
                    dpi=150,
                    bbox_inches='tight')
        plt.close(fig)

    for key, value in best_state.items():
        plot_solution(key, value)

    def PlotData(x, y, x2, y2, x3, y3, x4, y4):
        plt.figure()
        plt.plot(x, y, 'r', x2, y2, 'g', x3, y3, 'b', x4, y4, 'c', alpha=0.5)
        plt.xlabel('Iteration')
        plt.ylabel('Fitness')

        red_patch = patches.Patch(color='red', label='Genetic Algorithms')
        green_patch = patches.Patch(color='green', label='Simulated Annealing')
        blue_patch = patches.Patch(color='blue', label='MIMIC')
        cyan_patch = patches.Patch(color='cyan', label='Randomized Hill Climb')
        plt.legend(handles=[red_patch, green_patch, blue_patch, cyan_patch])

        plt.show()

    PlotData(list(range(len(score_curves['genetic algorithm']))),
             score_curves['genetic algorithm'],
             list(range(len(score_curves['simulated annealing']))),
             score_curves['simulated annealing'],
             list(range(len(score_curves['mimic']))), score_curves['mimic'],
             list(range(len(score_curves['randomized hill climbing']))),
             score_curves['randomized hill climbing'])

    return best_state, best_fitness, score_curves, runtime
Example #15
#best_state, best_fitness, curve = ml.mimic(opt, curve=True, random_state = 2)

print(curve)

plt.plot(curve)
plt.savefig("4pks.png")
print(best_state)


print(best_fitness)

weights = [10.0, 5.0, 2.0, 8.0, 15.0, 3.0, 11.0]
values = [1, 12, 3, 14, 5, 10, 12]
max_weight_pct = 0.35
fitness_two = ml.Knapsack(weights=weights, values=values, max_weight_pct=max_weight_pct)

opt_two = ml.DiscreteOpt(length=7, fitness_fn=fitness_two)
#state = np.array([0, 1, 1, 1, 0])
#sprint(fitness_two.evaluate(state))

best_state_two, best_fitness_two = ml.genetic_alg(opt_two, random_state = 2)

#best_state_two, best_fitness_two = ml.random_hill_climb(opt_two, random_state = 2)

best_state_two, best_fitness_two = ml.simulated_annealing(opt_two, max_iters=1, random_state = 2)

#best_state_two, best_fitness_two = ml.mimic(opt_two, max_iters=1, random_state = 2)

print(best_state_two)
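
Note that in the snippet above the genetic-algorithm result is overwritten by the later `simulated_annealing` call before anything is printed. A small sketch that keeps each algorithm's result separate, reusing `ml` and `opt_two` from the snippet:

# Keep each solver's result separate instead of reusing the same variable names.
results = {
    'GA': ml.genetic_alg(opt_two, random_state=2),
    'RHC': ml.random_hill_climb(opt_two, random_state=2),
    'SA': ml.simulated_annealing(opt_two, random_state=2),
}
for name, (state, fit) in results.items():
    print(name, 'best fitness:', fit, 'state:', state)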
Example #16
#    print(tsp_iterations)
#    print(tsp_rhfit)
#    print(tsp_smfit)
#    print(tsp_gafit)
#    print(tsp_mifit)

#knapsack
print("Travelling Knapsack")
max_weight_pct = 0.6
for items in range(5, 10, 5):
    ks_weights = random.sample(range(1, items * 2), items)
    ks_values = list(range(1, items + 1))
    max_weight_pct = 0.6
    weight_range = random.randint(2, int(items / 2) + 1)
    init_state_ks = np.random.randint(weight_range, size=items)
    fitness_ks = mlrose.Knapsack(ks_weights, ks_values, max_weight_pct)
    problem_ks = mlrose.DiscreteOpt(length=init_state_ks.shape[0],
                                    fitness_fn=fitness_ks,
                                    maximize=False)
    #ks_iterations = [10,20,50,75,100,125,200,250,300]
    ks_iterations = [1]
    max_attempts = 10
    ks_rh_fitness = []
    ks_sm_fitness = []
    ks_ga_fitness = []
    ks_mi_fitness = []
    for ks_iter in ks_iterations:
        #ks_rh_starttime = timeit.timeit()
        #ks_rh_bestfit  =randomHill(problem_ks,init_state_ks,max_attempts,ks_iter)
        #ks_rh_endtime = timeit.timeit()
        #print("RH Start Time")
Example #17
#Knapsack Genetic
import mlrose
import numpy as np

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.metrics import accuracy_score

weights = [10, 5, 2, 8, 15]
values = [1, 2, 3, 4, 5]
max_weight_pct = 0.6

# Initialize fitness function object using pre-defined class
fitness = mlrose.Knapsack(weights, values, max_weight_pct)
# Define optimization problem object
problem = mlrose.DiscreteOpt(length = 5, fitness_fn = fitness, maximize=True, max_val=5)
# Define decay schedule
schedule = mlrose.ExpDecay()
# Define an initial state (note: not used by the genetic algorithm below)
init_state = np.array([0, 1, 2, 3, 4])
# Start Timer
from timeit import default_timer as timer
start = timer()
# Run genetic algorithm
best_state, best_fitness,fit_curve = mlrose.genetic_alg(problem,max_attempts = 1000, 
                                                      max_iters = 1000,
                                                      curve=True,random_state = 1)
# Stop Timer
elapsed_time = timer() - start # in seconds
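
The run above requests `curve=True` and records `elapsed_time`, but the snippet stops before reporting anything. A possible follow-up, reusing the snippet's variables (the plot is added here only for illustration):

import matplotlib.pyplot as plt

print('Best state:', best_state)
print('Best fitness:', best_fitness)
print('Elapsed time: {:.2f} s'.format(elapsed_time))

# fit_curve holds the fitness recorded at each iteration of the genetic algorithm run.
plt.plot(fit_curve)
plt.xlabel('Iteration')
plt.ylabel('Fitness')
plt.title('Knapsack - genetic algorithm')
plt.show()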
Example #18
            'elite': 0.3,
            'pop_size': ['5*problem_length'],
            'mutation_prob': 0.1,
            'max_attempts': 10
        },
    },
}

# for k, d in algorithms.items():
#     d['problem'] = problem
#     d['param_grid_problem'] = param_grid_problem

for algo_name, settings in algorithms.items():
    results[algo_name] = []
    for n_item in n_items:
        fitness_fn = mlrose.Knapsack(weights[n_item], values[n_item],
                                     maxWeight)

        param_grid_problem = {
            'fitness_fn': fitness_fn,
            'max_val': 2,
            'length': n_item,
        }
        thisSettings = copy.deepcopy(settings)
        thisSettings['problem'] = problem
        thisSettings['param_grid_problem'] = param_grid_problem

        print(f"Looping for {algo_name}, n_item={n_item}")
        gs = mlrose.GridSearch(
            algorithm=thisSettings['algorithm'],
            param_grid_algorithm=thisSettings['param_grid_algorithm'],
            problem=thisSettings['problem'],
Example #19
    mean_fitness_list.append(np.mean(fitness_score_MIMIC))
    mean_time_list.append(np.mean(time_score_MIMIC))
    fitness_list.append(fitness_score_MIMIC)
    print("mimic out ")
    # ===================================================================================
    # =============================  PLOT THE FINAL CURVE  ==============================================
    # ==================================================================================
    print("curve in")
    algorithms = ["RHC", "GA", "SA", "MIMIC"]
    algorithm_compare(fitness_list, algorithms, question, iterations)
    mean_time_compare(mean_time_list, algorithms, question)
    score_compare(mean_fitness_list, algorithms, question)
    print("curve out")


if __name__ == "__main__":
    print("Knap in")

    seed = 12
    w, v = get_data()
    fitness = mlrose.Knapsack(weights=w, values=v)
    problem = mlrose.DiscreteOpt(length=25, fitness_fn=fitness, maximize=True, max_val=2)
    print('Created Problem')

    find_best_param_ga_pop_size(seed, problem)
    find_best_param_ga_mutation_prob(seed, problem)
    find_best_param_sa_decay(seed, problem)
    find_best_param_mimic_keep_cpt(seed, problem)
    process_cp_with_four_algo(seed, problem)

    print("continue_peaks OUT")
Example #20
def run_knapsack():

    if not os.path.exists('./output/Knapsack/'):
        os.mkdir('./output/Knapsack/')

    logger = logging.getLogger(__name__)
    problem_size = 25
    prob_size_int = int(problem_size)
    weights = [
        int(np.random.randint(1, prob_size_int / 2))
        for _ in range(prob_size_int)
    ]
    values = [
        int(np.random.randint(1, prob_size_int / 2))
        for _ in range(prob_size_int)
    ]
    flip_fit = mlrose.Knapsack(weights, values)
    flop_state_gen = lambda: np.random.randint(0, 1, size=prob_size_int)
    init_state = flop_state_gen()
    problem = mlrose.DiscreteOpt(length=prob_size_int,
                                 fitness_fn=flip_fit,
                                 maximize=True,
                                 max_val=2)

    all_results = {}
    """
    print("Running simulated annealing montecarlos")
    sa_results, sa_timing = sim_annealing_runner(problem)
    sa_best_params=plot_montecarlo_sensitivity('Knapsack', 'sim_anneal', sa_results)
    plot_montecarlo_sensitivity('Knapsack', 'sim_anneal_timing', sa_timing)
    all_results['SA'] = [sa_results, sa_timing]


    print("Running random hill montecarlos")
    rhc_results, rhc_timing = rhc_runner(problem)
    rhc_best_params = plot_montecarlo_sensitivity('Knapsack', 'rhc', rhc_results)
    plot_montecarlo_sensitivity('Knapsack', 'rhc_timing', rhc_timing)
    all_results['RHC'] = [rhc_results, rhc_timing]

    print("Running genetic algorithm montecarlos")
    ga_results, ga_timing = ga_runner(problem, init_state)
    ga_best_params = plot_montecarlo_sensitivity('Knapsack', 'ga', ga_results)
    plot_montecarlo_sensitivity('Knapsack', 'ga_timing', ga_timing)
    all_results['GA'] = [ga_results, ga_timing]

    print("Running MIMIC montecarlos")
    mimic_results, mimic_timing = mimic_runner(problem, init_state)
    MIMIC_best_params = plot_montecarlo_sensitivity('Knapsack', 'mimic', mimic_results)
    plot_montecarlo_sensitivity('Knapsack', 'mimic_timing', mimic_timing)
    all_results['MIMIC'] = [mimic_results, mimic_timing]

    """
    with open('./output/Knapsack/flipflip_data.pickle', 'wb') as handle:
        pickle.dump(all_results, handle, protocol=pickle.HIGHEST_PROTOCOL)

    problem_size_space = np.linspace(5, 80, 10, dtype=int)

    best_fit_dict = {}
    best_fit_dict['Problem Size'] = problem_size_space
    best_fit_dict['Random Hill Climbing'] = []
    best_fit_dict['Simulated Annealing'] = []
    best_fit_dict['Genetic Algorithm'] = []
    best_fit_dict['MIMIC'] = []

    times = {}
    times['Problem Size'] = problem_size_space
    times['Random Hill Climbing'] = []
    times['Simulated Annealing'] = []
    times['Genetic Algorithm'] = []
    times['MIMIC'] = []

    fits_per_iteration = {}
    fits_per_iteration['Random Hill Climbing'] = []
    fits_per_iteration['Simulated Annealing'] = []
    fits_per_iteration['Genetic Algorithm'] = []
    fits_per_iteration['MIMIC'] = []

    for prob_size in problem_size_space:
        logger.info("---- Problem size: " + str(prob_size) + " ----")
        prob_size_int = int(prob_size)
        weights = [
            int(np.random.randint(1, prob_size / 2))
            for _ in range(prob_size_int)
        ]
        values = [
            int(np.random.randint(1, prob_size / 2))
            for _ in range(prob_size_int)
        ]
        flip_fit = mlrose.Knapsack(weights, values, max_weight_pct=0.5)
        flop_state_gen = lambda: np.random.randint(0, 1, size=prob_size_int)
        init_state = flop_state_gen()
        problem = mlrose.DiscreteOpt(length=prob_size_int,
                                     fitness_fn=flip_fit,
                                     maximize=True,
                                     max_val=2)

        start = datetime.now()
        best_state_sa, best_fitness_sa, fitness_curve_sa = mlrose.simulated_annealing(
            problem,
            schedule=mlrose.ExpDecay(exp_const=.701,
                                     init_temp=4.6,
                                     min_temp=.401),
            max_attempts=260,
            max_iters=1100,
            curve=True)
        best_fit_dict['Simulated Annealing'].append(best_fitness_sa)
        end = datetime.now()
        times['Simulated Annealing'].append((end - start).total_seconds())

        start = datetime.now()
        best_state_rhc, best_fitness_rhc, fitness_curve_rhc = mlrose.random_hill_climb(
            problem, max_attempts=260, max_iters=1100, restarts=40, curve=True)
        best_fit_dict['Random Hill Climbing'].append(best_fitness_rhc)
        end = datetime.now()
        times['Random Hill Climbing'].append((end - start).total_seconds())

        start = datetime.now()
        best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(
            problem,
            pop_size=99,
            mutation_prob=.01,
            max_attempts=410,
            max_iters=1100,
            curve=True)
        best_fit_dict['Genetic Algorithm'].append(best_fitness_ga)
        end = datetime.now()
        times['Genetic Algorithm'].append((end - start).total_seconds())

        start = datetime.now()
        best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(
            problem,
            pop_size=90,
            keep_pct=.21,
            max_attempts=10,
            max_iters=1100,
            curve=True)
        best_fit_dict['MIMIC'].append(best_fitness_mimic)
        end = datetime.now()
        times['MIMIC'].append((end - start).total_seconds())

    # For the last fit that occurs, save off the fit arrays that are generated. We will plot fitness/iteration.
    fits_per_iteration['Random Hill Climbing'] = fitness_curve_rhc
    fits_per_iteration['Simulated Annealing'] = fitness_curve_sa
    fits_per_iteration['Genetic Algorithm'] = fitness_curve_ga
    fits_per_iteration['MIMIC'] = fitness_curve_mimic

    fit_frame = pd.DataFrame.from_dict(best_fit_dict,
                                       orient='index').transpose()
    # fit_frame.pop('Unnamed: 0') # idk why this shows up.
    time_frame = pd.DataFrame.from_dict(times, orient='index').transpose()
    # time_frame.pop('Unnamed: 0') # idk why this shows up.
    fit_iteration_frame = pd.DataFrame.from_dict(fits_per_iteration,
                                                 orient='index').transpose()

    fit_frame.to_csv('./output/Knapsack/problem_size_fit.csv')
    time_frame.to_csv('./output/Knapsack/problem_size_time.csv')
    fit_iteration_frame.to_csv('./output/Knapsack/fit_per_iteration.csv')
Example #21
import mlrose
import numpy as np


weights=[11,15,3,4,5,6,10,13]
values=[5,7,1,2,1,3,4,11]

bweights=np.random.rand(30)
bvals=np.random.rand(30)



fitness=mlrose.Knapsack(weights,values,0.35)

init_state=np.zeros(len(weights))



problem = mlrose.DiscreteOpt(length = len(weights), fitness_fn = fitness,
                             maximize = True,max_val=2)

'''Simulated Annealing'''

schedule = mlrose.GeomDecay()

# Set random seed
np.random.seed(1)

# SA
opt_state, opt_fit = mlrose.simulated_annealing(problem, schedule = schedule,
                                                      max_attempts = 100000, max_iters = 1000000000000,
Example #22
File: GA.py Project: WhateverLiu/RA7641
sys.modules['sklearn.externals.six'] = six
import mlrose
import numpy as np
import time

probSize = int(sys.argv[2])
thre = float(sys.argv[4])
popu = int(sys.argv[6])
mutP = float(sys.argv[8])
maxIter = int(sys.argv[10])
rseed = int(sys.argv[12])

np.random.seed(123)
weights = np.random.uniform(size=probSize)
values = np.random.uniform(size=probSize)
fitnessF = mlrose.Knapsack(weights=weights, values=values, max_weight_pct=thre)
problemFit = mlrose.DiscreteOpt(length=probSize,
                                fitness_fn=fitnessF,
                                maximize=True,
                                max_val=2)
# max_val: number of unique values each element in the state vector can take.

timeStart = time.time()
# Double checked. The new generation does not take in parent generation.
best_state, best_fitness, fitness_curve = mlrose.genetic_alg(
    problemFit,
    pop_size=popu,
    mutation_prob=mutP,
    max_attempts=int(1e9),
    max_iters=maxIter,
    curve=True,
Example #23
def KnapSack(weights, values, sizeOrIterations):
    max_pct = .6
    fitness = mlrose.Knapsack(weights, values, max_pct)
    total = 0
    for item in weights:
        total = total + item
    print (total)
    problem = mlrose.DiscreteOpt(length = len(weights), fitness_fn = fitness, maximize = True, max_val = 3)

    rhcfitnessMatrix = []
    safitnessMatrix = []
    genalgfitnessMatrix = []
    mimicfitnessMatrix = []
    numIterations = 10000
    dataPoints = 100
    #rhc
    print("Begin RHC")
    startingTime = time()
    for i in range(numIterations):
        if i % dataPoints == 0 and not sizeOrIterations or i == 1000 and sizeOrIterations:
            print("RHC I: " + str(i))
            t0 = time()
            best_state, best_fitness = mlrose.random_hill_climb(problem, max_attempts=100, max_iters=i,
                              init_state=None)
            finish = time() - t0
            currentTime = time() - startingTime
            print("CurrentTime: " + str(currentTime))
            rhcfitnessMatrix.append((i, best_fitness, finish))
    finishtime = time() - startingTime
    print("Finish Time: " + str(finishtime))


    #simulated annealing
    schedule = mlrose.ExpDecay()
    startingTime = time()
    for i in range(numIterations):
        if i % dataPoints == 0 and not sizeOrIterations or i == 1000 and sizeOrIterations:
            print("SA I: " + str(i))
            t0 = time()
            best_state, best_fitness = mlrose.simulated_annealing(problem, schedule = schedule,
                                                      max_attempts = 100, max_iters = i,
                                                      init_state = None)
            finish = time() - t0
            currentTime = time() - startingTime
            print("CurrentTime: " + str(currentTime))
            safitnessMatrix.append((i, best_fitness, finish))
    finishtime = time() - startingTime
    print("Finish Time: " + str(finishtime))


    #genetic alg
    startingTime = time()
    for i in range(numIterations):
        if i % dataPoints == 0 and not sizeOrIterations or i == 1000 and sizeOrIterations:
            print("GA I: " + str(i))
            t0 = time()
            best_state, best_fitness = mlrose.genetic_alg(problem, pop_size=200, mutation_prob=0.1, max_attempts=100,
                max_iters=i)
            finish = time() - t0
            currentTime = time() - startingTime
            print("CurrentTime: " + str(currentTime))
            genalgfitnessMatrix.append((i, best_fitness, finish))
    finishtime = time() - startingTime
    print("Finish Time: " + str(finishtime))

    #mimic
    startingTime = time()
    for i in range(numIterations):
        if i % dataPoints == 0 and not sizeOrIterations or i == 1000 and sizeOrIterations:
            print("Mimic I: " + str(i))
            t0 = time()
            best_state, best_fitness = mlrose.mimic(problem, pop_size=200, keep_pct=0.3, max_attempts=100, max_iters=i)
            finish = time() - t0
            currentTime = time() - startingTime
            print("CurrentTime: " + str(currentTime))
            mimicfitnessMatrix.append((i, best_fitness, finish))
    finishtime = time() - startingTime
    print("Finish Time: " + str(finishtime))
    if not sizeOrIterations:
        writeToExcel.writeOptimzationProblem(rhcfitnessMatrix, safitnessMatrix, genalgfitnessMatrix, mimicfitnessMatrix, "KSIterations.xlsx")
        return None
    else:
        return rhcfitnessMatrix, safitnessMatrix, genalgfitnessMatrix, mimicfitnessMatrix