Example #1
    def __run_with_rhc(self, init_weights, num_nodes, problem):
        fitness_curve = []
        fitted_weights = []
        loss = np.inf
        # Can't use the restart feature of random_hill_climb, since we want
        # to keep the initial weights in the range -1 to 1.
        for _ in range(self.restarts + 1):
            # Draw fresh random starting weights for each restart unless the
            # caller supplied explicit initial weights.
            if init_weights is None:
                current_init_weights = np.random.uniform(-1, 1, num_nodes)
            else:
                current_init_weights = init_weights

            if self.curve:
                current_weights, current_loss, fitness_curve = \
                    random_hill_climb(problem,
                                      max_attempts=self.max_attempts if
                                      self.early_stopping else
                                      self.max_iters,
                                      max_iters=self.max_iters,
                                      restarts=0,
                                      init_state=current_init_weights,
                                      curve=self.curve)
            else:
                current_weights, current_loss, _ = random_hill_climb(
                    problem,
                    max_attempts=self.max_attempts
                    if self.early_stopping else self.max_iters,
                    max_iters=self.max_iters,
                    restarts=0,
                    init_state=current_init_weights,
                    curve=self.curve)

            if current_loss < loss:
                fitted_weights = current_weights
                loss = current_loss
        return fitness_curve, fitted_weights, loss
Example #2
def rhc_runner(problem):
    attempts = np.arange(10, 500, 50).astype(int)
    iterations = np.arange(10, 500, 50).astype(int)
    restarts = np.arange(0, 100, 20)
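    # MC_RUNS is assumed to be a module-level iterable of Monte Carlo run
    # indices (e.g. range(10)); it also supplies the column labels for the
    # DataFrames built below.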

    scoring_dict = {}
    timing_dict = {}

    attempts_scores = []
    for att in attempts:
        best_fits = []
        for _ in MC_RUNS:
            _, best_fitness = mlrose.random_hill_climb(problem,
                                                       max_attempts=int(att),
                                                       max_iters=500)
            best_fits.append(best_fitness)

        attempts_scores.append(best_fits)
    scoring_dict['Number of Attempts'] = pd.DataFrame(attempts_scores,
                                                      columns=MC_RUNS,
                                                      index=attempts)

    iteration_scores = []
    for iteration in iterations:

        best_fits = []
        for _ in MC_RUNS:
            _, best_fitness = mlrose.random_hill_climb(
                problem, max_attempts=100, max_iters=int(iteration))
            best_fits.append(best_fitness)
        iteration_scores.append(best_fits)
    scoring_dict['Max Iterations'] = pd.DataFrame(iteration_scores,
                                                  columns=MC_RUNS,
                                                  index=iterations)

    iteration_scores = []
    iteration_timing = []
    for rst in restarts:
        best_fits = []
        times = []
        for _ in MC_RUNS:
            start = datetime.now()
            _, best_fitness = mlrose.random_hill_climb(problem,
                                                       max_attempts=100,
                                                       max_iters=500,
                                                       restarts=int(rst))
            end = datetime.now()
            best_fits.append(best_fitness)
            times.append((end - start).total_seconds())
        iteration_scores.append(best_fits)
        iteration_timing.append(times)
    scoring_dict['Random Restarts'] = pd.DataFrame(iteration_scores,
                                                   columns=MC_RUNS,
                                                   index=restarts)
    timing_dict['Random Restarts'] = pd.DataFrame(iteration_timing,
                                                  columns=MC_RUNS,
                                                  index=restarts)

    return scoring_dict, timing_dict
Example #3
def run_rhc(prob, value_range, num_runs):
    avgs = []
    times = []

    for value in value_range:
        problem = prob(value)
        print("\t\tValue: " + str(value))

        run_vals = []
        run_times = []

        for run in range(0, num_runs):
            print("\t\t\tRun " + str(run))
            start = timeit.default_timer()

            best_state, best_fitness = mlrose.random_hill_climb(problem,
                                                                restarts=10)

            stop = timeit.default_timer()
            total_time = stop - start

            run_vals.append(best_fitness)
            run_times.append(total_time)

        avgs.append(np.mean(run_vals))
        times.append(np.mean(run_times))

    return avgs, times
Example #4
def random_hill_climb_max_attempts(problem, initial_state,
                                   attempts_percentage):
    attempts = int(problem.length * attempts_percentage)
    return mlrose.random_hill_climb(problem,
                                    init_state=initial_state,
                                    curve=False,
                                    max_attempts=attempts)
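A minimal usage sketch for the helper above, assuming a 20-bit OneMax problem and a 50% attempts budget (both illustrative; the number of values returned depends on the installed mlrose version):

import numpy as np
import mlrose

# Illustrative problem: 20-bit OneMax, maximised over binary strings.
problem = mlrose.DiscreteOpt(length=20, fitness_fn=mlrose.OneMax(),
                             maximize=True, max_val=2)
initial_state = np.zeros(20, dtype=int)

# max_attempts becomes int(20 * 0.5) = 10 unimproved neighbours before stopping.
result = random_hill_climb_max_attempts(problem, initial_state,
                                        attempts_percentage=0.5)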
Example #5
def runComplexityHill(pType, problem):

    if pType == 'One Max':
        neighbor = 20
        iterations = 400
    elif pType == 'Flip Flop':
        neighbor = 36
        iterations = 200
    else:
        neighbor = 75
        iterations = 400

    s = time()
    best_state, best_fitness = mlrose.random_hill_climb(problem,
                                                        max_attempts=neighbor,
                                                        max_iters=iterations,
                                                        curve=False,
                                                        random_state=1)
    # best_state, best_fitness, c = mlrose.random_hill_climb(problem,
    #                                                     max_attempts=neighbor,
    #                                                     max_iters=iterations,
    #                                                     curve=False,
    #                                                     random_state=1)
    timeTaken = time() - s
    return best_fitness, timeTaken
Example #6
def run_random_hill_climbing(init_state_problem):
    initial_state, problem = init_state_problem
    return mlrose.random_hill_climb(problem,
                                    init_state=initial_state,
                                    max_attempts=problem.length,
                                    restarts=5,
                                    curve=True)
Example #7
def hill_climb(prob, **kwargs):
    start = time.time()
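    # Unpacking four values assumes a patched/extended mlrose build whose
    # random_hill_climb also reports the number of fitness evaluations; the
    # standard releases return at most (best_state, best_fitness, curve).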
    _, best_score, curve, fit_evals = mlrose.random_hill_climb(prob(),
                                                               curve=True,
                                                               **kwargs)
    end = time.time()
    return np.array([best_score, len(curve), fit_evals, end - start])
Example #8
    def RandomHillClimbing(self):
        start = time.time()
        KSbest_state, KSbest_fitness, KScurve = random_hill_climb(
            self.knapsack, curve=True)
        rhKST = time.time() - start

        start = time.time()
        FPbest_state, FPbest_fitness, FPcurve = random_hill_climb(
            self.fourpeaks, curve=True)
        rhFPT = time.time() - start

        start = time.time()
        CObest_state, CObest_fitness, COcurve = random_hill_climb(
            self.countones, curve=True)
        rhCOT = time.time() - start

        return KSbest_fitness, FPbest_fitness, CObest_fitness, rhKST, rhFPT, rhCOT, KScurve, FPcurve, COcurve
Example #9
def randomHill(problem, init_state, max_attempts, iterations):
    rh_best_state, rh_best_fitness = mlrose.random_hill_climb(
        problem,
        max_attempts=max_attempts,
        max_iters=iterations,
        restarts=0,
        init_state=init_state,
        random_state=1)
    return rh_best_fitness
Example #10
def optimize_rhc(problem, trials=100):
    fitness_values = []
    fitness_values_std = []
    times = []
    times_std = []

    restarts = range(0, 3001, 500)
    for restart in restarts:
        samples = []
        time_samples = []
        for _ in range(trials):
            start = time.time()
            _, fitness_value, _ = mlrose.random_hill_climb(problem,
                                                           restarts=restart)
            time_samples.append(time.time() - start)
            samples.append(fitness_value)
        fitness_values.append(np.mean(samples))
        fitness_values_std.append(np.std(samples))
        times.append(np.mean(time_samples))
        times_std.append(np.std(time_samples))

    fitness_values = np.array(fitness_values)
    fitness_values_std = np.array(fitness_values_std)
    times = np.array(times)
    times_std = np.array(times_std)

    with plot.style.context('seaborn-darkgrid'):
        fig, ax1 = plot.subplots()
        plot.title(f'Influence of the number of restarts on RHC')
        ax1.set_xlabel('Number of restarts')
        ax1.set_ylabel('Fitness')
        ax1.tick_params(axis='y')

        ax1.fill_between(restarts,
                         fitness_values + fitness_values_std / 2,
                         fitness_values - fitness_values_std / 2,
                         alpha=0.5)

        ax1.plot(restarts, fitness_values, 'o-')

        with plot.style.context('default'):
            ax2 = ax1.twinx()

            ax2.set_ylabel('Computing time (s)')
            ax2.fill_between(restarts,
                             times + times_std / 2,
                             times - times_std / 2,
                             alpha=0.5,
                             color='darkorange')

            ax2.plot(restarts, times, color='darkorange')
            ax2.tick_params(axis='y')

        fig.tight_layout()

        plot.show()
Example #11
def fit(length, fitness):
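    # Assumes mlrose, numpy (np) and time are imported at module level, and that
    # plot() and filewrite_array() are helper functions defined elsewhere.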
    problem = mlrose.DiscreteOpt(length = length, fitness_fn = fitness, maximize = True, max_val = 2)

    iterations = [10,50,100,200,400,800,1600,3200]
    RHC, SA, GA, MM = ([],[],[],[])
    time_RHC, time_SA, time_GA, time_MM = ([],[],[],[])

    for iter in iterations:
        print ("max iterations = " + str(iter))
        start_time = time.time()
        best_fitness = 0
        for _ in range(10):
          best_state, run_fitness = mlrose.random_hill_climb(problem, max_attempts = 10, max_iters = iter, restarts = 0, init_state = np.random.randint(2, size=(length,)))
          best_fitness = max(best_fitness, run_fitness)
          #print(best_state)
        RHC.append(best_fitness)
        print(best_fitness)
        time_RHC.append((time.time() - start_time)/10)
        
        start_time = time.time()
        best_fitness = 0
        for _ in range(10):
          best_state, run_fitness = mlrose.simulated_annealing(problem, schedule = mlrose.GeomDecay(), max_attempts = 10, max_iters = iter, init_state = np.random.randint(2, size=(length,)))
          best_fitness = max(best_fitness, run_fitness)
          #print(best_state)
        SA.append(best_fitness)
        print(best_fitness)
        time_SA.append((time.time() - start_time)/10)

        start_time = time.time()
        best_fitness = 0
        best_state, best_fitness = mlrose.genetic_alg(problem, pop_size = 200, mutation_prob = 0.1, max_attempts = 10, max_iters = iter)
        #print(best_state)
        GA.append(best_fitness)
        print(best_fitness)
        time_GA.append((time.time() - start_time))

        start_time = time.time()
        best_fitness = 0
        best_state, best_fitness = mlrose.mimic(problem, pop_size = 200, keep_pct = 0.2, max_attempts = 10, max_iters = iter)
        #print(best_state)
        MM.append(best_fitness)
        print(best_fitness)
        time_MM.append((time.time() - start_time))
    
    plot(RHC, SA, GA, MM, time_RHC, time_SA, time_GA, time_MM, iterations)
    filewrite_array("iterations:", iterations)
    filewrite_array("Fitness(RHC):", RHC)
    filewrite_array("Fitness(SA):", SA)
    filewrite_array("Fitness(GA):", GA)
    filewrite_array("Fitness(MM):", MM)
    filewrite_array("Fitness(time_RHC):", time_RHC)
    filewrite_array("Fitness(time_SA):", time_SA)
    filewrite_array("Fitness(time_GA):", time_GA)
    filewrite_array("Fitness(time_MM):", time_MM)
Example #12
    def test_random_hill_climb_discrete_max():
        """Test random_hill_climb function for a discrete maximization
        problem"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
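        # timing=True is not an argument of the released mlrose/mlrose_hiive
        # random_hill_climb; this test assumes a locally modified version whose
        # curve records two columns per iteration (e.g. fitness and elapsed
        # time), hence the shape assertion below.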
        _, _, curve = random_hill_climb(problem,
                                        max_attempts=10,
                                        restarts=20,
                                        timing=True)

        assert (curve.shape[1] == 2)
Example #13
    def test_random_hill_climb_curve_length_max_iters():
        """Test random_hill_climb function such that when curve is True for ma_iters
        the length of all fitness scores should be equal to max_iters"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
        x = np.array([0, 0, 0, 0, 0])

        max_iters = 300

        best_state, best_fitness, all_fitness = random_hill_climb(
            problem, max_iters=max_iters, restarts=0, init_state=x, curve=True)
        assert len(all_fitness) == max_iters
Example #14
 def rhc(self, max_attempts=10, max_iters=500, restarts=100):
     start = time.time()
     best_state, best_fitness, curve = random_hill_climb(
         self.problem_fit,
         max_attempts=max_attempts,
         max_iters=max_iters,
         restarts=restarts,
         curve=True,
         random_state=111)
     end = time.time()
     time_elapsed = end - start
     return [best_fitness, time_elapsed, curve]
Example #15
def optimization(problem):
    df_optim = pd.DataFrame(columns=algos)
    df_iter = pd.DataFrame(columns=algos)
    df_time = pd.DataFrame(columns=algos)

    for rs in random_states:
        # RHC
        tic = time.process_time()
        best_state_rhc, best_fitness_rhc, curve_rhc = \
            mlrose.random_hill_climb(problem, max_attempts=20, max_iters=100000,
                                     restarts=0, init_state=None, curve=True, random_state=rs)
        toc = time.process_time()
        time_rhc = toc - tic

        # SA
        tic = time.process_time()
        best_state_sa, best_fitness_sa, curve_sa = \
            mlrose.simulated_annealing(problem,
                                       schedule=mlrose.ExpDecay(init_temp=1.0, exp_const=0.005, min_temp=0.001),
                                       max_attempts=20, max_iters=100000,
                                       init_state=None, curve=True, random_state=rs)
        toc = time.process_time()
        time_sa = toc - tic

        # GA
        tic = time.process_time()
        best_state_ga, best_fitness_ga, curve_ga = \
            mlrose.genetic_alg(problem, pop_size=200, mutation_prob=0.1, max_attempts=20, max_iters=100000,
                               curve=True, random_state=rs)
        toc = time.process_time()
        time_ga = toc - tic

        # MIMIC
        tic = time.process_time()
        best_state_m, best_fitness_m, curve_m = \
            mlrose.mimic(problem, pop_size=20, keep_pct=0.2, max_attempts=20, max_iters=100000,
                         curve=True, random_state=rs, fast_mimic=False)
        toc = time.process_time()
        time_m = toc - tic

        # df
        df_optim.loc[len(df_optim)] = [
            best_fitness_rhc, best_fitness_sa, best_fitness_ga, best_fitness_m
        ]
        df_iter.loc[len(df_iter)] = [
            len(curve_rhc),
            len(curve_sa),
            len(curve_ga),
            len(curve_m)
        ]
        df_time.loc[len(df_time)] = [time_rhc, time_sa, time_ga, time_m]
        print(rs)

    return (df_optim.mean(axis=0), df_iter.mean(axis=0), df_time.mean(axis=0))
Example #16
    def test_random_hill_climb_discrete_max():
        """Test random_hill_climb function for a discrete maximization
        problem"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
        best_state, best_fitness, _ = random_hill_climb(problem,
                                                        max_attempts=10,
                                                        restarts=20)

        x = np.array([1, 1, 1, 1, 1])

        assert (np.array_equal(best_state, x) and best_fitness == 5)
Example #17
def maxKColor(edges, nodes, colors):
    fitness = mlrose.MaxKColor(edges)

    problem = mlrose.DiscreteOpt(length = nodes, fitness_fn = fitness, maximize = False, max_val = colors)
    t0 = time()
    best_state, best_fitness = mlrose.random_hill_climb(
        problem, max_attempts=100, max_iters=np.inf, init_state=None)
    finish = time() - t0

    print(best_state)
    print(best_fitness)
    print(finish)
Example #18
    def test_random_hill_climb_continuous_min():
        """Test random_hill_climb function for a continuous minimization
        problem"""

        problem = ContinuousOpt(5, OneMax(), maximize=False)
        best_state, best_fitness, _ = random_hill_climb(problem,
                                                        max_attempts=10,
                                                        restarts=20)

        x = np.array([0, 0, 0, 0, 0])

        assert (np.array_equal(best_state, x) and best_fitness == 0)
Example #19
def final_test(problem, ga, sa, rhc, mimic, trials=50):
    ga_samples = []
    sa_samples = []
    rhc_samples = []
    mimic_samples = []
    ga_time_samples = []
    sa_time_samples = []
    rhc_time_samples = []
    mimic_time_samples = []

    for _ in range(trials):
        start = time.time()
        _, ga_fitness, _ = mlrose.genetic_alg(problem,
                                              pop_size=ga[0],
                                              mutation_prob=ga[1])
        ga_time_samples.append(time.time() - start)
        start = time.time()
        _, sa_fitness, _ = mlrose.simulated_annealing(problem, sa)
        sa_time_samples.append(time.time() - start)
        start = time.time()
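        # NOTE: the second positional argument of mlrose.random_hill_climb is
        # max_attempts, so the tuned `rhc` value is passed as max_attempts here.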
        _, rhc_fitness, _ = mlrose.random_hill_climb(problem, rhc)
        rhc_time_samples.append(time.time() - start)
        start = time.time()
        _, mimic_fitness, _ = mlrose.mimic(problem,
                                           pop_size=mimic[0],
                                           keep_pct=mimic[1])
        mimic_time_samples.append(time.time() - start)

        ga_samples.append(ga_fitness)
        sa_samples.append(sa_fitness)
        rhc_samples.append(rhc_fitness)
        mimic_samples.append(mimic_fitness)

    fitness_name = repr(problem.fitness_fn).split('.')[-1].split(' ')[0]
    if fitness_name == 'CustomFitness':
        fitness_name = 'Saw'
    print(f'Final results on {fitness_name}')
    print()
    print(f'GA max: {np.max(ga_samples)}')
    print(f'SA max: {np.max(sa_samples)}')
    print(f'RHC max: {np.max(rhc_samples)}')
    print(f'MIMIC max: {np.max(mimic_samples)}')
    print()
    print(f'GA mean: {np.mean(ga_samples)}')
    print(f'SA mean: {np.mean(sa_samples)}')
    print(f'RHC mean: {np.mean(rhc_samples)}')
    print(f'MIMIC mean: {np.mean(mimic_samples)}')
    print()
    print(f'GA mean execution time: {np.mean(ga_time_samples)}')
    print(f'SA mean execution time: {np.mean(sa_time_samples)}')
    print(f'RHC mean execution time: {np.mean(rhc_time_samples)}')
    print(f'MIMIC mean execution time: {np.mean(mimic_time_samples)}')
Example #20
    def test_random_hill_climb_max_iters():
        """Test random_hill_climb function with max_iters less than infinite"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
        x = np.array([0, 0, 0, 0, 0])
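        # From the all-zeros state, any accepted single-bit flip raises the
        # OneMax fitness from 0 to 1, so one iteration suffices for the assert.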

        best_state, best_fitness, _ = random_hill_climb(problem,
                                                        max_attempts=1,
                                                        max_iters=1,
                                                        restarts=0,
                                                        init_state=x)

        assert best_fitness == 1
Example #21
    def optimize(self):

        best_state, best_fitness, fitness_curve = mlrose.random_hill_climb(
            self.problem,
            max_attempts=self.max_attempts,
            max_iters=self.max_iters,
            restarts=self.restarts,
            init_state=None,  #self.init_state, 
            curve=True,
            random_state=self.random_state)

        #print('best_state '+ str(best_state))
        #print('best_fitness '+ str(best_fitness))
        #print('fitness_curve '+ str(fitness_curve))
        return best_state, best_fitness
Example #22
def radomHillClimb(fitness, x):
    # This code was originally taken and modified from https://mlrose.readthedocs.io/en/stable/source/intro.html
    start = time.time()

    # Initialize fitness function object using pre-defined class
    #fitness = mlrose.Queens()

    # Define optimization problem object
    if (x == 0):
        problem = mlrose.DiscreteOpt(length=12,
                                     fitness_fn=fitness,
                                     maximize=False,
                                     max_val=12)
    elif (x == 1):
        problem = mlrose.DiscreteOpt(length=9,
                                     fitness_fn=fitness,
                                     maximize=False,
                                     max_val=3)
    else:
        problem = mlrose.DiscreteOpt(length=8,
                                     fitness_fn=fitness,
                                     maximize=True,
                                     max_val=8)

    # Solve using random hill climb
    if (x == 0):
        init_state = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    elif (x == 1):
        init_state = np.array([0, 1, 2, 0, 1, 2, 0, 1, 1])
    else:
        init_state = np.array([0, 1, 2, 3, 4, 5, 6, 7])

    best_state, best_fitness, fitness_curve = mlrose.random_hill_climb(
        problem,
        restarts=10,
        init_state=init_state,
        max_iters=1000,
        random_state=1,
        curve=True)

    end = time.time()

    print("Random Hill Climb:")
    print('The best state found is: ', best_state)
    print('The fitness at the best state is: ', best_fitness)
    print("Time: " + str(end - start))
    return best_fitness, end - start
Example #23
def random_hill_climb(problem, starting_cell):
    """
    Random Hill climbing implementation to minimize TSP
    Inputs: 
            problem --> optimization problem object required by mlrose
            starting_cell --> starting cell number, index starting from 1
    Outputs: 
            optimized_cells --> cells to visit
    """
    # mlrose requires indices start from zero
    # mlrose also requires init_state to be a 1D np array
    #init_cell = np.array([starting_cell-1])
    #init_cell = np.array([0,2,3,4,5,6,7,8,9,10,11,12,1])

    optimized_cells, _ = mlrose.random_hill_climb(problem)

    # Add 1 to all cells since in our case, cell numbers start from one not zero!
    #optimized_cells += 1

    return optimized_cells
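A short usage sketch for the wrapper above, assuming it is in scope and using made-up coordinates for illustration (the TSP setup itself is not part of the original example):

import mlrose

# Four illustrative cell coordinates; cell numbers are 1-based for the caller.
coords = [(0, 0), (3, 0), (3, 4), (0, 4)]
fitness = mlrose.TravellingSales(coords=coords)
problem = mlrose.TSPOpt(length=len(coords), fitness_fn=fitness, maximize=False)

# starting_cell is 1-based per the docstring; the current implementation
# ignores it because the init_state lines are commented out.
optimized_cells = random_hill_climb(problem, starting_cell=1)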
Example #24
    def run(self, n=1):
        """
        n : the number of runs to do 
        returns best_fitnesses (list), learning_curves (list)
        """
        best_fitnesses = []
        learning_curves = []
        for i in np.arange(n):
            _, _, learning_curve = mlrose.random_hill_climb(
                self.problem,
                max_attempts=self.max_attempts,
                max_iters=self.max_iters,
                restarts=self.restarts,
                random_state=None,
                curve=True)
            best_fitness = np.max(learning_curve)
            best_fitnesses.append(best_fitness)
            learning_curves.append(learning_curve)

        return best_fitnesses, learning_curves
Example #25
    def iteration_curve(self):
        print("Creating iteration curve")
        problem, init_state = self.get_prob(t_pct=0.15)
        best_state_rhc, best_fitness_rhc, fitness_curve_rhc = mlrose.random_hill_climb(
            problem,
            max_attempts=1000,
            max_iters=5000,
            init_state=init_state,
            curve=True)
        best_state_sa, best_fitness_sa, fitness_curve_sa = mlrose.simulated_annealing(
            problem,
            max_attempts=10000,
            max_iters=5000,
            init_state=init_state,
            curve=True)
        problem, init_state = self.get_prob(t_pct=0.15)
        best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(
            problem, max_attempts=1000, max_iters=5000, curve=True)
        problem, init_state = self.get_prob(t_pct=0.15)
        best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(
            problem,
            pop_size=self.pop_size,
            max_attempts=100,
            max_iters=5000,
            curve=True)

        plt.figure()
        plt.plot(fitness_curve_rhc, label='RHC')
        plt.plot(fitness_curve_sa, label='SA')
        plt.plot(fitness_curve_ga, label='GA')
        plt.plot(fitness_curve_mimic, label='MIMIC')
        plt.legend(loc="best")
        plt.ylabel('Fitness')
        plt.xlabel('Number of Iterations')
        plt.title("{}: Fitness vs. Number of Iterations".format(
            self.prob_name))
        # ax = plt.gca()
        # ax.set_xscale('log')
        plt.savefig(
            os.path.join(self.output_path,
                         "{}_iterations.png".format(self.prob_name)))
Example #26
def runHill(problem, basePath):
    iterations = 1000
    restart = 0
    neighborhood = [2, 4, 12, 36]
    neighborhood = [20]
    neighborhood = [2, 4, 12, 36, 50, 75]
    neighborhood = [2, 75]
    fig, ax = plt.subplots()
    plt.title('Random Hill')
    # fig.tight_layout()

    times = np.zeros((len(neighborhood), iterations))
    nIndex = 0
    for neighbor in neighborhood:
        s = time()
        x = []
        y = []
        for i in range(1, iterations + 1):
            best_state, best_fitness = mlrose.random_hill_climb(
                problem,
                max_attempts=neighbor,
                restarts=restart,
                max_iters=i,
                curve=False,
                random_state=1)
            x.append(i)
            y.append(best_fitness)
            e = time()
            timeTaken = e - s
            times[nIndex, i - 1] = timeTaken
            print('Itt: {0} - Time:{1}'.format(i, timeTaken))
        nIndex += 1
        plotLine(x, y, ax, 'Neighbors: {0}'.format(neighbor))
    plotTime(x, times, ax)
    showLegend(fig, ax)

    if basePath:
        plt.savefig('{0}\\{1}.png'.format(basePath, 'Hill'))
    else:
        plt.show()
    return
Example #27
    def rhc(self):
        print("Creating RHC curve")
        problem, init_state = self.get_prob(t_pct=0.15)
        plt.close()
        plt.figure()
        for restarts in range(0, 11, 2):
            _, _, fitness_curve = mlrose.random_hill_climb(
                problem,
                restarts=restarts,
                max_attempts=100,
                max_iters=1000,
                init_state=init_state,
                curve=True)
            plt.plot(fitness_curve, label="restarts={}".format(restarts))

        plt.title("{}: Randomized Hill Climbing".format(self.prob_name))
        plt.legend(loc="best")
        plt.xlabel('Number of Iterations')
        plt.ylabel('Fitness')
        plt.savefig(
            os.path.join(self.output_path,
                         "{}_RHC Analysis.png".format(self.prob_name)))
Example #28
    def test_random_hill(self,
                         title,
                         max_attempts_range=[100],
                         random_restarts_range=[0]):
        print(title + " Random Hill Climbing Algorithm")
        # Two side-by-side panels: fitness (left) and wall-clock time (right).
        fig, (ax1, ax2) = plt.subplots(1, 2)
        fig.suptitle(title + " Random Hill Climb")
        best = [0, 0, 0]
        for m in max_attempts_range:
            fitness_arr = []
            time_arr = []
            for r in random_restarts_range:
                start = time.time()
                best_state, best_fitness, curve = mlrose.random_hill_climb(
                    self.problem_fit,
                    max_attempts=m,
                    max_iters=np.inf,
                    restarts=r,
                    curve=True)
                fitness_arr.append(best_fitness)
                time_arr.append(round(time.time() - start, 2))
                if best_fitness > best[2]:
                    best[0] = m
                    best[1] = r
                    best[2] = best_fitness

            ax1.plot(random_restarts_range, fitness_arr, label=m, lw=2)
            ax2.plot(random_restarts_range, time_arr, lw=2)

        ax1.set(xlabel="# Restarts", ylabel="Fitness")
        ax2.set(xlabel="# Restarts", ylabel="Time(s)")
        fig.legend(loc='center right', title='Attempts')
        print(title +
              " RHC max_attempts={a}, # restarts={b}, best_fitness={c}".format(
                  a=best[0], b=best[1], c=best[2]))
        #ax1.text(x=0.05, y=0.95,s="max_attempts={a}\n# restarts={b}\nbest_fitness={c}".format(a=best[0], b=best[1], c=best[2]))
        plt.tight_layout()
        self.saveToNewDir(fig, "./", title + "_Random_Hill_Climb.png")
        plt.clf()
Example #29
def runalgos(problem, max_attempts, iterations):
    #Define decay schedule
    schedule = mlrose.ExpDecay()
    #Solve problem using Random Hill Climbing
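    # NOTE: init_state is assumed to be defined at module scope; it is not a
    # parameter of this function.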
    rh_best_state, rh_best_fitness = mlrose.random_hill_climb(
        problem,
        max_attempts=max_attempts,
        max_iters=iterations,
        restarts=1,
        init_state=init_state,
        random_state=1)
    #Solve problem using Simulated Annealing
    sm_best_state, sm_best_fitness = mlrose.simulated_annealing(
        problem,
        schedule=schedule,
        max_attempts=max_attempts,
        max_iters=iterations,
        curve=False,
        random_state=1)
    #Solve problem using Genetic Algorithm
    genA_best_state, genA_best_fitness = mlrose.genetic_alg(
        problem,
        pop_size=200,
        mutation_prob=0.1,
        max_attempts=max_attempts,
        max_iters=iterations,
        curve=False,
        random_state=1)
    #Solve problem using MIMIC
    mimic_best_state, mimic_best_fitness = mlrose.mimic(
        problem,
        pop_size=200,
        keep_pct=0.2,
        max_attempts=max_attempts,
        max_iters=iterations,
        curve=False,
        random_state=1)
    #return rh_best_state,rh_best_fitness,sm_best_state,sm_best_fitness,genA_best_state,genA_best_fitness,mimic_best_state,mimic_best_fitness
    return rh_best_fitness, sm_best_fitness, genA_best_fitness, mimic_best_fitness
Example #30
    ]].idxmin()]  # from the output of the experiment above
    return rhc_df_run_stats, rhc_df_run_curves, ideal_rs


# # Done on a complex example then  hard coded optimized parameter value(s).
# rhc_df_run_stats, rhc_df_run_curves, ideal_rs = opt_rhc_params()

ideal_rs = 50  # this value came from the commented-out experiment above.
rhc_best_state = []
rhc_best_fitness = []
rhc_convergence_time = []
for iter in iterations_range:
    start_time = timeit.default_timer()
    best_state, best_fitness, curve = mlrose.random_hill_climb(
        problem=problem,
        max_iters=iter,
        max_attempts=500,
        restarts=ideal_rs,
        curve=True)
    end_time = timeit.default_timer()
    convergence_time = (end_time - start_time)  # seconds
    rhc_best_state.append(best_state)
    rhc_best_fitness.append(best_fitness)
    rhc_convergence_time.append(convergence_time)

print('The fitness at the best state found using Random Hill Climbing is: ',
      min(rhc_best_fitness))

#========== Genetic Algorithms ==========#
time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
print("Starting Genetic Algorithms at: " + time)