def get_prob(self, t_pct=None, p_length=None):
    """Build the bit-string optimization problem selected by ``self.prob_name``.

    As a side effect, sets the per-problem solver hyperparameters on
    ``self``: ``schedule``, ``restarts``, ``mutation_prob``, ``keep_pct``
    and ``pop_size``.

    Parameters
    ----------
    t_pct : float, optional
        Threshold forwarded to the Four Peaks / Continuous Peaks fitness
        functions (passed through unchanged, even if ``None``).
    p_length : int, optional
        State-vector length; when ``None`` the per-problem default is used.

    Returns
    -------
    tuple
        ``(problem, init_state)`` — an ``mlrose.DiscreteOpt`` instance and
        a random 0/1 initial state vector of matching length.
    """
    # prob_name -> (fitness factory, default length, schedule class,
    #               restarts, mutation_prob, keep_pct, pop_size)
    configs = {
        'Four Peaks': (lambda: mlrose.FourPeaks(t_pct), 100,
                       mlrose.ExpDecay, 0, 0.1, 0.1, 500),
        'Continuous Peaks': (lambda: mlrose.ContinuousPeaks(t_pct), 100,
                             mlrose.GeomDecay, 0, 0.1, 0.2, 200),
        'Max K Color': (lambda: mlrose.MaxKColor(self.COLOREDGE), 100,
                        mlrose.ExpDecay, 0, 0.2, 0.2, 200),
        'Flip Flop': (lambda: mlrose.FlipFlop(), 100,
                      mlrose.ArithDecay, 0, 0.2, 0.5, 500),
        'One Max': (lambda: mlrose.OneMax(), 100,
                    mlrose.GeomDecay, 0, 0.2, 0.1, 100),
    }

    if self.prob_name in configs:
        make_fitness, default_len, schedule_cls, restarts, mut, keep, pop = \
            configs[self.prob_name]
        fitness = make_fitness()
        self.schedule = schedule_cls()
        self.restarts = restarts
        self.mutation_prob = mut
        self.keep_pct = keep
        self.pop_size = pop
    else:
        # Unknown problem name: no hyperparameters are set and the
        # problem is built with a zero-length state and no fitness,
        # exactly as the original fall-through behaved.
        fitness = None
        default_len = 0

    if p_length is None:
        p_length = default_len
    problem = mlrose.DiscreteOpt(length=p_length, fitness_fn=fitness)
    init_state = np.random.randint(2, size=p_length)
    return problem, init_state
import six
import sys

# Older mlrose releases import sklearn.externals.six, which modern
# scikit-learn removed; alias the standalone six module back in place
# BEFORE importing mlrose so its import succeeds.
sys.modules['sklearn.externals.six'] = six
import mlrose
import numpy as np
import time

# CLI parameters are read from the even argv slots (the odd slots
# presumably carry flag labels that are ignored — TODO confirm caller).
probSize = int(sys.argv[2])
thre = float(sys.argv[4])
popu = int(sys.argv[6])
kepp = float(sys.argv[8])
maxIter = int(sys.argv[10])
rseed = int(sys.argv[12])

fitnessF = mlrose.ContinuousPeaks(t_pct=thre)
# max_val: number of unique values each element in the state vector can take.
problemFit = mlrose.DiscreteOpt(length=probSize, fitness_fn=fitnessF,
                                maximize=True, max_val=2)

# Time a single MIMIC run; max_attempts is effectively unlimited so the
# iteration cap (maxIter) is the real stopping criterion.
timeStart = time.time()
best_state, best_fitness, fitness_curve = mlrose.mimic(
    problemFit,
    pop_size=popu,
    keep_pct=kepp,
    max_attempts=int(1e9),
    max_iters=maxIter,
    curve=True,
    random_state=rseed,
    fast_mimic=True,
)
timeEnd = time.time()
import time
import mlrose
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.style as style
import random
import pandas as pd

# Problem sizes to sweep, and how many runs to average per point.
lengths = [10, 20, 40, 60, 80, 100]
avgAcross = 10

print("SOLVING ContinuousPeaks")
print("Get fitness for 100 iters on all algos")

# Iteration checkpoints at which fitness is sampled.
itersList = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
problem = mlrose.DiscreteOpt(length=50, fitness_fn=mlrose.ContinuousPeaks())

# Accumulators per algorithm: raw per-run curves, per-iteration means,
# and filtered (checkpointed) views.
fitnessRHCAll = []
fitnessSAAll = []
fitnessGAAll = []
fitnessMIMICAll = []
fitnessRHCMean = []
fitnessSAMean = []
fitnessGAMean = []
fitnessMIMICMean = []
fitnessRHCFilter = []
fitnessSAFilter = []
fitnessGAFilter = []
fitnessMIMICFilter = []
import mlrose import numpy as np from helpers import algos, model_helper, run_opt if __name__ == "__main__": fitness = mlrose.ContinuousPeaks(t_pct=0.15) sizes = [10, 20, 30, 40, 50, 60] for s in sizes: problem = mlrose.DiscreteOpt(length=s, fitness_fn=fitness, maximize=True, max_val=2) file_dir = "contpeaks/n={}/".format(s) run_opt.run_rhc(problem, max_attempts=10, max_iters=np.inf, restarts=0, n=10, filedir=file_dir) schedule = mlrose.GeomDecay(init_temp=1, decay=0.95, min_temp=0.001) run_opt.run_sa(problem, decay=0.95, schedule=schedule, max_iters=np.inf, filedir=file_dir) run_opt.run_ga(problem,
def run_cpeaks():
    """Run the Continuous Peaks experiment suite.

    Phase 1: Monte Carlo sensitivity sweeps for SA, RHC, GA and MIMIC at a
    fixed problem size, plotting each and pickling the raw results.
    Phase 2: a problem-size sweep recording best fitness and wall-clock
    time per algorithm, written out as CSV files under ./output/CPeaks/.
    """
    # If the output/CPeaks directory doesn't exist, create it.
    if not os.path.exists('./output/CPeaks/'):
        os.mkdir('./output/CPeaks/')

    problem_size = 50
    peaks_fit = mlrose.ContinuousPeaks(t_pct=.1)
    cpeaks_state_gen = lambda: np.random.randint(2, size=problem_size)
    init_state = cpeaks_state_gen()
    problem = mlrose.DiscreteOpt(length=problem_size, fitness_fn=peaks_fit,
                                 maximize=True, max_val=2)

    all_results = {}

    print("Running simulated annealing montecarlos")
    sa_results, sa_timing = sim_annealing_runner(
        problem, init_state, state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'sim_anneal', sa_results)
    plot_montecarlo_sensitivity('CPeaks', 'sim_anneal_timing', sa_timing)
    all_results['SA'] = [sa_results, sa_timing]

    print("Running random hill montecarlos")
    rhc_results, rhc_timing = rhc_runner(problem, init_state,
                                         state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'rhc', rhc_results)
    # BUG FIX: the original passed sa_timing here, so the RHC timing plot
    # actually showed the simulated-annealing timings.
    plot_montecarlo_sensitivity('CPeaks', 'rhc_timing', rhc_timing)
    all_results['RHC'] = [rhc_results, rhc_timing]

    print("Running genetic algorithm montecarlos")
    ga_results, ga_timing = ga_runner(problem, init_state,
                                      state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'ga', ga_results)
    plot_montecarlo_sensitivity('CPeaks', 'ga_timing', ga_timing)
    all_results['GA'] = [ga_results, ga_timing]

    print("Running MIMIC montecarlos")
    mimic_results, mimic_timing = mimic_runner(
        problem, init_state, state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'mimic', mimic_results)
    plot_montecarlo_sensitivity('CPeaks', 'mimic_timing', mimic_timing)
    all_results['MIMIC'] = [mimic_results, mimic_timing]

    with open('./output/CPeaks/cpeaks_data.pickle', 'wb') as handle:
        pickle.dump(all_results, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # ---- Phase 2: problem-size sweep --------------------------------
    problem_size_space = np.linspace(10, 100, 20, dtype=int)

    best_fit_dict = {}
    best_fit_dict['Problem Size'] = problem_size_space
    best_fit_dict['Random Hill Climbing'] = []
    best_fit_dict['Simulated Annealing'] = []
    best_fit_dict['Genetic Algorithm'] = []
    best_fit_dict['MIMIC'] = []

    times = {}
    times['Problem Size'] = problem_size_space
    times['Random Hill Climbing'] = []
    times['Simulated Annealing'] = []
    times['Genetic Algorithm'] = []
    times['MIMIC'] = []

    for prob_size in problem_size_space:
        logger.info("---- Problem size: " + str(prob_size) + " ----")
        prob_size_int = int(prob_size)
        peaks_fit = mlrose.ContinuousPeaks(t_pct=.2)
        problem = mlrose.DiscreteOpt(length=prob_size_int, fitness_fn=peaks_fit,
                                     maximize=True, max_val=2)
        cpeaks_state_gen = lambda: np.random.randint(2, size=prob_size_int)
        init_state = cpeaks_state_gen()

        start = datetime.now()
        _, best_fitness_sa, fit_array_sa = mlrose.simulated_annealing(
            problem,
            schedule=mlrose.ExpDecay(exp_const=.001, init_temp=5, min_temp=.01),
            max_attempts=50, max_iters=20000,
            init_state=init_state, track_fits=True)
        best_fit_dict['Simulated Annealing'].append(best_fitness_sa)
        end = datetime.now()
        times['Simulated Annealing'].append((end - start).total_seconds())

        start = datetime.now()
        _, best_fitness_rhc, fit_array_rhc = mlrose.random_hill_climb(
            problem, max_attempts=100, max_iters=3000, restarts=50,
            track_fits=True)
        best_fit_dict['Random Hill Climbing'].append(best_fitness_rhc)
        end = datetime.now()
        times['Random Hill Climbing'].append((end - start).total_seconds())

        start = datetime.now()
        _, best_fitness_ga, fit_array_ga = mlrose.genetic_alg(
            problem, pop_size=prob_size_int * 10, mutation_prob=.025,
            max_attempts=30, track_fits=True, max_iters=1000)
        best_fit_dict['Genetic Algorithm'].append(best_fitness_ga)
        end = datetime.now()
        times['Genetic Algorithm'].append((end - start).total_seconds())

        start = datetime.now()
        _, best_fitness_mimic, fit_array_mimic = mlrose.mimic(
            problem, pop_size=prob_size_int * 10, keep_pct=.1,
            max_attempts=30, track_fits=True, max_iters=2000)
        best_fit_dict['MIMIC'].append(best_fitness_mimic)
        end = datetime.now()
        times['MIMIC'].append((end - start).total_seconds())

    # NOTE(review): these hold only the curves from the FINAL (largest)
    # problem size — the loop overwrites fit_array_* each iteration.
    fits_per_iteration = {}
    fits_per_iteration['Random Hill Climbing'] = fit_array_rhc
    fits_per_iteration['Simulated Annealing'] = fit_array_sa
    fits_per_iteration['Genetic Algorithm'] = fit_array_ga
    fits_per_iteration['MIMIC'] = fit_array_mimic

    fit_frame = pd.DataFrame.from_dict(best_fit_dict, orient='index').transpose()
    time_frame = pd.DataFrame.from_dict(times, orient='index').transpose()
    fit_iteration_frame = pd.DataFrame.from_dict(
        fits_per_iteration, orient='index').transpose()
    fit_frame.to_csv('./output/CPeaks/problem_size_fit.csv')
    time_frame.to_csv('./output/CPeaks/problem_size_time.csv')
    fit_iteration_frame.to_csv('./output/CPeaks/fit_per_iteration.csv')
def __discrete_bit_size_problems(problem, algorithm, length, max_iter,
                                 max_attempt, init_state, edges=None,
                                 coords=None):
    """Build the named discrete problem and solve it with the named algorithm.

    Parameters
    ----------
    problem : str
        One of 'fourpeaks', 'kcolor', 'flipflop', 'continouspeaks' (sic),
        'travellingsales'.
    algorithm : str
        One of 'random_hill_climb', 'simulated_annealing', 'genetic_alg',
        'mimic'.
    length : int
        State-vector length.
    max_iter, max_attempt : int
        Iteration / attempt caps forwarded to the solver.
    init_state : array-like
        Initial state (used by RHC and SA only).
    edges, coords : optional
        Graph edges for 'kcolor'; city coordinates for 'travellingsales'.

    Returns
    -------
    tuple
        (best_fitness, elapsed_seconds, fitness_curve).

    Raises
    ------
    ValueError
        If `problem` or `algorithm` is not one of the recognized names
        (the original code failed later with a NameError instead).
    """
    if problem == 'fourpeaks':
        __fit = mlrose.FourPeaks()
        __problem = mlrose.DiscreteOpt(length=length, fitness_fn=__fit,
                                       maximize=True, max_val=2)
    elif problem == 'kcolor':
        __fit = mlrose.MaxKColor(edges=edges)
        __problem = mlrose.DiscreteOpt(length=length, fitness_fn=__fit,
                                       maximize=True)
    elif problem == 'flipflop':
        # BUG FIX: the original constructed mlrose.OneMax() here, so the
        # 'flipflop' experiments silently optimized the wrong fitness.
        __fit = mlrose.FlipFlop()
        __problem = mlrose.DiscreteOpt(length=length, fitness_fn=__fit,
                                       maximize=True, max_val=2)
    elif problem == 'continouspeaks':
        # NOTE: key spelling kept as-is for existing callers.
        __fit = mlrose.ContinuousPeaks()
        __problem = mlrose.DiscreteOpt(length=length, fitness_fn=__fit,
                                       maximize=True, max_val=2)
    elif problem == 'travellingsales':
        __fit = mlrose.TravellingSales(coords=coords)
        __problem = mlrose.TSPOpt(length=length, fitness_fn=__fit,
                                  maximize=False)
    else:
        raise ValueError("unknown problem: {!r}".format(problem))

    if algorithm == 'random_hill_climb':
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.random_hill_climb(
            __problem, max_iters=max_iter, max_attempts=max_attempt,
            init_state=init_state, curve=True)
        end_time = time.time() - start_time
    elif algorithm == 'simulated_annealing':
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.simulated_annealing(
            __problem, max_iters=max_iter, max_attempts=max_attempt,
            init_state=init_state, curve=True)
        end_time = time.time() - start_time
    elif algorithm == 'genetic_alg':
        # Population-based solvers generate their own initial population,
        # so init_state is not forwarded.
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.genetic_alg(
            __problem, max_iters=max_iter, max_attempts=max_attempt,
            curve=True)
        end_time = time.time() - start_time
    elif algorithm == 'mimic':
        start_time = time.time()
        best_state, best_fitness, best_curve = mlrose.mimic(
            __problem, max_iters=max_iter, max_attempts=max_attempt,
            curve=True)
        end_time = time.time() - start_time
    else:
        raise ValueError("unknown algorithm: {!r}".format(algorithm))

    return best_fitness, end_time, best_curve
axs[1].set_title("Best Fitness") best_fitness = stats["best_fitness"].unstack() if (best_fitness.max().max() > 1000000): axs[1].set_ylabel(r"$\frac{fitness}{10^{N}}$") best_fitness.apply(lambda x: x / np.power(10, x.name / 10), axis=1).plot(ax=axs[1]) else: best_fitness.plot(ax=axs[1]) axs[1].set_ylabel("fitness") axs[1].legend() return fig continuous_peaks = mlrose.ContinuousPeaks(t_pct=0.1) six_peaks = mlrose.SixPeaks(t_pct=0.1) flip_flop = mlrose.FlipFlop() product_consec_ones = mlrose.CustomFitness(fitness_fn=prod_consec_one, problem_type="discrete") count_ones = mlrose.CustomFitness(fitness_fn=lambda state: sum(state), problem_type="discrete") convert_bin_swap = mlrose.CustomFitness(fitness_fn=func_convert_bin_swap, problem_type="discrete") if __name__ == "__main__": records_by_prob = {six_peaks: [], flip_flop: [], convert_bin_swap: []} nbits = range(10, 101, 10) for fitness_fn, records in records_by_prob.items(): print(fitness_fn)