def simulatedAnnealing(fitness, x):
    # This code was originally taken and modified from https://mlrose.readthedocs.io/en/stable/source/intro.html
    start = time.time()

    # Initialize fitness function object using pre-defined class
    # fitness = mlrose.Queens()

    # Define optimization problem object
    if (x == 0):
        problem = mlrose.DiscreteOpt(length=12, fitness_fn=fitness, maximize=False, max_val=12)
    elif (x == 1):
        problem = mlrose.DiscreteOpt(length=9, fitness_fn=fitness, maximize=False, max_val=3)
    else:
        problem = mlrose.DiscreteOpt(length=8, fitness_fn=fitness, maximize=True, max_val=8)

    # Define decay schedule
    schedule = mlrose.GeomDecay()

    # Define the initial state for each problem
    if (x == 0):
        init_state = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    elif (x == 1):
        init_state = np.array([0, 1, 2, 0, 1, 2, 0, 1, 1])
    else:
        init_state = np.array([0, 1, 2, 3, 4, 5, 6, 7])

    # Solve using simulated annealing
    best_state, best_fitness, fitness_curve = mlrose.simulated_annealing(
        problem, schedule=schedule, max_attempts=10, max_iters=1000,
        init_state=init_state, random_state=1, curve=True)

    end = time.time()
    print("Simulated Annealing:")
    print('The best state found is: ', best_state)
    print('The fitness at the best state is: ', best_fitness)
    print("Time: " + str(end - start))
    return best_fitness, end - start
def gen_problem_fourpeaks(problem_size):
    fitness = mlr.FourPeaks(t_pct=0.25)
    maximize = True
    problem = mlr.DiscreteOpt(length=problem_size, fitness_fn=fitness, maximize=maximize)
    return problem, maximize
def gen_problem_onemax(problem_size):
    fitness = mlr.OneMax()
    maximize = True
    problem = mlr.DiscreteOpt(length=problem_size, fitness_fn=fitness, maximize=maximize)
    return problem, maximize
def FlipFlop(self, length=8, verbose=False):
    self.problem = 'flipflop{l}'.format(l=length)
    self.verbose = verbose
    fitness_fn = mlrose.FlipFlop()
    self.problem_fit = mlrose.DiscreteOpt(length=length, fitness_fn=fitness_fn, maximize=True)
def queens_problem(max_attempts, max_iters):
    fitness_cust = mlrose.CustomFitness(queens_max)
    problem = mlrose.DiscreteOpt(length=8, fitness_fn=fitness_cust, maximize=False, max_val=8)
    # problem = mlrose.DiscreteOpt(length=8, fitness_fn=queens_max, maximize=True, max_val=8)

    # Define decay schedule
    schedule = mlrose.ExpDecay()

    # Define initial state
    init_state = np.array([0, 1, 2, 3, 4, 5, 6, 7])

    # Solve problem using simulated annealing
    best_state, best_fitness = mlrose.simulated_annealing(
        problem, schedule=schedule, max_attempts=max_attempts, max_iters=max_iters,
        init_state=init_state, random_state=1)

    print(best_state)
    print(best_fitness)
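# The queens_max custom fitness referenced above (and in the Eight Queens main()
# further down) is not shown in this file. A sketch along the lines of the mlrose
# tutorial version, which counts non-attacking queen pairs and is therefore meant
# to be maximized, might look like this (an assumption, not this repo's actual code):
def queens_max(state):
    fitness_cnt = 0
    # For every pair of queens, count the pair if they do not share a row
    # or a diagonal (columns are distinct by construction of the state vector).
    for i in range(len(state) - 1):
        for j in range(i + 1, len(state)):
            if (state[j] != state[i]
                    and state[j] != state[i] + (j - i)
                    and state[j] != state[i] - (j - i)):
                fitness_cnt += 1
    return fitness_cnt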
def create_problem(self):
    fitness = mlrose.SixPeaks(t_pct=self.t_pct)
    problem = mlrose.DiscreteOpt(length=self.length, fitness_fn=fitness, maximize=True, max_val=2)
    return problem
def create_problem(self):
    fitness = mlrose.FlipFlop()
    problem = mlrose.DiscreteOpt(length=self.length, fitness_fn=fitness, maximize=True, max_val=2)
    return problem
def flipflop_factory(length=30):
    fitness = count_evaluations(mlrose.FlipFlop)
    fitness_final = mlrose.CustomFitness(fitness)
    flipflop = mlrose.DiscreteOpt(length, fitness_final)
    global_optimum = length - 1
    return flipflop, global_optimum
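# The count_evaluations helper used by the *_factory functions is not defined in
# this file. A plausible sketch (an assumption about its behavior, not the original
# implementation): wrap an mlrose fitness class so each call delegates to evaluate()
# while incrementing a counter, leaving the wrapper usable with mlrose.CustomFitness.
def count_evaluations(fitness_cls, **kwargs):
    fitness_obj = fitness_cls(**kwargs)

    def wrapped(state):
        # Count every fitness evaluation, then delegate to the underlying fitness object.
        wrapped.evaluations += 1
        return fitness_obj.evaluate(state)

    wrapped.evaluations = 0
    return wrapped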
def optimize(state_length, fitness_fn, algorithm, algorithm_kwargs, **cust_fitness_fn_kwargs):
    """Uses optimization techniques to identify the binary state vector that
    minimizes fitness (MOER) over the forecast period.

    Args:
        state_length (int): length of the state vector to be optimized (minimized);
            represents the length of the forecast in periods. Ex: a 1 hr forecast
            series given in 5 minute periods would have a length of 12.
        fitness_fn: callable for calculating the fitness of a state, with the
            signature fitness_fn(state, **kwargs).
        algorithm (mlrose optimization function): one of mlrose.simulated_annealing,
            mlrose.random_hill_climb, mlrose.hill_climb, mlrose.genetic_alg, or
            mlrose.mimic. See the mlrose documentation for details.
        algorithm_kwargs (dict): kwargs for the mlrose optimization algorithms.

    Returns:
        best_state (array): Numpy array containing the state that optimizes the fitness function.
        best_fitness (float): value of the fitness (MOER) at the best state.
        curve (array): Numpy array containing the fitness at every iteration.
            Requires curve=True in algorithm_kwargs.
    """
    # Create custom fitness object using the mlrose constructor
    cust_fitness_fn = mlrose.CustomFitness(fitness_fn, **cust_fitness_fn_kwargs)

    # Define the problem using the mlrose constructor
    prob = mlrose.DiscreteOpt(length=state_length, fitness_fn=cust_fitness_fn, maximize=False, max_val=2)

    # Set the initial state using a heuristic to accelerate optimization
    init_state = initial_state(cust_fitness_fn_kwargs.get('fridge_temp'), state_length=state_length)

    # Use the mlrose optimization algorithm to find the state vector with minimum MOER
    best_state, best_fitness, curve = algorithm(prob, init_state=init_state, **algorithm_kwargs)

    return best_state, best_fitness, curve
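# Minimal usage sketch for optimize() (not part of the original module). It assumes
# mlrose is imported and the module's initial_state() helper is available; the
# toy_moer_fitness function and the fridge_temp value are illustrative placeholders.
def toy_moer_fitness(state, fridge_temp=40.0):
    # Placeholder MOER signal: pretend each "on" period costs one unit.
    return float(state.sum())

best_state, best_fitness, curve = optimize(
    state_length=12,
    fitness_fn=toy_moer_fitness,
    algorithm=mlrose.simulated_annealing,
    algorithm_kwargs={'max_attempts': 10, 'max_iters': 1000, 'curve': True},
    fridge_temp=40.0)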
def main(): name_of_exp = "One Max" fitness = mlrose.OneMax() mimic = [] z_s = ['RHC', 'SA', 'GA', 'MIMIC'] for i in [100, 200, 300, 400, 500]: problem = mlrose.DiscreteOpt(length=15, fitness_fn=fitness, maximize=True, max_val=2) print("MIMC") max_atts = 10 best_state, best_fitness, learning_curve, timing_curve = mlrose.mimic( problem, pop_size=i, keep_pct=0.1, max_attempts=100, max_iters=100, curve=True, random_state=1, fast_mimic=True) mimic.append(learning_curve) print(i) print(best_fitness) print(max_atts) for x, z in zip([100, 200, 300, 400, 500], mimic): plt.plot(z, label=str(x)) plt.legend() plt.title( 'MIMIC Randomized Optimization PopSize vs Fitness Curve (OneMax)') plt.xlabel('Function iteration count') plt.ylabel('Fitness function value') plt.show()
def main(): name_of_exp = "Eight Queens" fitness = mlrose.CustomFitness(queens_max) problem = mlrose.DiscreteOpt(length=8, fitness_fn=fitness, maximize=True, max_val=8) # Define initial state mimic = [] init_state = np.array([0, 1, 2, 3, 4, 5, 6, 7]) for i in [mlrose.ExpDecay(), mlrose.GeomDecay(), mlrose.ArithDecay()]: best_state, best_fitness, learning_curve, timing_curve = mlrose.simulated_annealing( problem, init_state=init_state, schedule=i, max_attempts=1000, max_iters=1000, curve=True, random_state=1) mimic.append(learning_curve) print(i) print(best_fitness) for x, z in zip(['Exp', 'Geom', 'Arith'], mimic): plt.plot(z, label=str(x)) plt.legend() plt.title( 'SA Randomized Optimization DecaySchedule vs Fitness Curve (8-Queens)') plt.xlabel('Function iteration count') plt.ylabel('Fitness function value') plt.show()
def create_problem(self):
    edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
    fitness = mlrose.FlipFlop()
    problem = mlrose.DiscreteOpt(length=self.length, fitness_fn=fitness, maximize=True, max_val=2)
    return problem
def FourPeaks(self, length=10, t_pct=0.1, verbose=False):
    self.problem = 'fourpeaks{l}'.format(l=length)
    self.verbose = verbose
    fitness_fn = mlrose.FourPeaks(t_pct=t_pct)
    # define optimization problem object
    self.problem_fit = mlrose.DiscreteOpt(length=length, fitness_fn=fitness_fn, maximize=True)
def one_max(bit_length=50):
    fitness_fn = mlrose.OneMax()
    problem = mlrose.DiscreteOpt(length=bit_length, fitness_fn=fitness_fn, max_val=2)
    return problem
def gen_problem_pairiodic(problem_size):
    fitness = mlr.CustomFitness(fitness_fn=pairiodic6, problem_type='discrete')
    maximize = True
    problem = mlr.DiscreteOpt(length=problem_size, fitness_fn=fitness, maximize=maximize, max_val=2)
    return problem, maximize
def flipflop(max_iter=500, early_stop=None, mimic_early_stop=100, n_runs=10, savedir=None):
    print('\n\n|========= Flip Flop =========|\n')
    fitness = mlrose.FlipFlop()
    problem_size = [500]
    max_attempts = max_iter * 2 if early_stop is None else early_stop
    mimic_early_stop = max_attempts if mimic_early_stop is None else mimic_early_stop
    hyperparams = {
        'rhc': {
            'restarts': 0,
            'max_attempts': max_attempts
        },
        'mimic': {
            'pop_size': 500,
            'keep_pct': 0.2,
            'max_attempts': mimic_early_stop,
            'fast_mimic': True
        },
        'sa': {
            'schedule': mlrose.GeomDecay(),
            'init_state': None,
            'max_attempts': max_attempts
        },
        'ga': {
            'pop_size': 1000,
            'mutation_prob': 0.2,
            'pop_breed_percent': 0.75,
            'elite_dreg_ratio': 0.95,
            'max_attempts': mimic_early_stop
        }
    }
    print('Hyperparameters: ', hyperparams)

    results = []
    runtimes = []
    timings = {}
    for ps in problem_size:
        problem = mlrose.DiscreteOpt(ps, fitness, max_val=2, maximize=True)
        print('Running with input size', ps)
        print('-----------------------------')
        r, t, wt = util.optimize_iters(problem, max_iter, hyperparams, n_runs)
        results.append(r)
        runtimes.append(t)
        timings['ps{}'.format(ps)] = wt

    print('final runtimes')
    t = pd.DataFrame(runtimes, index=problem_size)
    print(t)
    if savedir:
        util.save_output('flipflop', savedir, t, results, timings, problem_size)

    return t, results, timings
def fourpeaks_factory(length=50, t_pct=0.1):
    fourpeaks_fitness = count_evaluations(mlrose.FourPeaks, t_pct=t_pct)
    fourpeaks_fitness_final = mlrose.CustomFitness(fourpeaks_fitness)
    fourpeaks = mlrose.DiscreteOpt(length=length, fitness_fn=fourpeaks_fitness_final)
    T = int(t_pct * length)
    global_optimum = 2 * length - T - 1
    return fourpeaks, global_optimum
def queens_factory(length=8):
    fitness = count_evaluations(QueensCustom)
    fitness_final = mlrose.CustomFitness(fitness)
    problem = mlrose.DiscreteOpt(length=length, fitness_fn=fitness_final, max_val=length)
    global_optimum = int(comb(length, 2))  # I think?
    return problem, global_optimum
def queens(n_queens=16):
    fitness_fn = mlrose.Queens()
    problem = mlrose.DiscreteOpt(length=n_queens, maximize=False, fitness_fn=fitness_fn, max_val=n_queens)
    return problem
def run_optimization_loop(self, sentence, init_state):
    fitness_function = mlrose.CustomFitness(partial(self.scoringNN.score, sentence))
    problem = mlrose.DiscreteOpt(length=len(sentence), fitness_fn=fitness_function,
                                 maximize=True, max_val=len(ALL_TAGS))
    try:
        best_state, best_fitness = self.search_class(problem, init_state=init_state, **self.search_params)
    except TypeError:
        best_state, best_fitness = self.search_class(problem, **self.search_params)
    return best_state
def OneMax(self, length=10, verbose=False):
    self.problem = 'onemax{l}'.format(l=length)
    self.verbose = verbose
    np.random.seed(0)
    # Note: the problem size is hardcoded to 1000 here, overriding the `length`
    # argument, and the randomly generated `state` is never used.
    problem_size = 1000
    fitness = mlrose.OneMax()
    state = np.random.randint(2, size=problem_size)
    self.problem_fit = mlrose.DiscreteOpt(length=problem_size, fitness_fn=fitness, maximize=True)
def fit(length, fitness):
    problem = mlrose.DiscreteOpt(length=length, fitness_fn=fitness, maximize=True, max_val=2)
    iterations = [10, 50, 100, 200, 400, 800, 1600, 3200]
    RHC, SA, GA, MM = ([], [], [], [])
    time_RHC, time_SA, time_GA, time_MM = ([], [], [], [])

    for iter in iterations:
        print("max iterations = " + str(iter))

        # Random hill climbing: keep the best fitness over 10 random initial states
        start_time = time.time()
        best_fitness = 0
        for times in range(10):
            best_state, run_fitness = mlrose.random_hill_climb(
                problem, max_attempts=10, max_iters=iter, restarts=0,
                init_state=np.random.randint(2, size=(length,)))
            best_fitness = max(best_fitness, run_fitness)
        # print(best_state)
        RHC.append(best_fitness)
        print(best_fitness)
        time_RHC.append((time.time() - start_time) / 10)

        # Simulated annealing: keep the best fitness over 10 random initial states
        start_time = time.time()
        best_fitness = 0
        for times in range(10):
            best_state, run_fitness = mlrose.simulated_annealing(
                problem, schedule=mlrose.GeomDecay(), max_attempts=10, max_iters=iter,
                init_state=np.random.randint(2, size=(length,)))
            best_fitness = max(best_fitness, run_fitness)
        # print(best_state)
        SA.append(best_fitness)
        print(best_fitness)
        time_SA.append((time.time() - start_time) / 10)

        # Genetic algorithm
        start_time = time.time()
        best_fitness = 0
        best_state, best_fitness = mlrose.genetic_alg(
            problem, pop_size=200, mutation_prob=0.1, max_attempts=10, max_iters=iter)
        # print(best_state)
        GA.append(best_fitness)
        print(best_fitness)
        time_GA.append((time.time() - start_time))

        # MIMIC
        start_time = time.time()
        best_fitness = 0
        best_state, best_fitness = mlrose.mimic(
            problem, pop_size=200, keep_pct=0.2, max_attempts=10, max_iters=iter)
        # print(best_state)
        MM.append(best_fitness)
        print(best_fitness)
        time_MM.append((time.time() - start_time))

    plot(RHC, SA, GA, MM, time_RHC, time_SA, time_GA, time_MM, iterations)
    filewrite_array("iterations:", iterations)
    filewrite_array("Fitness(RHC):", RHC)
    filewrite_array("Fitness(SA):", SA)
    filewrite_array("Fitness(GA):", GA)
    filewrite_array("Fitness(MM):", MM)
    filewrite_array("Fitness(time_RHC):", time_RHC)
    filewrite_array("Fitness(time_SA):", time_SA)
    filewrite_array("Fitness(time_GA):", time_GA)
    filewrite_array("Fitness(time_MM):", time_MM)
def knapsack(n_items=5):
    max_val = 5
    weights = np.random.choice(range(1, 10), n_items)
    values = np.random.choice(range(1, max_val), n_items)
    fitness_fn = mlrose.Knapsack(weights, values)
    problem = mlrose.DiscreteOpt(length=n_items, fitness_fn=fitness_fn, max_val=max_val)
    return problem
def create_problem(self): """weights = [10, 5, 2, 8, 15, 4, 12, 9, 7] values = [1, 2, 3, 4, 5, 6, 7, 8, 9]""" weights = [10, 5, 2, 8, 15] values = [1, 2, 3, 4, 5] max_weight_pct = 0.6 fitness = mlrose.Knapsack(weights, values, max_weight_pct) problem = mlrose.DiscreteOpt(length=5, fitness_fn=fitness, maximize=True, max_val=6) return problem
def maxKColor(edges, nodes, colors):
    fitness = mlrose.MaxKColor(edges)
    problem = mlrose.DiscreteOpt(length=nodes, fitness_fn=fitness, maximize=False, max_val=colors)

    t0 = time()
    best_state, best_fitness = mlrose.random_hill_climb(problem, max_attempts=100,
                                                        max_iters=np.inf, init_state=None)
    finish = time() - t0
    print(best_state)
    print(best_fitness)
    print(finish)
def geneticAlgorithm(fitness, x):
    # This code was originally taken and modified from https://mlrose.readthedocs.io/en/stable/source/intro.html
    start = time.time()

    # Initialize fitness function object using pre-defined class
    # fitness = mlrose.Queens()

    # Define optimization problem object
    if (x == 0):
        problem = mlrose.DiscreteOpt(length=12, fitness_fn=fitness, maximize=False, max_val=12)
    elif (x == 1):
        problem = mlrose.DiscreteOpt(length=9, fitness_fn=fitness, maximize=False, max_val=3)
    else:
        problem = mlrose.DiscreteOpt(length=8, fitness_fn=fitness, maximize=True, max_val=8)

    # Solve using genetic algorithm
    best_state, best_fitness, fitness_curve = mlrose.genetic_alg(
        problem, pop_size=200, mutation_prob=0.1, curve=True, max_iters=1000, random_state=1)

    end = time.time()
    print("Genetic Algorithm:")
    print('The best state found is: ', best_state)
    print('The fitness at the best state is: ', best_fitness)
    print("Time: " + str(end - start))
    return best_fitness, end - start
def run():
    fitness = mlrose.FourPeaks(t_pct=0.1)
    arrLen = 100
    problem = mlrose.DiscreteOpt(length=arrLen, fitness_fn=fitness)

    # basePath = 'C:\\Users\\mwest\\Desktop\\ML\\source\\Machine-Learning-Local - Copy\\Graphs\\randomized\\Four Peaks\\'
    basePath = None
    runHill(problem, basePath)
    runAnnealing(problem, basePath)
    runGenetic(problem, basePath)
    runMimic(problem, basePath)
    return
def get_prob(self, t_pct=None, p_length=None):
    if self.prob_name == 'Four Peaks':
        fitness = mlrose.FourPeaks(t_pct)
        p_len = 100
        self.schedule = mlrose.ExpDecay()
        self.restarts = 0
        self.mutation_prob = 0.1
        self.keep_pct = 0.1
        self.pop_size = 500
    elif self.prob_name == "Continuous Peaks":
        fitness = mlrose.ContinuousPeaks(t_pct)
        p_len = 100
        self.schedule = mlrose.GeomDecay()
        self.restarts = 0
        self.mutation_prob = 0.1
        self.keep_pct = 0.2
        self.pop_size = 200
    elif self.prob_name == "Max K Color":
        fitness = mlrose.MaxKColor(self.COLOREDGE)
        p_len = 100
        self.schedule = mlrose.ExpDecay()
        self.restarts = 0
        self.mutation_prob = 0.2
        self.keep_pct = 0.2
        self.pop_size = 200
    elif self.prob_name == "Flip Flop":
        fitness = mlrose.FlipFlop()
        p_len = 100
        self.schedule = mlrose.ArithDecay()
        self.restarts = 0
        self.mutation_prob = 0.2
        self.keep_pct = 0.5
        self.pop_size = 500
    elif self.prob_name == "One Max":
        fitness = mlrose.OneMax()
        p_len = 100
        self.schedule = mlrose.GeomDecay()
        self.restarts = 0
        self.mutation_prob = 0.2
        self.keep_pct = 0.1
        self.pop_size = 100
    else:
        fitness = None
        p_len = 0

    if p_length is None:
        p_length = p_len
    problem = mlrose.DiscreteOpt(length=p_length, fitness_fn=fitness)
    init_state = np.random.randint(2, size=p_length)
    return problem, init_state
def run_one(fitness_fn, nbit, algos):
    records = []
    # Initialize problem
    opt_prob = mlrose.DiscreteOpt(length=nbit, fitness_fn=fitness_fn)
    for algo, parameters in algos.items():
        for parameter in tqdm(parameters, desc=algo.__name__):
            # Track runtime
            start = time.process_time()
            if algo == mlrose.genetic_alg:
                best_state, best_fitness, fitness_curve = algo(
                    problem=opt_prob, max_attempts=10, curve=True,
                    pop_size=10 * nbit, **parameter)
            elif algo == mlrose.mimic:
                best_state, best_fitness, fitness_curve = algo(
                    problem=opt_prob, max_attempts=10, curve=True,
                    pop_size=10 * nbit, fast_mimic=True, **parameter)
            else:
                best_state, best_fitness, fitness_curve = algo(
                    problem=opt_prob, max_attempts=10, curve=True, **parameter)
            end = time.process_time()
            runtime = end - start

            # Add record
            records.append({
                "nbit": nbit,
                "algorithm": algo.__name__,
                "parameter_dict": parameter,
                "parameter": parameter_str(parameter),
                "runtime": runtime,
                "best_fitness": best_fitness,
                "fitness_curve": fitness_curve
            })
    return records
def Knapsack(self, length=10, max_weight_pct=0.2, verbose=False):
    def gen_data(length):
        weights = []
        values = []
        max_weight = 50
        max_val = 50
        for i in range(length):
            weights.append(np.random.randint(1, max_weight))
            values.append(np.random.randint(1, max_val))
        return [weights, values]

    self.problem = 'knapsack{l}'.format(l=length)
    self.verbose = verbose
    weights, values = gen_data(length)
    fitness_fn = mlrose.Knapsack(weights, values, max_weight_pct)
    # define optimization problem object
    self.problem_fit = mlrose.DiscreteOpt(length=len(weights), fitness_fn=fitness_fn, maximize=True)