def experiment_fsa(of, maxeval, num_runs, T0, n0, alpha, r):
    """Benchmark Fast Simulated Annealing on objective `of`.

    Runs the heuristic `num_runs` times with the given cooling parameters
    (T0, n0, alpha) and Cauchy-mutation radius `r`, and collects one result
    row per run.

    Returns a pandas DataFrame with columns
    ['heur', 'run', 'T0', 'n0', 'alpha', 'r', 'best_x', 'best_y', 'neval'].
    """
    result_columns = ['heur', 'run', 'T0', 'n0', 'alpha', 'r',
                      'best_x', 'best_y', 'neval']
    # Name is the same for every run, so build it (and the progress label) once.
    heur_name = 'FSA_{}_{}_{}_{}'.format(T0, n0, alpha, r)
    progress_label = 'Testing T0={}, n0={}, alpha={}, r={}'.format(T0, n0, alpha, r)
    rows = []
    for run_idx in tqdm(range(num_runs), progress_label):
        # Fresh mutation object per run; correction keeps solutions feasible.
        mutation = CauchyMutation(r=r, correction=Correction(of))
        heur = FastSimulatedAnnealing(of, maxeval=maxeval, T0=T0, n0=n0,
                                      alpha=alpha, mutation=mutation)
        row = heur.search()
        # Tag the run's result dict with its identifying parameters.
        row.update({'run': run_idx, 'heur': heur_name,
                    'T0': T0, 'n0': n0, 'alpha': alpha, 'r': r})
        rows.append(row)
    return pd.DataFrame(rows, columns=result_columns)
def get_correction_mutation(of):
    """Build Cauchy mutations over all (correction, radius) combinations.

    For each correction strategy (vanilla Correction and MirrorCorrection)
    and each radius r in [1, 1.5, 2, 5], create one CauchyMutation.

    Returns a list of 8 CauchyMutation instances.

    Fix: the loop variable `correction` was previously ignored — every
    mutation was constructed with `Correction(of)`, so MirrorCorrection was
    never actually used. Each listed correction is now applied.
    """
    correction_mutations = []
    for correction in [Correction(of), MirrorCorrection(of)]:
        for r in [1, 1.5, 2, 5]:
            correction_mutations.append(
                CauchyMutation(r, correction=correction))
    return correction_mutations
def get_overview_mutation(of):
    """Build Cauchy mutations for an overview sweep of small radii.

    For each correction strategy (vanilla Correction and MirrorCorrection)
    and each radius r in [0.1, 0.5, 0.75], create one CauchyMutation.

    Returns a list of 6 CauchyMutation instances.

    Fix: the loop variable `correction` was previously ignored — every
    mutation was constructed with `Correction(of)`, so MirrorCorrection was
    never actually used. Each listed correction is now applied.
    """
    overview_mutations = []
    for correction in [Correction(of), MirrorCorrection(of)]:
        for r in [0.1, 0.5, 0.75]:
            overview_mutations.append(
                CauchyMutation(r, correction=correction))
    return overview_mutations
{'crossover': Crossover(), 'name': 'mix'}, {'crossover': RandomCombination(), 'name': 'rnd'}] for multipoint in multipoints: crossover = {'crossover': UniformMultipoint(multipoint), 'name': 'uni{}'.format(multipoint)} crossovers.append(crossover) corrections = [ {'correction': Correction(tsp), 'name': 'vanilla'}, {'correction': MirrorCorrection(tsp), 'name': 'mirror'}, {'correction': ExtensionCorrection(tsp), 'name': 'extension'}] parameters = [1, 3, 5] mutations = [] for parameter in parameters: for correction in corrections: mutation = {'mutation': CauchyMutation(r=parameter, correction = correction['correction']), 'name': 'cauchy{}_' .format(parameter)+correction['name']} mutations.append(mutation) mutation = {'mutation': LevyMutation(scale=parameter, correction = correction['correction']), 'name': 'levy{}_' .format(parameter)+correction['name']} mutations.append(mutation) # In[8]: results = pd.DataFrame() for crossover in crossovers: for mutation in mutations: heur_name = 'GO_mut:({})_cro:{}'.format(mutation['name'], crossover['name']) runs = []
heur_name = 'GO_{}'.format(N) for i in tqdm(range(num_runs), 'Testing {}'.format(heur_name)): result = GeneticOptimization(of, maxeval, N=N, M=M, Tsel1=Tsel1, Tsel2=Tsel2, mutation=mutation, crossover=crossover).search() result['run'] = i result['heur'] = heur_name result['N'] = N results.append(result) return pd.DataFrame(results, columns=['heur', 'run', 'N', 'best_x', 'best_y', 'neval']) # In[16]: results = pd.DataFrame() mutation = CauchyMutation(r=0.75, correction=Correction(tsp)) crossover = UniformMultipoint(1) for N in [1, 2, 3, 5, 10, 20, 30, 100]: res = experiment_go(of=tsp, maxeval=maxeval, num_runs=NUM_RUNS, N=N, M=N*3, Tsel1=0.1, Tsel2=1, mutation=mutation, crossover=crossover) results = pd.concat([results, res], axis=0) # In[17]: # (well-known performance criteria from previous classes) def rel(x): return len([n for n in x if n < np.inf])/len(x) def mne(x): return np.mean([n for n in x if n < np.inf])
# # Optimization demonstration # In[12]: from heur_aux import MirrorCorrection, CauchyMutation from heur_fsa import FastSimulatedAnnealing # In[13]: heur = FastSimulatedAnnealing(of, maxeval=10000, T0=10, n0=10, alpha=2, mutation=CauchyMutation( r=0.1, correction=MirrorCorrection(of))) res = heur.search() print('x_best = {}'.format(res['best_x'])) print('y_best = {}'.format(res['best_y'])) print('neval = {}'.format(res['neval'])) # In[14]: visualize_solution(res['best_x'], of) # ## Excercises # # * ~~Implement~~ fix the implementation of this objective function (especially the random solution generator) # * Tune heuristics for this objective function # * Tune this objective function, e.g. by penalization for smaller number of clusters than $h$ (and make sure you understand why this is possible) # * Compare heuristic approach to the original _k_-means
MirrorCorrection(dj).correct(np.array([6.12, -4.38, 2.96])) # In[20]: # extension correction in R^n (mind x[1]) ExtensionCorrection(dj).correct(np.array([6.12, -4.38, 2.96])) # I.e. corrections work also in the continuous case, as expected... # In[21]: from heur_aux import CauchyMutation # In[22]: cauchy = CauchyMutation(r=.1, correction=MirrorCorrection(dj)) cauchy.mutate(np.array([6.12, -4.38, 2.96])) # ## De Jong 1 optimization via FSA # # Thanks to current state of the framework, no modification to FSA is needed. # In[23]: from heur_fsa import FastSimulatedAnnealing # In[24]: heur = FastSimulatedAnnealing(dj, maxeval=10000, T0=10,
# Crossover operators to compare, each tagged with a short display name.
crossovers = [
    {'crossover': Crossover(), 'name': 'mix'},
    {'crossover': UniformMultipoint(1), 'name': 'uni'},  # test for other n as well!
    {'crossover': RandomCombination(), 'name': 'rnd'},
]

# In[12]:

# Run NUM_RUNS genetic-optimization searches per crossover operator and
# accumulate all per-run records into a single DataFrame.
results = pd.DataFrame()
for crossover in crossovers:
    heur_name = 'GO_{}'.format(crossover['name'])
    collected = []
    for run_idx in tqdm_notebook(range(NUM_RUNS), 'Testing {}'.format(heur_name)):
        optimizer = GeneticOptimization(
            tsp, maxeval, N=5, M=15, Tsel1=1.0, Tsel2=0.5,
            mutation=CauchyMutation(r=1.0, correction=Correction(tsp)),
            crossover=crossover['crossover'])
        record = optimizer.search()
        # Tag the result with its run index and configuration labels.
        record['run'] = run_idx
        record['heur'] = heur_name
        record['crossover'] = crossover['name']
        collected.append(record)
    batch = pd.DataFrame(collected,
                         columns=['heur', 'run', 'crossover',
                                  'best_x', 'best_y', 'neval'])
    results = pd.concat([results, batch], axis=0)

# In[13]:

def rel(x):
    """Reliability: fraction of runs that found a finite (feasible) optimum."""
    return sum(1 for n in x if n < np.inf) / len(x)