Beispiel #1
0
def get_overview_mutation(of):
    """Build a coarse grid of Cauchy mutations for an overview experiment.

    Pairs each correction strategy (sticky and mirror) with a few small
    spread parameters r.

    :param of: objective function instance the corrections are bound to
    :return: list of CauchyMutation objects, one per (correction, r) pair
    """
    overview_mutations = []
    for correction in [Correction(of), MirrorCorrection(of)]:
        for r in [0.1, 0.5, 0.75]:
            # BUG FIX: use the loop's `correction` instead of a hard-coded
            # Correction(of), which silently made the outer loop a no-op
            # (every mutation got the sticky correction).
            overview_mutations.append(
                CauchyMutation(r, correction=correction))
    return overview_mutations
Beispiel #2
0
def get_correction_mutation(of):
    """Build Cauchy mutations covering both correction strategies.

    Pairs each correction strategy (sticky and mirror) with a few larger
    spread parameters r.

    :param of: objective function instance the corrections are bound to
    :return: list of CauchyMutation objects, one per (correction, r) pair
    """
    correction_mutations = []
    for correction in [Correction(of), MirrorCorrection(of)]:
        for r in [1, 1.5, 2, 5]:
            # BUG FIX: use the loop's `correction` instead of a hard-coded
            # Correction(of), which silently made the outer loop a no-op
            # (every mutation got the sticky correction).
            correction_mutations.append(
                CauchyMutation(r, correction=correction))
    return correction_mutations
Beispiel #3
0
def experiment_fsa(of, maxeval, num_runs, T0, n0, alpha, r):
    """Run Fast Simulated Annealing repeatedly and collect run statistics.

    :param of: objective function
    :param maxeval: evaluation budget per run
    :param num_runs: number of independent runs
    :param T0: initial temperature
    :param n0: cooling-schedule parameter
    :param alpha: cooling-schedule exponent
    :param r: Cauchy mutation spread
    :return: pandas DataFrame with one row per run
    """
    label = 'Testing T0={}, n0={}, alpha={}, r={}'.format(T0, n0, alpha, r)
    results = []
    for run_idx in tqdm(range(num_runs), label):
        mutation = CauchyMutation(r=r, correction=Correction(of))
        fsa = FastSimulatedAnnealing(of, maxeval=maxeval, T0=T0, n0=n0,
                                     alpha=alpha, mutation=mutation)
        row = fsa.search()
        row['run'] = run_idx
        row['heur'] = 'FSA_{}_{}_{}_{}'.format(T0, n0, alpha,
                                               r)  # name of the heuristic
        # tag the row with the parameter setting it was produced under
        for key, value in (('T0', T0), ('n0', n0), ('alpha', alpha), ('r', r)):
            row[key] = value
        results.append(row)

    columns = ['heur', 'run', 'T0', 'n0', 'alpha', 'r', 'best_x',
               'best_y', 'neval']
    return pd.DataFrame(results, columns=columns)
Beispiel #4
0
# 
# In the current settings there are 5 crossover, 3 correction and 2 mutation options.

# In[7]:


# Build the operator pools: two fixed crossovers plus one uniform
# multipoint crossover per point count in `multipoints`.
multipoints = [1, 2, 3]
crossovers = [
    {'crossover': Crossover(), 'name': 'mix'},
    {'crossover': RandomCombination(), 'name': 'rnd'},
]
crossovers += [
    {'crossover': UniformMultipoint(points), 'name': 'uni{}'.format(points)}
    for points in multipoints
]

# Correction strategies, each tagged with a short label for result tables.
corrections = [
    {'correction': Correction(tsp), 'name': 'vanilla'},
    {'correction': MirrorCorrection(tsp), 'name': 'mirror'},
    {'correction': ExtensionCorrection(tsp), 'name': 'extension'},
]

# Mutations: one Cauchy and one Levy variant per (parameter, correction) pair.
parameters = [1, 3, 5]
mutations = []
for parameter in parameters:
    for correction in corrections:
        label = '{}_{}'.format(parameter, correction['name'])
        mutations.append({
            'mutation': CauchyMutation(r=parameter,
                                       correction=correction['correction']),
            'name': 'cauchy' + label,
        })
        mutations.append({
            'mutation': LevyMutation(scale=parameter,
                                     correction=correction['correction']),
            'name': 'levy' + label,
        })

Beispiel #5
0
    heur_name = 'GO_{}'.format(N)
    for i in tqdm(range(num_runs), 'Testing {}'.format(heur_name)):
        result = GeneticOptimization(of, maxeval, N=N, M=M, Tsel1=Tsel1, Tsel2=Tsel2, 
                                     mutation=mutation, crossover=crossover).search()
        result['run'] = i
        result['heur'] = heur_name
        result['N'] = N
        results.append(result)
    return pd.DataFrame(results, columns=['heur', 'run', 'N', 'best_x', 'best_y', 'neval'])


# In[16]:


# Sweep the GO population size N (offspring count M = 3*N) with a fixed
# Cauchy mutation (r=0.75, sticky correction) and 1-point uniform crossover.
mutation = CauchyMutation(r=0.75, correction=Correction(tsp))
crossover = UniformMultipoint(1)
# PERF FIX: collect the per-N frames and concatenate once at the end;
# calling pd.concat inside the loop re-copies the accumulated frame every
# iteration (quadratic), and concatenating with an initial empty DataFrame
# is a deprecated pandas pattern that can distort dtypes.
frames = []
for N in [1, 2, 3, 5, 10, 20, 30, 100]:
    frames.append(
        experiment_go(of=tsp, maxeval=maxeval, num_runs=NUM_RUNS, N=N, M=N * 3,
                      Tsel1=0.1, Tsel2=1,
                      mutation=mutation, crossover=crossover))
results = pd.concat(frames, axis=0)


# In[17]:


# (well-known performance criteria from previous classes)
def rel(x):
    """Reliability: fraction of runs with a finite evaluation count."""
    finite_count = sum(1 for value in x if value < np.inf)
    return finite_count / len(x)
def mne(x):
    """Mean number of evaluations, averaged over successful (finite) runs only."""
    finite = [value for value in x if value < np.inf]
    return np.mean(finite)
Beispiel #6
0
# # Analysis
#
# **Can we improve the best configuration ($T_0=1$)?**
#
# Let's carefully analyze the data...

# In[16]:

# Run FSA once with the promising configuration (T0=1) and report the outcome.
heur = FastSimulatedAnnealing(
    tsp,
    maxeval=1000,
    T0=1,
    n0=1,
    alpha=2,
    mutation=CauchyMutation(r=0.5, correction=Correction(tsp)),
)
result = heur.search()
for key in ('neval', 'best_x', 'best_y'):
    print('{} = {}'.format(key, result[key]))

# In[17]:

# Re-order the step-log columns for better readability, then preview it.
log_data = result['log_data'].copy()
log_data = log_data[['step', 'x', 'f_x', 'y', 'f_y', 'T', 'swap']]
log_data.head(10)

# In[18]:

Beispiel #7
0
# In[12]:

# Compare GO crossover operators: NUM_RUNS independent searches per operator.
results = pd.DataFrame()
for crossover in crossovers:
    heur_name = 'GO_{}'.format(crossover['name'])
    runs = []
    for i in tqdm(range(NUM_RUNS), 'Testing {}'.format(heur_name)):
        optimizer = GeneticOptimization(
            tsp, maxeval, N=5, M=15, Tsel1=1, Tsel2=0.1,
            mutation=CauchyMutation(r=.75, correction=Correction(tsp)),
            crossover=crossover['crossover'])
        run = optimizer.search()
        # tag each run with its index, heuristic name and crossover label
        for key, value in (('run', i), ('heur', heur_name),
                           ('crossover', crossover['name'])):
            run[key] = value
        runs.append(run)

    res_df = pd.DataFrame(
        runs,
        columns=['heur', 'run', 'crossover', 'best_x', 'best_y', 'neval'])
    results = pd.concat([results, res_df], axis=0)

# In[13]:


def rel(x):
Beispiel #8
0
# In[3]:

# Fix the RNG seed so all demo runs below are reproducible.
np.random.seed(6)

# We will study DRASET's behavior on two functions - Rastrigin's function 6 and Rosenbrock's valley (De Jong's function 2). Both functions are defined here: http://www.geatbx.com/docu/fcnindex-01.html There are also graphs of these functions.
#
# Random shooting is chosen as a baseline method.

# We start with Rastrigin's function 6. This function has many local minima.

# In[4]:

# NOTE(review): the prose above mentions Rastrigin's function, but a Plato
# objective is instantiated here - confirm which function this cell tests.
function = Plato(n=2, eps=0.1)
corr = Correction(
    function
)  # correction is not discussed in the original article; the simplest one is chosen

# In[5]:

RUNS = 500  # number of independent runs per experiment
MAXEV = 50000  # the heuristic is implemented with maxeval parameter, I don't want to restrict heuristic by this parameter, so I set it at this high level

# We define two experiments: DRASET and random shooting.

# In[6]:


def experiment_draset(of, maxeval, num_runs, E, alfa0, N_general, N_local,
                      correction):
    results = []
# Flatten the grouped index and display the FSA statistics sorted by alpha.
stats_fsa = stats_fsa.reset_index()
# BUG FIX: sort_values returns a sorted copy (it is not in-place by default),
# so the result must be reassigned - previously the sorted frame was discarded
# and the unsorted table was displayed.
stats_fsa = stats_fsa.sort_values(by=['alpha'])
stats_fsa


# The optimal FSA parameters are following:
# * Initial temperature : T0 = 1
# * Mutation probability: p = 0.10
# * Cooling parameters  :
#     - n0 = 1
#     - alpha = 1

# In[28]:


# Run FSA once on the SCP instance with the tuned parameters found above.
mutation = BinaryMutation(p=0.10, correction=Correction(scp))
heur = FastSimulatedAnnealing(of=scp, maxeval=maxeval, T0=1, n0=1, alpha=1,
                              mutation=mutation)
heur.reset()
result = heur.search()
for key in ('neval', 'best_x', 'best_y'):
    print('{} = {}'.format(key, result[key]))


# ## Genetic Optimization heuristic
# Let's optimize the GO heuristic in order to use the results in the mixed heuristic

# In[29]:


from heur_go import GeneticOptimization, UniformMultipoint
Beispiel #10
0
# evaluate at the origin - the optimum of De Jong 1 (see heading below)
dj.evaluate(np.zeros(5))

# ## Generalized mutation demo on De Jong 1
#
# Let's test mutation corrections first:

# In[17]:

from heur_aux import Correction, MirrorCorrection, ExtensionCorrection

# In[18]:

# sticky correction in R^n (mind x[1] - presumably outside the domain; confirm bounds)
Correction(dj).correct(np.array([6.12, -4.38, 2.96]))

# In[19]:

# mirror correction in R^n (mind x[1])
MirrorCorrection(dj).correct(np.array([6.12, -4.38, 2.96]))

# In[20]:

# extension correction in R^n (mind x[1])
ExtensionCorrection(dj).correct(np.array([6.12, -4.38, 2.96]))

# I.e. corrections work also in the continuous case, as expected...

# In[21]:
Beispiel #11
0
# Crossover operators under comparison, each tagged with a short label.
_crossover_specs = [
    (Crossover(), 'mix'),
    (UniformMultipoint(1), 'uni'),  # test for other n as well!
    (RandomCombination(), 'rnd'),
]
crossovers = [{'crossover': op, 'name': label}
              for op, label in _crossover_specs]


# In[12]:

# Compare GO crossover operators: NUM_RUNS independent searches per operator.
results = pd.DataFrame()
for crossover in crossovers:
    heur_name = 'GO_{}'.format(crossover['name'])
    runs = []
    for i in tqdm_notebook(range(NUM_RUNS), 'Testing {}'.format(heur_name)):
        optimizer = GeneticOptimization(
            tsp, maxeval, N=5, M=15, Tsel1=1.0, Tsel2=0.5,
            mutation=CauchyMutation(r=1.0, correction=Correction(tsp)),
            crossover=crossover['crossover'])
        run = optimizer.search()
        # tag each run with its index, heuristic name and crossover label
        for key, value in (('run', i), ('heur', heur_name),
                           ('crossover', crossover['name'])):
            run[key] = value
        runs.append(run)

    res_df = pd.DataFrame(
        runs,
        columns=['heur', 'run', 'crossover', 'best_x', 'best_y', 'neval'])
    results = pd.concat([results, res_df], axis=0)


# In[13]:

def rel(x):
    """Reliability: fraction of runs with a finite evaluation count."""
    finite_count = sum(1 for value in x if value < np.inf)
    return finite_count / len(x)