# Imports assumed by these snippets; ShootAndGo comes from src/heur_sg.py (see below):
import numpy as np
import pandas as pd
from tqdm import tqdm, tqdm_notebook

from heur_sg import ShootAndGo


def experiment_sg(of, maxeval, num_runs, hmax):
    results = []
    for i in tqdm_notebook(range(num_runs), 'Testing hmax={}'.format(hmax)):
        result = ShootAndGo(of, maxeval=maxeval, hmax=hmax).search() # dict with results of one run
        result['run'] = i
        result['heur'] = 'SG_{}'.format(hmax) # name of the heuristic
        result['hmax'] = hmax
        results.append(result)
    
    return pd.DataFrame(results, columns=['heur', 'run', 'hmax', 'best_x', 'best_y', 'neval'])
def experiment_random(of, maxeval, num_runs):
    results = []
    for i in tqdm(range(num_runs), 'Testing maxeval={}'.format(maxeval)):
        result = ShootAndGo(of, maxeval, hmax=0).search()
        result['run'] = i
        result['heur'] = 'Random_{}'.format(maxeval)
        result['maxeval'] = maxeval
        results.append(result)
    return pd.DataFrame(
        results,
        columns=['heur', 'run', 'maxeval', 'best_x', 'best_y', 'neval'])
def get_overview_heurs(of, maxeval, RD=True, FSA=True, DE=True, GO=True):
    # Assumes FastSimulatedAnnealing, DifferentialEvolution, GeneticOptimization,
    # Crossover, UniformMultipoint, RandomCombination and get_overview_mutation
    # are imported from the accompanying course modules.
    overview_heurs = []

    if RD:
        for hmax in [0, 5, 10, 50, np.inf]:
            for random_descent in [False, True]:  # renamed from RD to avoid shadowing the flag above
                overview_heurs.append(
                    ShootAndGo(of,
                               maxeval=maxeval,
                               hmax=hmax,
                               random_descent=random_descent))
    if FSA:
        for T0 in [1e-10, 1e-2, 1, np.inf]:
            for mutation in get_overview_mutation(of):
                for n0 in [1, 2, 5]:
                    overview_heurs.append(
                        FastSimulatedAnnealing(of,
                                               maxeval=maxeval,
                                               T0=T0,
                                               n0=n0,
                                               alpha=2,
                                               mutation=mutation))

    if DE:
        for N in [4, 10, 20]:
            for CR in [0.2, 0.5, 0.8]:
                for F in [0.5, 1, 2]:
                    overview_heurs.append(
                        DifferentialEvolution(of,
                                              maxeval=maxeval,
                                              N=N,
                                              CR=CR,
                                              F=F))

    if GO:
        for mutation in get_overview_mutation(of):
            for crossover in [
                    Crossover(),
                    UniformMultipoint(1),
                    RandomCombination()
            ]:
                for N in [1, 2, 5, 10, 30, 100]:
                    for Tsel1 in [0.5, 1]:
                        overview_heurs.append(
                            GeneticOptimization(of,
                                                maxeval,
                                                N=N,
                                                M=N * 3,
                                                Tsel1=Tsel1,
                                                Tsel2=0.1,
                                                mutation=mutation,
                                                crossover=crossover))
    return overview_heurs
def experiment(of, maxeval, num_runs, hmax, random_descent):
    method = 'RD' if random_descent else 'SD'
    results = []
    for i in tqdm(range(num_runs),
                  'Testing method={}, hmax={}'.format(method, hmax)):
        result = ShootAndGo(of, maxeval=maxeval, hmax=hmax,
                            random_descent=random_descent).search()  # dict with results of one run
        result['run'] = i
        result['heur'] = 'SG_{}_{}'.format(method,
                                           hmax)  # name of the heuristic
        result['method'] = method
        result['hmax'] = hmax
        results.append(result)

    return pd.DataFrame(
        results,
        columns=['heur', 'run', 'method', 'hmax', 'best_x', 'best_y', 'neval'])
# * Shoot & Go heuristic (also known as *Iterated Local Search*, *Random-restart hill climbing*, etc.)
#     * $hmax \in \{ 0, 1, \ldots, \infty \}$ parameter - maximum number of local searches / hill climbs
#     * note that $\mathrm{SG}_{0}$ is pure Random Shooting (Random Search)
#
# * implemented as ``class ShootAndGo(Heuristic)`` in ``src/heur_sg.py`` (a simplified sketch of the idea follows below)
#
#
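# A simplified, self-contained sketch of the Shoot & Go idea (illustration only, not the
# actual ``src/heur_sg.py`` implementation; the ``f``, ``random_point`` and ``neighbours``
# callables below are hypothetical stand-ins for the objective-function interface used by
# the real ``ShootAndGo`` class):

import random


def shoot_and_go_sketch(f, random_point, neighbours, maxeval, hmax):
    best_x, best_y, neval = None, float('inf'), 0
    while neval < maxeval:
        x = random_point()  # "shoot": a random starting point
        y = f(x)
        neval += 1
        steps = 0
        # "go": at most hmax hill-climbing steps; hmax=0 gives pure Random Shooting
        while steps < hmax and neval < maxeval:
            candidates = [(f(xn), xn) for xn in neighbours(x)]
            neval += len(candidates)
            yn, xn = min(candidates, key=lambda c: c[0])
            if yn >= y:
                break  # local optimum reached
            x, y, steps = xn, yn, steps + 1
        if y < best_y:
            best_x, best_y = x, y
    return {'best_x': best_x, 'best_y': best_y, 'neval': neval}


# e.g. minimizing x^2 over the integers -10..10 with +-1 neighbours:
print(shoot_and_go_sketch(lambda x: x ** 2,
                          lambda: random.randint(-10, 10),
                          lambda x: [x - 1, x + 1],
                          maxeval=100, hmax=5))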

# In[12]:

from heur_sg import ShootAndGo

# In[13]:

# Random Shooting for the AirShip task - initialization of the heuristic...
demo_rs = ShootAndGo(airship, maxeval=100, hmax=0)
# ...and execution:
demo_rs.search()

# # 2. Performance evaluation
#
# ## What is the recommended approach to store and analyze results of your experiments?
#
# 1. Append all relevant statistics from each single run into a table (e.g. a CSV file in memory or on disk), including all task and heuristic parameters
# 2. Load the table into an analytical tool of your choice (**data frame**, Excel or Google Docs spreadsheets, etc.)
# 3. Pivot by the relevant parameters and visualize in tables or charts (see the sketch below)
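#
# A minimal, self-contained sketch of these steps (the run records below are hypothetical,
# mimicking the data frames returned by the experiment helpers above):

import pandas as pd

# step 1: one row per run, carrying both the heuristic parameters and the run statistics
demo_results = pd.DataFrame([
    {'heur': 'SG_0', 'hmax': 0, 'run': 0, 'best_y': 15.0, 'neval': 87},
    {'heur': 'SG_0', 'hmax': 0, 'run': 1, 'best_y': 18.0, 'neval': 93},
    {'heur': 'SG_10', 'hmax': 10, 'run': 0, 'best_y': 11.0, 'neval': 42},
    {'heur': 'SG_10', 'hmax': 10, 'run': 1, 'best_y': 11.0, 'neval': 51},
])

# steps 2-3: pivot by the relevant parameter and summarize
print(demo_results.pivot_table(index='hmax', values=['best_y', 'neval'], aggfunc='mean'))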

# ## Demonstration
#
# Necessary setup first:
# In[7]:

# neighbourhood of x:
N = tsp.get_neighborhood(x, 1)
print(N)

# In[8]:

# decoded neighbours and their objective function values
for xn in N:
    print('{} ({}) -> {:.4f}'.format(xn, tsp.decode(xn), tsp.evaluate(xn)))

# **Carefully** mind the difference between the encoded solution vector and the decoded city tour, and what such a neighbourhood means in each representation.
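#
# To illustrate why the two representations differ, here is a minimal sketch of a
# Lehmer-code-style decode (an assumption for illustration only; the actual ``tsp.decode``
# may use a different encoding), with a hypothetical ``demo_decode`` helper:


def demo_decode(encoded):
    """Map an index vector (encoded[i] selects the i-th of the remaining cities)
    to a city tour; hypothetical helper, not part of the TSP class."""
    remaining = list(range(len(encoded) + 1))
    tour = []
    for k in encoded:
        tour.append(remaining.pop(k))
    tour.append(remaining.pop())  # the last remaining city closes the tour
    return tour


print(demo_decode([0, 0, 0]))  # [0, 1, 2, 3]
print(demo_decode([1, 0, 0]))  # a unit-step neighbour in the encoding, yet a different tour: [1, 0, 2, 3]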

# ### TSP optimization using Random Shooting ($\mathrm{SG}_{0}$)

# In[9]:

heur = ShootAndGo(tsp, maxeval=1000, hmax=0)
print(heur.search())

# # Assignments:
#
# 1. Find a better performing heuristic (to test the TSP implementation on your own).
# 2. Can you improve heuristic performance using any
#    1. **better random point generator**?
#    2. **better neighbourhood generator**?
#
# Use performance measure(s) of your choice (e.g. $FEO$); one possible set of criteria is sketched below.
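#
# A minimal sketch of one possible set of such criteria, assuming the convention that
# ``neval`` is finite only for successful runs (runs that reach the optimum within
# ``maxeval`` evaluations) and taking $FEO = MNE / REL$; verify these definitions against
# your own course materials before relying on them:

import numpy as np


def rel(neval):
    """Reliability: share of successful runs."""
    neval = np.asarray(neval, dtype=float)
    return np.sum(np.isfinite(neval)) / len(neval)


def mne(neval):
    """Mean number of evaluations over successful runs only."""
    neval = np.asarray(neval, dtype=float)
    return np.nanmean(np.where(np.isfinite(neval), neval, np.nan))


def feo(neval):
    """Feoktistov-style criterion (lower is better): speed penalized by unreliability."""
    return mne(neval) / rel(neval)


# e.g. on a data frame produced by the experiment helpers above:
# results.pivot_table(index='heur', values='neval', aggfunc=[rel, mne, feo])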