Example #1

# NOTE: the opening of this script is truncated in the source; the imports,
# the by_loss helper, and the first lines of make_label are reconstructed
# here from how they are used below.
import matplotlib.pyplot as plt
import numpy as np

import rosenbrock


def by_loss(item):
    '''Sort optimization runs by their final loss.'''
    key, (xs, ys, loss) = item
    return loss


def make_label(loss, key):
    '''Build a legend label from a run's final loss and hyperparameters.'''
    algo, rate, mu, half, reg = key
    slots, args = ['{:.3f}', '{}'], [loss, algo]
    if algo in 'SGD NAG RMSProp Adam ESGD'.split():
        slots.append('m={:.3f}')
        args.append(mu)
        slots.append('lr={:.2e}')
        args.append(rate)
    if algo in 'RMSProp ADADELTA ESGD'.split():
        slots.append('rmsh={}')
        args.append(half)
        slots.append('rmsr={:.2e}')
        args.append(reg)
    return ' '.join(slots).format(*args)
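# For example, with the reconstructed opening above,
# make_label(0.123, ('RMSProp', 0.01, 0.9, 5, 1e-4)) would produce
# '0.123 RMSProp m=0.900 lr=1.00e-02 rmsh=5 rmsr=1.00e-04'.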


# Here we run a number of optimization algorithms on the Rosenbrock function
# and measure their performance, then plot the results below.

algos = 'SGD NAG RMSProp RProp Adam ADADELTA ESGD'.split()
results = ((make_label(loss, key), xs, ys)
           for key, (xs, ys, loss)
           in sorted(rosenbrock.test(algos), key=by_loss))

_, ax = plt.subplots(1, 1)

for color, (label, xs, ys) in zip(rosenbrock.COLORS, results):
    ax.plot(xs, ys, 'o-', color=color, label=label,
            alpha=0.8, lw=2, markersize=5,
            mew=1, mec=color, mfc='none')

# Make a contour plot of the Rosenbrock function surface and mark its global
# minimum at (1, 1) with an 'x'.
X, Y = np.meshgrid(np.linspace(-1.3, 1.3, 31), np.linspace(-0.9, 1.7, 31))
Z = 100 * (Y - X ** 2) ** 2 + (1 - X) ** 2
ax.plot([1], [1], 'x', mew=3, markersize=10, color='#111111')
ax.contourf(X, Y, Z, np.logspace(-1, 3, 31), cmap='gray_r')

ax.set_xlim(-1.3, 1.3)
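
# NOTE: the script is truncated here in the source. A plausible ending
# matches the y-limits to the contour grid and displays the legend:
ax.set_ylim(-0.9, 1.7)
plt.legend(loc='lower right')
plt.show()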
Example #2

'''This example shows how optimization hyperparameters affect
performance across different optimization algorithms.

Because this example evaluates a large number of optimizer configurations, it
can take a while to run.
'''

import itertools
import matplotlib.pyplot as plt
import numpy as np

import rosenbrock

algos = 'NAG RMSProp Adam ADADELTA ESGD'.split()
results = rosenbrock.test(algos, n=10, init=[-1] * 100, limit=1000)
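
# Each result from rosenbrock.test pairs a hyperparameter key with a run:
# (key, (xs, ys, loss)), where key = (algo, rate, mu, half, reg), as
# unpacked in the loop below.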

# Here we plot the marginal performance of each of the four hyperparameters.
# These plots give a sense of how each algorithm performs under random
# hyperparameter selection.

_, ((rate_ax, mu_ax), (half_ax, reg_ax)) = plt.subplots(2, 2)
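# One axis per hyperparameter: learning rate, momentum, and (presumably, given
# the 'rmsh' and 'rmsr' labels in Example #1) the RMS halflife and RMS
# regularizer used by the RMS-based algorithms.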

# Group runs by algorithm name (the first element of each key); groupby
# requires sorted input, which sorted() provides here.
by_algo = itertools.groupby(sorted(results), lambda item: item[0][0])
for color, (algo, items) in zip(rosenbrock.COLORS, by_algo):
    items = list(items)
    values = np.zeros((len(items), 5), 'f')
    for i, ((_, rate, mu, half, reg), (_, _, loss)) in enumerate(items):
        values[i] = [rate, mu, half, reg, loss]
    rates, mus, halfs, regs, losses = values.T
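    # NOTE: the script is truncated here in the source. A plausible ending
    # scatters each hyperparameter against the final loss, one color per
    # algorithm, then labels the axes and displays the legend:
    kw = dict(alpha=0.7, color=color, lw=0, label=algo)
    rate_ax.scatter(rates, losses, **kw)
    mu_ax.scatter(mus, losses, **kw)
    half_ax.scatter(halfs, losses, **kw)
    reg_ax.scatter(regs, losses, **kw)

for ax, name in zip((rate_ax, mu_ax, half_ax, reg_ax),
                    ('rate', 'mu', 'half', 'reg')):
    ax.set_xlabel(name)
    ax.set_ylabel('loss')
rate_ax.legend()
plt.show()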