# Build a regular grid of candidate (design) points over the 2D domain.
# A grid (rather than a random design) is used so contour plots can be drawn.
x1 = np.linspace(domain[0, 0], domain[0, 1], num_design)
x2 = np.linspace(domain[1, 0], domain[1, 1], num_design)
X1, X2 = np.meshgrid(x1, x2)
X_design = np.hstack([X1.flatten()[:, None], X2.flatten()[:, None]])

# The initial points to start from: a latin-center design scaled to the domain.
# (Removed a dead `np.random.rand(num_init)[:, None] * 6.` assignment that was
# immediately overwritten by the line below.)
X_init = domain[:, 0] + (domain[:, 1] - domain[:, 0]) * design.latin_center(
    num_init, 2)

# Globally minimize f
x, y, ei, _ = pybgo.minimize(
    f,
    X_init,
    X_design,
    tol=1e-5,
    callback=pybgo.plot_summary_2d,  # Plots the results at each iteration
    prefix=os.path.join(out_dir, 'out'),
    save_model=True)

# The best observed value after each iteration.
# NOTE(review): xrange(1, y.shape[0]) never includes the final observation
# (the last slice is y[:shape-1]); if the last point should count, use
# xrange(1, y.shape[0] + 1) -- confirm intent.
bv = np.array([y[:i, 0].min() for i in xrange(1, y.shape[0])])

# Plot best-value-vs-iteration and save it to <out_dir>/bv.png.
fig, ax = plt.subplots()
it = np.arange(1, bv.shape[0] + 1)
ax.plot(it, bv, linewidth=2)
ax.set_xlabel('Iteration', fontsize=16)
ax.set_ylabel('Best value', fontsize=16)
fig.savefig(os.path.join(out_dir, 'bv.png'))
plt.close(fig)
# For this one we use a regular grid only because we want to do some contour
# plots. (Removed a commented-out latin-center X_design line that was dead.)
x1 = np.linspace(domain[0, 0], domain[0, 1], num_design)
x2 = np.linspace(domain[1, 0], domain[1, 1], num_design)
X1, X2 = np.meshgrid(x1, x2)
X_design = np.hstack([X1.flatten()[:, None], X2.flatten()[:, None]])

# The initial points to start from: a latin-center design scaled to the domain.
# (Removed a dead `np.random.rand(num_init)[:, None] * 6.` assignment that was
# immediately overwritten by the line below.)
X_init = domain[:, 0] + (domain[:, 1] - domain[:, 0]) * design.latin_center(
    num_init, 2)

# Globally minimize f
x, y, ei, _ = pybgo.minimize(
    f,
    X_init,
    X_design,
    tol=1e-5,
    callback=pybgo.plot_summary_2d,  # Plots the results at each iteration
    prefix=os.path.join(out_dir, 'out'),
    save_model=True)

# The best observed value after each iteration.
# NOTE(review): xrange(1, y.shape[0]) never includes the final observation;
# use xrange(1, y.shape[0] + 1) if it should -- confirm intent.
bv = np.array([y[:i, 0].min() for i in xrange(1, y.shape[0])])

# Plot best-value-vs-iteration and save it to <out_dir>/bv.png.
fig, ax = plt.subplots()
it = np.arange(1, bv.shape[0] + 1)
ax.plot(it, bv, linewidth=2)
ax.set_xlabel('Iteration', fontsize=16)
ax.set_ylabel('Best value', fontsize=16)
fig.savefig(os.path.join(out_dir, 'bv.png'))
plt.close(fig)

# NOTE(review): this figure is created but never drawn on or saved in the
# visible source -- the snippet appears to be truncated here.
fig, ax = plt.subplots()
# Example #3
def optimize_stiefel(func, X0, args=(), tau_max=.5, max_it=1, tol=1e-6,
                     disp=False, tau_find_freq=100):
    """
    Optimize a function over a Stiefel manifold.

    At each iteration the descent direction is encoded by
    ``A = compute_A(G, X)`` and the candidate update is ``Y_func(tau, X, A)``.
    The step size ``tau`` is chosen by Bayesian global optimization
    (``pybgo.minimize``) of a 1D line-search objective over
    ``log(tau / tau_max)`` in [-10, 0]; ``tau_max`` itself is adapted up when
    the search saturates at the bound and down after backtracking.

    :param func: Function to be optimized; called as ``func(X, *args)`` and
                 expected to return the pair ``(F, G)`` of objective value
                 and gradient.
    :param X0: Initial point for line search (copied; not modified in place).
    :param tau_max: Maximum step size (adapted during the run).
    :param max_it: Maximum number of iterations.
    :param tol: Tolerance criteria to terminate line search (relative
                decrease of the objective between iterations).
    :param disp: Choose whether to display output.
    :param args: Extra arguments passed to the function.
    :param tau_find_freq: Redo the BGO step-size search every this many
                          iterations (also done on the first iteration and
                          after any backtracking step).
    :returns: An ``OptimizeResult`` carrying ``X``, ``fun``, ``nit``,
              ``nfev``, ``success`` and the final ``tau_max``.
    """
    tol = float(tol)
    assert tol > 0, 'Tolerance must be positive'
    max_it = int(max_it)
    assert max_it > 0, 'The maximum number of iterations must be a positive '\
                       + 'integer'
    tau_max = float(tau_max)
    assert tau_max > 0, 'The parameter `tau_max` must be positive.'
    k = 0  # NOTE(review): assigned but never used below.
    X = X0.copy()
    nit = 0   # iteration counter (decremented again on backtracking)
    nfev = 0  # total objective evaluations (direct + line-search probes)
    success = False
    if disp:
        print 'Stiefel Optimization'.center(80)
        print '{0:4s} {1:11s} {2:5s}'.format('It', 'F', '(F - F_old) / F_old')
        print '-' * 30

    # Line-search objective object; its A/X/tau_max fields are refreshed
    # every iteration before it is handed to the step-size search.
    ls_func = LSFunc()
    ls_func.func = func
    decrease_tau = False
    tau_max0 = tau_max  # NOTE(review): saved but never used below.
    while nit <= max_it:
        nit += 1
        F, G = func(X, *args)  # current objective value and gradient
        F_old = F
        nfev += 1
        A = compute_A(G, X)
        ls_func.A = A
        ls_func.X = X
        ls_func.func_args = args
        ls_func.tau_max = tau_max
        # NOTE(review): increased_tau is only ever set inside an `if disp:`
        # block below and is never read -- looks vestigial.
        increased_tau = False
        if nit == 1 or decrease_tau or nit % tau_find_freq == 0:
            # Search for a good step size: BGO over log(tau / tau_max)
            # restricted to [-10, 0], i.e. tau in (tau_max * e^-10, tau_max].
            tau_init = np.linspace(-10, 0., 3)[:, None]
            tau_d = np.linspace(-10, 0., 50)[:, None]
            tau_all, F_all = pybgo.minimize(ls_func, tau_init, tau_d, fixed_noise=1e-16,
                    add_at_least=1, tol=1e-2, scale=True,
                    train_every=1)[:2]
            nfev += tau_all.shape[0]
            idx = np.argmin(F_all)
            tau = np.exp(tau_all[idx, 0]) * tau_max
            if tau_max - tau <= 1e-6:
                # Search saturated at the upper bound -- allow bigger steps.
                tau_max = 1.2 * tau_max
                if disp:
                    print 'increasing tau_max to {0:1.5e}'.format(tau_max)
                    increased_tau = True
            if decrease_tau:
                # We arrived here via backtracking; shrink the step-size cap.
                tau_max = .8 * tau_max
                if disp:
                    print 'decreasing max_tau to {0:1.5e}'.format(tau_max)
                decrease_tau = False
            F = F_all[idx, 0]
        else:
            # Reuse the tau from the last search; just re-evaluate the
            # line-search objective at that (log-scaled) step.
            F = ls_func([np.log(tau /  tau_max)])
        delta_F = (F_old - F) / np.abs(F_old)
        if delta_F < 0:
            # Objective increased: undo the iteration count and retry with a
            # fresh (and smaller) step-size search.
            if disp:
                print '*** backtracking'
            nit -= 1
            decrease_tau = True
            continue
        X_old = X  # NOTE(review): kept but never used below.
        X = Y_func(tau, X, A)  # take the accepted step to the new point
        if disp:
            print '{0:4s} {1:1.5e} {2:5e} tau = {3:1.3e}, tau_max = {4:1.3e}'.format(
             str(nit).zfill(4), F, delta_F, tau, tau_max)
        if delta_F <= tol:
            if disp:
                print '*** Converged ***'
            success = True
            break
    # Package the result in the scipy-style OptimizeResult container.
    res = OptimizeResult()
    res.tau_max = tau_max
    res.X = X
    res.nfev = nfev
    res.nit = nit
    res.fun = F
    res.success = success
    return res