Example #1
import numpy as np
from autoptim import minimize


def test_rosenbrock():
    def rosenbrock(x):
        return (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2

    x0 = np.zeros(2)

    x_min, res = minimize(rosenbrock, x0)
    assert res['success']
Example #2
import numpy as np
from numpy.testing import assert_allclose
from autoptim import minimize


def test_multiple_shapes():
    def f(x, y, z, a):
        return np.sum(x**2) + np.sum((y - 3)**2) + np.sum((z + a)**4)

    a = 2
    shapes = [(2, 3), (2, 2), (3, )]
    optim_vars_init = [np.ones(shape) for shape in shapes]
    optim_vars, res = minimize(f, optim_vars_init, args=(a, ))
    assert res['success']
    assert [var.shape for var in optim_vars] == shapes
    for var, target in zip(optim_vars, [0, 3, -a]):
        assert_allclose(var, target, atol=1e-1)
Example #3
import numpy as np
from numpy.testing import assert_allclose
from autoptim import minimize


def test_preconditioning():
    def f(x, y, z, a, b):
        return np.sum(x**2) + np.sum((y - 3)**2) + np.sum((z + a)**4)

    a = 2
    b = 5
    shapes = [(2, 3), (2, 2), (3, )]
    optim_vars_init = [np.ones(shape) for shape in shapes]

    def precon_fwd(x, y, z, a, b):
        return 3 * x, y / 2, z * 4

    def precon_bwd(x, y, z, a, b):
        return x / 3, 2 * y, z / 4

    optim_vars, res = minimize(f,
                               optim_vars_init,
                               args=(a, b),
                               precon_fwd=precon_fwd,
                               precon_bwd=precon_bwd)
    assert res['success']
    assert [var.shape for var in optim_vars] == shapes
    for var, target in zip(optim_vars, [0, 3, -a]):
        assert_allclose(var, target, atol=1e-1)
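
The two preconditioners in this test are exact inverses of each other, which is presumably what autoptim expects of precon_fwd / precon_bwd. A minimal, self-contained sketch of that round-trip property (not part of the original test):

import numpy as np
from numpy.testing import assert_allclose

x, y, z = np.ones((2, 3)), np.ones((2, 2)), np.ones(3)
x_p, y_p, z_p = 3 * x, y / 2, z * 4        # what precon_fwd computes
x_b, y_b, z_b = x_p / 3, 2 * y_p, z_p / 4  # what precon_bwd computes
for orig, back in zip((x, y, z), (x_b, y_b, z_b)):
    assert_allclose(orig, back)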
Example #4
# Author: Pierre Ablin <*****@*****.**>
# License: MIT

# This is the simplest example of autoptim use.

import numpy as np
from autoptim import minimize


# Specify the loss function, in a PyTorch-compatible way:
def rosenbrock(x):
    return (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2


# Choose the starting point:

x0 = np.zeros(2)

x_min, _ = minimize(rosenbrock, x0)
print(x_min)
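
The Rosenbrock function has its unique global minimum at (1, 1), so a quick sanity check on the result could look like this sketch (the tolerance is an arbitrary choice):

from numpy.testing import assert_allclose

assert_allclose(x_min, np.ones(2), atol=1e-3)  # the minimum of Rosenbrock is at (1, 1)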
Example #5
# The preconditioning functions take the optimization variable followed by the
# extra arguments.


def precon_fwd(beta, X, y, lbda):
    return beta * diag_precon


def precon_bwd(beta_precon, X, y, lbda):
    return beta_precon / diag_precon


beta0 = np.random.randn(p)
# Run the minimization with the preconditioning
t0 = time()
beta_min, _ = minimize(loss,
                       beta0,
                       args=(X, y, lbda),
                       precon_fwd=precon_fwd,
                       precon_bwd=precon_bwd)
print('Minimization with preconditioning took %.2f sec.' % (time() - t0))
print(beta_min)

# It gives the same output without preconditioning:
t0 = time()
beta_min, _ = minimize(loss, beta0, args=(X, y, lbda))
print('Minimization without preconditioning took %.2f sec.' % (time() - t0))
print(beta_min)

# But it is faster with preconditioning (about twice as fast in this example,
# but it can give more impressive speedups)!
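
The excerpt does not define `diag_precon`. One plausible choice for this ridge-regression loss, shown here purely as a hypothetical illustration, is a Jacobi-style scaling built from the diagonal of the Hessian X.T.dot(X) + lbda * I:

# Hypothetical diagonal preconditioner (not from the original script):
# rescale each coordinate of beta by the square root of its curvature.
diag_precon = np.sqrt(np.sum(X ** 2, axis=0) + lbda)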
Example #6
# Author: Pierre Ablin <*****@*****.**>
# License: MIT

# Example of multi-dimensional arrays
import autograd.numpy as np
from autoptim import minimize

n = 100
p = 2

X = np.random.randn(n, p)

# The loss is minimized when X.dot(W) is decorrelated.


def loss(W, X):
    Y = np.dot(X, W)
    return -np.linalg.slogdet(W)[1] + 0.5 * np.sum(Y**2) / n


# The input is a square matrix
W0 = np.eye(p)

W, _ = minimize(loss, W0, args=(X, ))
print(W)
Y = X.dot(W)
print(Y.T.dot(Y) / n)  # Approximately the identity matrix
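
At any minimizer of this loss, W satisfies W.dot(W.T) = inv(C) with C = X.T.dot(X) / n, which is why Y.T.dot(Y) / n comes out close to the identity. A small sketch of that check, continuing the script above (printing rather than asserting, since the optimizer only reaches the minimum approximately):

C = X.T.dot(X) / n
print(np.max(np.abs(W.dot(W.T) - np.linalg.inv(C))))  # should be small
print(np.max(np.abs(Y.T.dot(Y) / n - np.eye(p))))  # should be small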
Example #7
# Author: Pierre Ablin <*****@*****.**>
# License: MIT

# An example with additional variables


import autograd.numpy as np
from autoptim import minimize


n = 10
p = 5

X = np.random.randn(n, p)
y = np.random.randn(n)
lbda = 0.1

# The loss should be optimized over beta, with the other parameters fixed.


def loss(beta, X, y, lbda):
    return np.sum((np.dot(X, beta) - y) ** 2) + lbda * np.sum(beta ** 2)


beta0 = np.random.randn(p)

beta_min, _ = minimize(loss, beta0, args=(X, y, lbda))
print(beta_min)
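
Since this is ordinary ridge regression, the minimizer has the closed form (X.T.dot(X) + lbda * I)^(-1) X.T.dot(y); a short sketch comparing the two, continuing the script above:

beta_closed = np.linalg.solve(X.T.dot(X) + lbda * np.eye(p), X.T.dot(y))
print(np.max(np.abs(beta_min - beta_closed)))  # should be small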
Example #8
x = np.concatenate(
    (np.random.randn(n), 2 * np.random.randn(n), np.random.randn(n) + 1))

# Here, the model should fit both the means and the variances. Using
# scipy.optimize.minimize, one would have to vectorize these variables by hand.


def loss(means, variances, x):
    tmp = torch.zeros(n_components * n).double()
    for m, v in zip(means, variances):
        tmp += torch.exp(-(x - m)**2 / (2 * v**2)) / v
    return -torch.log(tmp).sum()


# autoptim can handle lists of unknown variables

means0 = np.random.randn(n_components)
variances0 = np.random.rand(n_components)
optim_vars = [means0, variances0]
# The variances should be constrained to positivity. To do so, we can pass
# a `bounds` list to `minimize`. Bounds are automatically broadcasted to
# match the input size.

bounds = [
    (None, None),  # corresponds to means: no constraint
    (0, None),  # corresponds to variances: positivity constraint
]
(means, variances), _ = minimize(loss, optim_vars, args=(x, ), bounds=bounds)

print(means, variances)  # Notice that they have the correct shape
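
A short sanity check, continuing the script above, under the assumption (consistent with the comments) that the bounds are broadcast per variable and that the variables keep their shapes:

assert np.all(variances >= 0)  # the positivity bound was respected
assert means.shape == (n_components,)
assert variances.shape == (n_components,)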