Example #1
0
def test_affine_sum():
    """Smoke-test rr.affine_sum: the maps of a sum of transforms must match
    the hand-computed combination, both unweighted and with explicit weights."""

    n, p = 100, 25

    X1 = np.random.standard_normal((n, p))
    X2 = np.random.standard_normal((n, p))
    b = np.random.standard_normal(n)
    v = np.random.standard_normal(p)

    affine_part = rr.affine_transform(X1, b)
    linear_part = rr.linear_transform(X2)

    # Unweighted sum: the offset comes solely from the affine component.
    total = rr.affine_sum([affine_part, linear_part])
    lin = np.dot(X1, v) + np.dot(X2, v)
    adj = np.dot(X1.T, b) + np.dot(X2.T, b)
    yield assert_array_almost_equal, lin + b, total.affine_map(v)
    yield assert_array_almost_equal, lin, total.linear_map(v)
    yield assert_array_almost_equal, adj, total.adjoint_map(b)
    yield assert_array_almost_equal, b, total.affine_offset

    # Weighted sum: each component, including its offset, is scaled by its weight.
    total = rr.affine_sum([affine_part, linear_part], weights=[3, 4])
    yield assert_array_almost_equal, 3 * (np.dot(X1, v) + b) + 4 * np.dot(X2, v), total.affine_map(v)
    yield assert_array_almost_equal, 3 * np.dot(X1, v) + 4 * np.dot(X2, v), total.linear_map(v)
    yield assert_array_almost_equal, 3 * np.dot(X1.T, b) + 4 * np.dot(X2.T, b), total.adjoint_map(b)
    yield assert_array_almost_equal, 3 * b, total.affine_offset
def test_multinomial_vs_logistic():

    """
    Test that multinomial regression with two categories is the same as logistic regression
    """

    n = 500
    p = 10
    J = 2  # two categories -> multinomial should reduce to logistic

    X = np.random.standard_normal(n * p).reshape((n, p))
    # Shift counts so every cell is at least 2 and both losses are well defined.
    counts = np.random.randint(0, 10, n * J).reshape((n, J)) + 2

    def _fit(loss):
        # Solve the given smooth loss to high precision and return the coefficients.
        # (This container/FISTA/fit sequence was previously repeated three times.)
        solver = rr.FISTA(rr.container(loss))
        solver.fit(debug=False, tol=1e-10)
        return solver.composite.coefs

    # Multinomial deviance with J-1 free coefficient columns.
    mult_x = rr.linear_transform(X, input_shape=(p, J - 1))
    coefs1 = _fit(rr.multinomial_deviance.linear(mult_x, counts=counts))

    # Logistic deviance treating each category in turn as "success".
    trials = np.sum(counts, axis=1)
    coefs2 = _fit(rr.logistic_deviance.linear(X, successes=counts[:, 0], trials=trials))
    coefs3 = _fit(rr.logistic_deviance.linear(X, successes=counts[:, 1], trials=trials))

    npt.assert_equal(coefs1.shape, (p, J - 1))
    # Multinomial fit matches logistic on category 0 ...
    npt.assert_array_almost_equal(coefs1.flatten(), coefs2.flatten(), 5)
    # ... and flips sign when the categories are swapped.
    npt.assert_array_almost_equal(coefs1.flatten(), -coefs3.flatten(), 5)
Example #3
0
def test_affine_sum():
    """Verify every map of rr.affine_sum (affine, linear, adjoint, offset_map,
    affine_offset) against explicit matrix arithmetic, for unit and non-unit weights."""

    n, p = 100, 25

    A1 = np.random.standard_normal((n, p))
    A2 = np.random.standard_normal((n, p))
    shift = np.random.standard_normal(n)
    w = np.random.standard_normal(p)

    t1 = rr.affine_transform(A1, shift)
    t2 = rr.linear_transform(A2)

    s = rr.affine_sum([t1, t2])
    yield assert_array_almost_equal, np.dot(A1, w) + np.dot(A2, w) + shift, s.affine_map(w)
    yield assert_array_almost_equal, np.dot(A1, w) + np.dot(A2, w), s.linear_map(w)
    yield assert_array_almost_equal, np.dot(A1.T, shift) + np.dot(A2.T, shift), s.adjoint_map(shift)
    yield assert_array_almost_equal, shift, s.offset_map(w)
    yield assert_array_almost_equal, shift, s.affine_offset

    # With weights, the affine component's offset is scaled as well.
    s = rr.affine_sum([t1, t2], weights=[3, 4])
    yield assert_array_almost_equal, 3 * (np.dot(A1, w) + shift) + 4 * np.dot(A2, w), s.affine_map(w)
    yield assert_array_almost_equal, 3 * np.dot(A1, w) + 4 * np.dot(A2, w), s.linear_map(w)
    yield assert_array_almost_equal, 3 * np.dot(A1.T, shift) + 4 * np.dot(A2.T, shift), s.adjoint_map(shift)
    yield assert_array_almost_equal, 3 * shift, s.offset_map(w)
    yield assert_array_almost_equal, 3 * shift, s.affine_offset
Example #4
0
def test_coefs_matrix():
    """Matrix-valued coefficients: linear/affine maps send (p, q) to (n, q),
    and the adjoint sends (n, q) back to (p, q), for both transform kinds."""

    n, p, q = 20, 10, 5

    X = np.random.standard_normal((n, p))
    B = np.random.standard_normal((n, q))
    V = np.random.standard_normal((p, q))
    Y = np.random.standard_normal((n, q))

    # Same shape checks for a purely linear and an affine transform.
    for transform in (rr.linear_transform(X, input_shape=(p, q)),
                      rr.affine_transform(X, B, input_shape=(p, q))):
        assert_equal(transform.linear_map(V).shape, (n, q))
        assert_equal(transform.affine_map(V).shape, (n, q))
        assert_equal(transform.adjoint_map(Y).shape, (p, q))
Example #5
0
def test_coefs_matrix():
    """Check output shapes when the coefficients are a (p, q) matrix
    rather than a length-p vector."""

    n, p, q = 20, 10, 5

    design = np.random.standard_normal((n, p))
    offset = np.random.standard_normal((n, q))
    coefs = np.random.standard_normal((p, q))
    resid = np.random.standard_normal((n, q))

    # Purely linear transform of matrix coefficients.
    lt = rr.linear_transform(design, input_shape=(p, q))
    assert_equal(lt.linear_map(coefs).shape, (n, q))
    assert_equal(lt.affine_map(coefs).shape, (n, q))
    assert_equal(lt.adjoint_map(resid).shape, (p, q))

    # Same design combined with a matrix offset.
    at = rr.affine_transform(design, offset, input_shape=(p, q))
    assert_equal(at.linear_map(coefs).shape, (n, q))
    assert_equal(at.affine_map(coefs).shape, (n, q))
    assert_equal(at.adjoint_map(resid).shape, (p, q))
Example #6
0
def test_row_matrix():
    """A 1-D array should be accepted as a (row-matrix) transform;
    smoke-test that all three maps run without raising."""

    n, p = 20, 1
    row = np.random.standard_normal(n)
    shift = np.random.standard_normal(p)
    coefs = np.random.standard_normal(n)
    dual = np.random.standard_normal(p)

    # Smoke test only: no return values are checked.
    for t in (rr.linear_transform(row),
              rr.affine_transform(row, shift)):
        t.linear_map(coefs)
        t.affine_map(coefs)
        t.adjoint_map(dual)
Example #7
0
def test_row_matrix():
    """Ensure a plain vector is usable wherever a matrix transform is expected."""

    n, p = 20, 1
    x = np.random.standard_normal(n)
    b = np.random.standard_normal(p)
    v = np.random.standard_normal(n)
    y = np.random.standard_normal(p)

    linear_only = rr.linear_transform(x)
    with_offset = rr.affine_transform(x, b)

    # None of the maps should raise for 1-D input; results are not inspected.
    linear_only.linear_map(v)
    linear_only.affine_map(v)
    linear_only.adjoint_map(y)

    with_offset.linear_map(v)
    with_offset.affine_map(v)
    with_offset.adjoint_map(y)
Example #8
0
# Smoothed-hinge (SVM-like) fit with an l1 sparsity penalty.
# NOTE(review): X, Y, N, P are defined earlier in the file (outside this chunk);
# presumably Y holds +/-1 labels so that -Y * X_1 @ beta + 1 is the hinge argument — TODO confirm.

# Center the design matrix columnwise.
X -= X.mean(0)[np.newaxis, :]

# Append an intercept column, then build the affine map beta -> 1 - Y * (X_1 @ beta).
X_1 = np.hstack([X, np.ones((N, 1))])
transform = rr.affine_transform(-Y[:, np.newaxis] * X_1, np.ones(N))
C = 0.2  # lagrange weight on the positive-part (hinge) penalty
hinge = rr.positive_part(N, lagrange=C)
hinge_loss = rr.linear_atom(hinge, transform)
epsilon = 0.04  # smoothing parameter for the hinge
smoothed_hinge_loss = rr.smoothed_atom(hinge_loss, epsilon=epsilon)

# Penalize only the first P coordinates — the intercept (index P) is left unpenalized.
s = rr.selector(slice(0, P), (P + 1, ))
sparsity = rr.l1norm.linear(s, lagrange=3.)
quadratic = rr.quadratic.linear(s, coef=0.5)

from regreg.affine import power_L
ltransform = rr.linear_transform(X_1)
# power_L: presumably a power-iteration estimate of the largest squared
# singular value of X_1 — TODO confirm against regreg.affine docs.
singular_value_sq = power_L(X_1)
# the other smooth piece is a quadratic with identity
# for quadratic form, so its lipschitz constant is 1

# Padded Lipschitz bound so fixed-step FISTA (backtrack=False) is safe.
lipschitz = 1.05 * singular_value_sq / epsilon + 1.1

problem = rr.container(quadratic, smoothed_hinge_loss, sparsity)
solver = rr.FISTA(problem)
solver.composite.lipschitz = lipschitz
solver.debug = True
solver.fit(backtrack=False)
solver.composite.coefs

# Classify by the sign of the fitted linear predictor: labels in {-1, +1}.
fits = np.dot(X_1, problem.coefs)
labels = 2 * (fits > 0) - 1
# Basis-pursuit-style problem solved by smoothing + continuation on epsilon.
import pylab

X = np.random.standard_normal((500, 1000))

# Sparse ground truth: first 100 coefficients set at the universal-threshold level.
beta = np.zeros(1000)
beta[:100] = 3 * np.sqrt(2 * np.log(1000))

Y = np.random.standard_normal((500, )) + np.dot(X, beta)
# Largest eigenvalue of X'X (eigvals=(998, 999) requests only the top two of 1000).
Xnorm = scipy.linalg.eigvalsh(np.dot(X.T, X), eigvals=(998, 999)).max()

import regreg.api as R
from regreg.smooth import linear
# Smoothed sup-norm constraint ||.||_inf <= 1; store_argmin presumably retains
# the inner argmin so the primal solution can be read off later — TODO confirm.
smooth_linf_constraint = R.smoothed_atom(R.supnorm(1000, bound=1),
                                         epsilon=0.01,
                                         store_argmin=True)
transform = R.linear_transform(-X.T)
loss = R.affine_smooth(smooth_linf_constraint, transform)

# l2 constraint level: sqrt(10%) of the response norm.
norm_Y = np.linalg.norm(Y)
l2_constraint_value = np.sqrt(0.1) * norm_Y
l2_lagrange = R.l2norm(500, lagrange=l2_constraint_value)

basis_pursuit = R.container(loss, linear(Y), l2_lagrange)
solver = R.FISTA(basis_pursuit)
tol = 1.0e-08

# Continuation: shrink the smoothing parameter geometrically, re-solving each
# time with a fixed step sized by the 1/epsilon-scaled Lipschitz constant.
for epsilon in [0.6**i for i in range(20)]:
    smooth_linf_constraint.epsilon = epsilon
    solver.composite.lipschitz = 1.1 / epsilon * Xnorm
    solver.fit(max_its=2000, tol=tol, min_its=10, backtrack=False)
# Smoothed-hinge (SVM-like) fit with an l1 sparsity penalty.
# NOTE(review): X, Y, N, P come from earlier in the file (outside this chunk);
# presumably Y holds +/-1 labels so -Y * X_1 @ beta + 1 is the hinge argument — TODO confirm.

# Design with an appended intercept column; the affine map is beta -> 1 - Y * (X_1 @ beta).
X_1 = np.hstack([X, np.ones((N,1))])
transform = rr.affine_transform(-Y[:,np.newaxis] * X_1, np.ones(N))
C = 0.2  # lagrange weight on the positive-part (hinge) penalty
hinge = rr.positive_part(N, lagrange=C)
hinge_loss = rr.linear_atom(hinge, transform)
epsilon = 0.04  # smoothing parameter for the hinge
smoothed_hinge_loss = rr.smoothed_atom(hinge_loss, epsilon=epsilon)

# Penalize only the first P coordinates — the intercept (index P) is unpenalized.
s = rr.selector(slice(0,P), (P+1,))
sparsity = rr.l1norm.linear(s, lagrange=3.)
quadratic = rr.quadratic.linear(s, coef=0.5)


from regreg.affine import power_L
ltransform = rr.linear_transform(X_1)
# power_L: presumably a power-iteration estimate of the largest squared
# singular value of X_1 — TODO confirm against regreg.affine docs.
singular_value_sq = power_L(X_1)
# the other smooth piece is a quadratic with identity
# for quadratic form, so its lipschitz constant is 1

# Padded Lipschitz bound so fixed-step FISTA (backtrack=False) is safe.
lipschitz = 1.05 * singular_value_sq / epsilon + 1.1


problem = rr.container(quadratic, 
                       smoothed_hinge_loss, sparsity)
solver = rr.FISTA(problem)
solver.composite.lipschitz = lipschitz
solver.debug = True
solver.fit(backtrack=False)
solver.composite.coefs
# Basis-pursuit-style problem solved by smoothing + continuation on epsilon.
import pylab

X = np.random.standard_normal((500,1000))

# Sparse ground truth: first 100 coefficients at the universal-threshold level.
beta = np.zeros(1000)
beta[:100] = 3 * np.sqrt(2 * np.log(1000))

Y = np.random.standard_normal((500,)) + np.dot(X, beta)
# Largest eigenvalue of X'X (eigvals=(998,999) requests only the top two of 1000).
Xnorm = scipy.linalg.eigvalsh(np.dot(X.T,X), eigvals=(998,999)).max()

import regreg.api as R
from regreg.smooth import linear
# Smoothed sup-norm constraint ||.||_inf <= 1; store_argmin presumably retains
# the inner argmin so the primal solution can be read off later — TODO confirm.
smooth_linf_constraint = R.smoothed_atom(R.supnorm(1000, bound=1),
                                        epsilon=0.01,
                                        store_argmin=True)
transform = R.linear_transform(-X.T)
loss = R.affine_smooth(smooth_linf_constraint, transform)


# l2 constraint level: sqrt(10%) of the response norm.
norm_Y = np.linalg.norm(Y)
l2_constraint_value = np.sqrt(0.1) * norm_Y
l2_lagrange = R.l2norm(500, lagrange=l2_constraint_value)

basis_pursuit = R.container(loss, linear(Y), l2_lagrange)
solver = R.FISTA(basis_pursuit)
tol = 1.0e-08

# Continuation: shrink the smoothing parameter geometrically, re-solving each
# time with a fixed step sized by the 1/epsilon-scaled Lipschitz constant.
for epsilon in [0.6**i for i in range(20)]:
   smooth_linf_constraint.epsilon = epsilon
   solver.composite.lipschitz = 1.1/epsilon * Xnorm
   solver.fit(max_its=2000, tol=tol, min_its=10, backtrack=False)