Code Example #1
def test_nonnegative_positive_part(debug=False):
    """
    This test verifies that using nonnegative constraint
    with a linear term, with some unpenalized terms yields the same result
    as using separable with constrained_positive_part and nonnegative
    """
    import numpy as np
    import nose.tools as nt
    import regreg.api as rr

    # N - number of data points
    # P - number of columns in design == number of betas
    N, P = 40, 30
    # an arbitrary positive offset for data and design
    offset = 2
    # data
    Y = np.random.normal(size=(N, )) + offset
    # design - with ones as last column
    X = np.ones((N, P))
    X[:, :-1] = np.random.normal(size=(N, P - 1)) + offset
    # coef for loss
    coef = 0.5
    # lagrange for penalty
    lagrange = .1

    # Loss function (squared difference between fitted and actual data)
    loss = rr.quadratic_loss.affine(X, -Y, coef=coef)

    # Penalty using nonnegative, leave the last 5 unpenalized but
    # nonnegative
    weights = np.ones(P) * lagrange
    weights[-5:] = 0
    linq = rr.identity_quadratic(0, 0, weights, 0)
    penalty = rr.nonnegative(P, quadratic=linq)

    # Solution

    composite_form = rr.separable_problem.singleton(penalty, loss)
    solver = rr.FISTA(composite_form)
    solver.debug = debug
    solver.fit(tol=1.0e-12, min_its=200)
    coefs = solver.composite.coefs

    # using the separable penalty, only penalize the first
    # 25 coefficients with constrained_positive_part

    penalties_s = [
        rr.constrained_positive_part(25, lagrange=lagrange),
        rr.nonnegative(5)
    ]
    groups_s = [slice(0, 25), slice(25, 30)]
    penalty_s = rr.separable((P, ), penalties_s, groups_s)
    composite_form_s = rr.separable_problem.singleton(penalty_s, loss)
    solver_s = rr.FISTA(composite_form_s)
    solver_s.debug = debug
    solver_s.fit(tol=1.0e-12, min_its=200)
    coefs_s = solver_s.composite.coefs

    nt.assert_true(
        np.linalg.norm(coefs - coefs_s) / np.linalg.norm(coefs) < 1.0e-02)
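
The equivalence this test relies on can also be seen directly at the proximal level: the nonnegative cone with linear term w has proximal map np.maximum(z - w, 0), which is likewise the proximal map of constrained_positive_part with lagrange w. A minimal sketch of that check, assuming the identity_quadratic / proximal API used in the examples on this page:

import numpy as np
import regreg.api as rr

P, lam = 10, 0.1
z = np.random.standard_normal(P)

linq = rr.identity_quadratic(0, 0, lam * np.ones(P), 0)
nn = rr.nonnegative(P, quadratic=linq)
cpp = rr.constrained_positive_part(P, lagrange=lam)

# proximal map at z: argmin_x 1/2 ||x - z||^2 + atom(x)
q = rr.identity_quadratic(1, z, 0, 0)
np.testing.assert_allclose(nn.proximal(q), cpp.proximal(q))
np.testing.assert_allclose(nn.proximal(q), np.maximum(z - lam, 0))
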
Code Example #2
File: test_separable.py Project: bnaul/regreg
def test_nonnegative_positive_part(debug=False):
    """
    This test verifies that using nonnegative constraint
    with a linear term, with some unpenalized terms yields the same result
    as using separable with constrained_positive_part and nonnegative
    """
    import numpy as np
    import nose.tools as nt
    import regreg.api as rr

    # N - number of data points
    # P - number of columns in design == number of betas
    N, P = 40, 30
    # an arbitrary positive offset for data and design
    offset = 2
    # data
    Y = np.random.normal(size=(N,)) + offset
    # design - with ones as last column
    X = np.ones((N,P))
    X[:,:-1] = np.random.normal(size=(N,P-1)) + offset
    # coef for loss
    coef = 0.5
    # lagrange for penalty
    lagrange = .1

    # Loss function (squared difference between fitted and actual data)
    loss = rr.quadratic.affine(X, -Y, coef=coef)

    # Penalty using nonnegative, leave the last 5 unpenalized but
    # nonnegative
    weights = np.ones(P) * lagrange
    weights[-5:] = 0
    linq = rr.identity_quadratic(0,0,weights,0)
    penalty = rr.nonnegative(P, quadratic=linq)

    # Solution

    composite_form = rr.separable_problem.singleton(penalty, loss)
    solver = rr.FISTA(composite_form)
    solver.debug = debug
    solver.fit(tol=1.0e-12, min_its=200)
    coefs = solver.composite.coefs

    # using the separable penalty, only penalize the first
    # 25 coefficients with constrained_positive_part

    penalties_s = [rr.constrained_positive_part(25, lagrange=lagrange),
                   rr.nonnegative(5)]
    groups_s = [slice(0,25), slice(25,30)]
    penalty_s = rr.separable((P,), penalties_s,
                             groups_s)
    composite_form_s = rr.separable_problem.singleton(penalty_s, loss)
    solver_s = rr.FISTA(composite_form_s)
    solver_s.debug = debug
    solver_s.fit(tol=1.0e-12, min_its=200)
    coefs_s = solver_s.composite.coefs

    nt.assert_true(np.linalg.norm(coefs - coefs_s) / np.linalg.norm(coefs) < 1.0e-02)
Code Example #3
File: regreg_examples.py Project: arokem/spheredwi
def example4(lambda1=10):

    # Example with an initial value for backtracking

    # In the previous examples you'll see a lot of "Increasing inv_step"
    # iterations; these come from the backtracking loop searching for an
    # approximate Lipschitz constant. For this problem the Lipschitz
    # constant is just the largest eigenvalue of X^T X, so it can be
    # precomputed with a few power iterations.

    n = 100
    p = 1000

    X = np.random.standard_normal(n*p).reshape((n,p))
    Y = 10*np.random.standard_normal(n)

    v = np.random.standard_normal(p)
    for i in range(10):
        v = np.dot(X.T, np.dot(X,v))
        norm = np.linalg.norm(v)
        v /= norm
    print "Approximate Lipschitz constant is", norm

    loss = rr.l2normsq.affine(X,-Y,coef=1.)
    sparsity = rr.l1norm(p, lagrange = lambda1)
    nonnegative = rr.nonnegative(p)

    problem = rr.container(loss, sparsity, nonnegative)
    solver = rr.FISTA(problem)

    # Give the approximate Lipschitz constant to the solver
    solver.fit(debug=True, start_inv_step=norm)

    solution = solver.composite.coefs
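
As a standalone sanity check on the power-iteration estimate above (a sketch assuming only numpy), note that the largest eigenvalue of X^T X is the squared largest singular value of X:

import numpy as np

n, p = 100, 1000
X = np.random.standard_normal((n, p))

# power iteration on X^T X, as in example4 above
v = np.random.standard_normal(p)
for _ in range(10):
    v = np.dot(X.T, np.dot(X, v))
    norm = np.linalg.norm(v)
    v /= norm

exact = np.linalg.svd(X, compute_uv=False)[0] ** 2
print("power iteration: %.1f, exact: %.1f" % (norm, exact))
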
Code Example #4
File: regreg_examples.py Project: arokem/spheredwi
def example3(lambda1=10):

    # Example using a smooth approximation to the non-negativity constraint.
    # On large problems this might be faster than using the actual constraint.

    n = 100
    p = 1000


    X = np.random.standard_normal(n*p).reshape((n,p))
    Y = 10*np.random.standard_normal(n)

    loss = rr.l2normsq.affine(X,-Y,coef=1.)
    sparsity = rr.l1norm(p, lagrange = lambda1)
    nonnegative = rr.nonnegative(p)
    smooth_nonnegative = rr.smoothed_atom(nonnegative, epsilon = 1e-4)

    problem = rr.container(loss, sparsity, smooth_nonnegative)
    solver = rr.FISTA(problem)
    solver.fit(debug=True)

    solution1 = solver.composite.coefs



    loss = rr.l2normsq.affine(X,-Y,coef=1.)
    sparsity = rr.l1norm(p, lagrange = lambda1)
    nonnegative = rr.nonnegative(p)

    problem = rr.container(loss, sparsity, nonnegative)
    solver = rr.FISTA(problem)
    solver.fit(debug=True)

    solution2 = solver.composite.coefs


    pl.subplot(1,2,1)
    pl.hist(solution1, bins=40)

    pl.subplot(1,2,2)
    pl.scatter(solution2,solution1)
    pl.xlabel("Constraint")
    pl.ylabel("Smooth constraint")
Code Example #5
File: regreg_examples.py Project: arokem/spheredwi
def example2(lambda1=10):

    # Example with a non-identity X

    n = 100
    p = 1000

    X = np.random.standard_normal(n*p).reshape((n,p))
    Y = 10*np.random.standard_normal(n)

    loss = rr.l2normsq.affine(X,-Y,coef=1.)
    sparsity = rr.l1norm(p, lagrange = lambda1)
    nonnegative = rr.nonnegative(p)

    problem = rr.container(loss, sparsity, nonnegative)
    solver = rr.FISTA(problem)
    solver.fit(debug=True)

    solution = solver.composite.coefs
Code Example #6
File: regreg_examples.py Project: arokem/spheredwi
def example1(lambda1=10):

    # Example with X = np.identity(n)
    # Try varying lambda1 to see shrinkage

    n = 100

    Y = 10*np.random.standard_normal(n)

    loss = rr.l2normsq.shift(-Y,coef=1.)
    sparsity = rr.l1norm(n, lagrange = lambda1)
    nonnegative = rr.nonnegative(n)

    problem = rr.container(loss, sparsity, nonnegative)
    solver = rr.FISTA(problem)
    solver.fit(debug=True)

    solution = solver.composite.coefs

    pl.plot(Y, color='red', label='Y')
    pl.plot(solution, color='blue', label='beta')
    pl.legend()
Code Example #7
File: regreggie.py Project: Garyfallidis/trn
    # Make pure fiber data to test against
    test_direction = np.array([1, 2, 3]) / np.sqrt(14.)
    # unit vector for the test signal direction (1 + 4 + 9 = 14)
    Y = signal_1_stick(S0,bvals[1:],d,test_direction,gradients)
    # Y = design[:,3]

    # Set up model according to Jonathan's recipe
    N, P = X.shape
    # 'coef' becomes 'lagrange' in Brad's branch
    loss = rr.l2normsq.affine(X,-Y, coef=0.5)
    # Select up to (not including) last column to penalize
    lagrange = 500
    weights = np.ones(P) * lagrange
    weights[-1] = 0
    penalty = rr.nonnegative(P, lagrange=1, linear_term=weights)
    # Neighborhood weightings
    coses2 = orientations.dot(orientations.T) ** 2
    nearests = np.argmax(coses2 - np.eye(P-1), axis=1)
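    # deduplicate undirected edges: store each as an ordered (low, high)
    # pair so that (i, nn) and (nn, i) collapse to a single entry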
    edges = {}
    for i, nn in enumerate(nearests):
        if nn > i:
            tup = (i, nn)
        else:
            tup = (nn, i)
        if tup not in edges:
            edges[tup] = 1
    rows, cols = zip(*edges.keys())
    n_edges = len(rows)
    angle_D = np.zeros((n_edges, P))
    angle_D[range(n_edges), rows] = 1
Code Example #8
File: test_normalize.py Project: kevinbfry/regreg
def test_centering_fit_inplace(debug=False):
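    # Not shown in this snippet: the test file's module-level imports
    # (numpy as np, regreg.api as rr, nose.tools as nt) and sq, which from
    # the sq(1, beta, g1, 0) calls below is evidently rr.identity_quadratic.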

    # N - number of data points
    # P - number of columns in design == number of betas
    N, P = 40, 30
    # an arbitrary positive offset for data and design
    offset = 2

    # design
    X = np.random.normal(size=(N, P)) + offset
    L = rr.normalize(X, center=True, scale=False, inplace=True)

    # X should have been normalized in place
    np.testing.assert_almost_equal(np.sum(X, 0), 0)

    # data
    Y = np.random.normal(size=(N, )) + offset

    # coef for loss
    coef = 0.5
    # lagrange for penalty
    lagrange = .1

    # Loss function (squared difference between fitted and actual data)
    loss = rr.quadratic_loss.affine(L, -Y, coef=coef)

    penalties = [
        rr.constrained_positive_part(25, lagrange=lagrange),
        rr.nonnegative(5)
    ]
    groups = [slice(0, 25), slice(25, 30)]
    penalty = rr.separable((P, ), penalties, groups)
    initial = np.random.standard_normal(P)

    composite_form = rr.separable_problem.fromatom(penalty, loss)

    solver = rr.FISTA(composite_form)
    solver.debug = debug
    solver.fit(tol=1.0e-12, min_its=200)
    coefs = solver.composite.coefs

    # Solve the problem with X, which has been normalized in place
    loss2 = rr.quadratic_loss.affine(X, -Y, coef=coef)

    initial2 = np.random.standard_normal(P)
    composite_form2 = rr.separable_problem.fromatom(penalty, loss2)

    solver2 = rr.FISTA(composite_form2)
    solver2.debug = debug
    solver2.fit(tol=1.0e-12, min_its=200)
    coefs2 = solver2.composite.coefs

    for _ in range(10):
        beta = np.random.standard_normal(P)
        g1 = loss.smooth_objective(beta, mode='grad')
        g2 = loss2.smooth_objective(beta, mode='grad')
        np.testing.assert_almost_equal(g1, g2)
        b1 = penalty.proximal(sq(1, beta, g1, 0))
        b2 = penalty.proximal(sq(1, beta, g2, 0))
        np.testing.assert_almost_equal(b1, b2)

        f1 = composite_form.objective(beta)
        f2 = composite_form2.objective(beta)
        np.testing.assert_almost_equal(f1, f2)

    np.testing.assert_almost_equal(composite_form.objective(coefs),
                                   composite_form.objective(coefs2))
    np.testing.assert_almost_equal(composite_form2.objective(coefs),
                                   composite_form2.objective(coefs2))

    nt.assert_true(
        np.linalg.norm(coefs - coefs2) /
        max(np.linalg.norm(coefs), 1) < 1.0e-04)
Code Example #9
def test_centering_fit_inplace(debug=False):

    # N - number of data points
    # P - number of columns in design == number of betas
    N, P = 40, 30
    # an arbitrary positive offset for data and design
    offset = 2

    # design
    X = np.random.normal(size=(N,P)) + offset
    L = rr.normalize(X, center=True, scale=False, inplace=True)

    # X should have been normalized in place
    np.testing.assert_almost_equal(np.sum(X, 0), 0)

    # data
    Y = np.random.normal(size=(N,)) + offset

    # coef for loss
    coef = 0.5
    # lagrange for penalty
    lagrange = .1

    # Loss function (squared difference between fitted and actual data)
    loss = rr.quadratic.affine(L, -Y, coef=coef)

    penalties = [rr.constrained_positive_part(25, lagrange=lagrange),
                 rr.nonnegative(5)]
    groups = [slice(0,25), slice(25,30)]
    penalty = rr.separable((P,), penalties,
                           groups)
    initial = np.random.standard_normal(P)

    composite_form = rr.separable_problem.fromatom(penalty, loss)

    solver = rr.FISTA(composite_form)
    solver.debug = debug
    solver.fit(tol=1.0e-12, min_its=200)
    coefs = solver.composite.coefs

    # Solve the problem with X, which has been normalized in place
    loss2 = rr.quadratic.affine(X, -Y, coef=coef)

    initial2 = np.random.standard_normal(P)
    composite_form2 = rr.separable_problem.fromatom(penalty, loss2)

    solver2 = rr.FISTA(composite_form2)
    solver2.debug = debug
    solver2.fit(tol=1.0e-12, min_its=200)
    coefs2 = solver2.composite.coefs

    for _ in range(10):
        beta = np.random.standard_normal(P)
        g1 = loss.smooth_objective(beta, mode='grad')
        g2 = loss2.smooth_objective(beta, mode='grad')
        np.testing.assert_almost_equal(g1, g2)
        b1 = penalty.proximal(sq(1, beta, g1,0))
        b2 = penalty.proximal(sq(1, beta, g2,0))
        np.testing.assert_almost_equal(b1, b2)

        f1 = composite_form.objective(beta)
        f2 = composite_form2.objective(beta)
        np.testing.assert_almost_equal(f1, f2)


    np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))
    np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))

    nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)
Code Example #10
def test_scaling_and_centering_intercept_fit(debug=False):

    # N - number of data points
    # P - number of columns in design == number of betas
    N, P = 40, 30
    # an arbitrary positive offset for data and design
    offset = 2

    # design
    X = np.random.normal(size=(N, P)) + 0 * offset
    X2 = X - X.mean(0)[None, :]
    X2 = X2 / np.std(X2, 0, ddof=1)[None, :]
    X2 = np.hstack([np.ones((X2.shape[0], 1)), X2])

    L = rr.normalize(X, center=True, scale=True, intercept=True)
    # data
    Y = np.random.normal(size=(N, )) + offset

    # lagrange for penalty
    lagrange = .1

    # Loss function (squared difference between fitted and actual data)
    loss = rr.squared_error(L, Y)

    penalties = [
        rr.constrained_positive_part(25, lagrange=lagrange),
        rr.nonnegative(5)
    ]
    groups = [slice(0, 25), slice(25, 30)]
    penalty = rr.separable((P + 1, ), penalties, groups)

    initial = np.random.standard_normal(P + 1)
    composite_form = rr.separable_problem.fromatom(penalty, loss)
    solver = rr.FISTA(composite_form)
    solver.debug = debug
    solver.fit(tol=1.0e-12, min_its=200)
    coefs = solver.composite.coefs

    # Solve the problem with X2
    loss2 = rr.squared_error(X2, Y)

    initial2 = np.random.standard_normal(P + 1)
    composite_form2 = rr.separable_problem.fromatom(penalty, loss2)

    solver2 = rr.FISTA(composite_form2)
    solver2.debug = debug
    solver2.fit(tol=1.0e-12, min_its=200)
    coefs2 = solver2.composite.coefs

    for _ in range(10):
        beta = np.random.standard_normal(P + 1)
        g1 = loss.smooth_objective(beta, mode='grad')
        g2 = loss2.smooth_objective(beta, mode='grad')
        np.testing.assert_almost_equal(g1, g2)
        b1 = penalty.proximal(sq(1, beta, g1, 0))
        b2 = penalty.proximal(sq(1, beta, g2, 0))
        np.testing.assert_almost_equal(b1, b2)

        f1 = composite_form.objective(beta)
        f2 = composite_form2.objective(beta)
        np.testing.assert_almost_equal(f1, f2)

    np.testing.assert_almost_equal(composite_form.objective(coefs),
                                   composite_form.objective(coefs2))
    np.testing.assert_almost_equal(composite_form2.objective(coefs),
                                   composite_form2.objective(coefs2))

    nt.assert_true(
        np.linalg.norm(coefs - coefs2) /
        max(np.linalg.norm(coefs), 1) < 1.0e-04)
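
The preceding normalization tests hinge on the normalized transform agreeing with an explicitly constructed design matrix. That agreement can also be checked on the linear map itself; a sketch, assuming rr.normalize exposes the linear_map method that regreg affine transforms provide:

import numpy as np
import regreg.api as rr

N, P = 40, 30
X = np.random.normal(size=(N, P)) + 2

# explicit centering, scaling (ddof=1), and intercept column, as above
X2 = X - X.mean(0)[None, :]
X2 = X2 / np.std(X2, 0, ddof=1)[None, :]
X2 = np.hstack([np.ones((N, 1)), X2])

L = rr.normalize(X, center=True, scale=True, intercept=True)
beta = np.random.standard_normal(P + 1)
np.testing.assert_almost_equal(L.linear_map(beta), X2.dot(beta))
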
Code Example #11
File: recon_fiber.py Project: arokem/spheredwi
    print('Solving L1 penalized system\n'
          '\n'
          '    ||Ax - b||^2 + lam |x|_1, subject to x_i >= 0.'
          '\n\n'
          'Here A is the reproducing kernel in sparse matrix form.\n'
          'b is the measured signal and x are the coefficients in\n'
          'the sparse representation.\n\n')

    A = A_new
    b = rSig

    # Set up the problem
    loss = rr.l2normsq.affine(A, -b, coef=1.)
    sparsity = rr.l1norm(n_qpnts, lagrange=lamb)
    constraint = rr.nonnegative(n_qpnts)

    problem = rr.container(loss, sparsity, constraint)

    # Call the solver
    solver = rr.FISTA(problem)
    solver.fit(debug=False)
    nd_coefs_l1 = solver.composite.coefs

    # Zero out coefficients below the cutoff (mean + 2.5 standard deviations)
    cutoff = nd_coefs_l1.mean() + 2.5*nd_coefs_l1.std(ddof=1)
    nd_coefs_l1_trim = np.where(nd_coefs_l1 > cutoff, nd_coefs_l1, 0)

    # Get indices needed for sorting coefs, in reverse order.
    sortedIndex = nd_coefs_l1_trim.argsort()[::-1]
    # number of significant coefficients