Example 1
def test_changepoint_scaled():

    p = 150
    M = multiscale(p)
    M.minsize = 10                 # use only intervals with at least 10 points
    X = ra.adjoint(M)              # regression design: the adjoint of the multiscale transform

    # noisy signal: a level shift of +8 on indices 20:50 and a constant offset of +2
    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    # critical lagrange value: max_j |(X^T Y)_j| / w_j, with w the penalty weights used below
    lammax = np.fabs(np.sqrt(M.sizes) * X.adjoint_map(Y) / (1 + np.sqrt(np.log(M.sizes)))).max()

    penalty = rr.weighted_l1norm((1 + np.sqrt(np.log(M.sizes))) / np.sqrt(M.sizes), lagrange=0.5*lammax)
    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    if INTERACTIVE:
        plt.scatter(np.arange(p), Y)
        plt.plot(np.arange(p), Yhat)
        plt.show()
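Note: these snippets are test functions excerpted from the regreg test suite and are shown without their imports. A plausible preamble that makes them self-contained is sketched below; the exact module paths (in particular for multiscale and choose_tuning_parameter) are assumptions and should be checked against the installed regreg version.

import numpy as np
import matplotlib.pyplot as plt
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                           assert_raises)

import regreg.api as rr
import regreg.affine as ra
from regreg.affine import (affine_transform, adjoint, reshape,
                           astransform, power_L, todense)
# assumed location of the multiscale helpers used in the changepoint tests
from regreg.affine.multiscale import multiscale, choose_tuning_parameter

INTERACTIVE = False   # set to True to display the matplotlib figures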
Example 2
def test_misc():
    X = np.random.standard_normal((40, 5))
    power_L(X)
    Xa = rr.astransform(X)
    np.testing.assert_allclose(todense(Xa), X)

    reshapeA = adjoint(reshape((30, ), (6, 5)))
    assert_raises(NotImplementedError, todense, reshapeA)
Example 3
def test_adjoint():
    X = np.random.standard_normal((20, 30))
    b = np.random.standard_normal(20)
    L = affine_transform(X, b)

    z = np.random.standard_normal(30)
    w = np.random.standard_normal(20)
    A = adjoint(L)

    assert_array_almost_equal(A.linear_map(w), L.adjoint_map(w))
    assert_array_almost_equal(A.affine_map(w), L.adjoint_map(w))
    assert_array_almost_equal(A.adjoint_map(z), L.linear_map(z))
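For reference, the identity this test exercises is just the matrix adjoint: the adjoint of x -> X x is w -> X^T w, so <X z, w> = <z, X^T w>, and the offset b plays no role in it (which is why A.affine_map and L.adjoint_map agree above). A minimal NumPy-only check of the same pairing, independent of regreg:

import numpy as np

X = np.random.standard_normal((20, 30))
z = np.random.standard_normal(30)
w = np.random.standard_normal(20)

# <X z, w> == <z, X^T w>: the defining property of the adjoint
assert np.allclose(np.dot(X.dot(z), w), np.dot(z, X.T.dot(w)))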
Example 4
def test_adjoint():
    X = np.random.standard_normal((20,30))
    b = np.random.standard_normal(20)
    L = affine_transform(X, b)

    z = np.random.standard_normal(30)
    w = np.random.standard_normal(20)
    A = adjoint(L)

    assert_array_equal(A.linear_map(w), L.adjoint_map(w))
    assert_array_equal(A.affine_map(w), L.adjoint_map(w))
    assert_array_equal(A.adjoint_map(z), L.linear_map(z))
Example 5
def test_choose_parameter(delta=2, p=60):

    # mean-shift signal: a jump of size delta at the midpoint, plus Gaussian noise
    signal = np.zeros(p)
    signal[(p//2):] += delta
    Z = np.random.standard_normal(p) + signal
    p = Z.shape[0]
    M = multiscale(p)
    M.scaling = np.sqrt(M.sizes)
    lam = choose_tuning_parameter(M)
    weights = (lam + np.sqrt(2 * np.log(p / M.sizes))) / np.sqrt(p)

    Z0 = Z - Z.mean()
    loss = rr.squared_error(ra.adjoint(M), Z0)
    penalty = rr.weighted_l1norm(weights, lagrange=1.)
    problem = rr.simple_problem(loss, penalty)
    coef = problem.solve()
    active = coef != 0

    if active.sum():
        X = M.form_matrix(M.slices[active])[0]   # dense design for the intervals selected by the lasso
Example 6
def test_changepoint():

    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    lammax = np.fabs(X.adjoint_map(Y)).max()

    penalty = rr.l1norm(X.input_shape, lagrange=0.5*lammax)
    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    plt.scatter(np.arange(p), Y)
    plt.plot(np.arange(p), Yhat)
Example 7
def test_changepoint():

    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    lammax = np.fabs(X.adjoint_map(Y)).max()

    penalty = rr.l1norm(X.input_shape, lagrange=0.5*lammax)
    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    plt.scatter(np.arange(p), Y)
    plt.plot(np.arange(p), Yhat)
    plt.show()
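A note on the lammax used in the changepoint tests: for a squared-error loss 0.5 * ||Y - X beta||^2 with a weighted l1 penalty in Lagrange form, the all-zero solution is optimal exactly when |(X^T Y)_j| <= lam * w_j for every j, so the critical value is lam_max = max_j |(X^T Y)_j| / w_j. test_changepoint uses unit weights, while test_changepoint_scaled uses w_j = (1 + sqrt(log |I_j|)) / sqrt(|I_j|), where |I_j| is the length of interval j. The sketch below only illustrates the computation; a dense random matrix stands in for ra.adjoint(M), and the 1/2 loss scaling is assumed to match regreg's convention.

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((150, 40))   # stand-in design; the tests use ra.adjoint(M)
Y = rng.standard_normal(150)
w = np.ones(40)                      # unit weights, as in test_changepoint

# smallest lagrange value at which the weighted lasso solution is exactly zero
lammax = np.fabs(X.T.dot(Y) / w).max()

# the tests then solve at a fraction of this value, e.g. lagrange = 0.5 * lammax,
# so that only the strongest effects enter the fit
lam = 0.5 * lammax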