def test_changepoint_scaled():
    """Fit a changepoint model with a size-scaled weighted-l1 penalty.

    Builds a multiscale transform on a signal with a bump on [20, 50) and a
    constant offset, solves the penalized least-squares problem at half the
    maximal (scaled) dual value, and optionally plots the fit.
    """
    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    # Noise plus a bump of height 8 on [20, 50) and a global offset of 2.
    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    # Largest scaled dual value; the lagrange parameter is half of it.
    scale = (1 + np.sqrt(np.log(M.sizes))) / np.sqrt(M.sizes)
    lammax = np.fabs(X.adjoint_map(Y) / scale).max()
    penalty = rr.weighted_l1norm(scale, lagrange=0.5 * lammax)

    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()

    # Fitted values on the original (un-centered) scale.
    Yhat = X.linear_map(soln)
    Yhat += meanY

    if INTERACTIVE:
        plt.scatter(np.arange(p), Y)
        plt.plot(np.arange(p), Yhat)
        plt.show()
def test_misc():
    """Smoke-test power_L, astransform/todense, and adjoint-of-reshape.

    Checks that densifying a matrix transform recovers the matrix, and that
    todense raises NotImplementedError for the adjoint of a reshape.
    """
    mat = np.random.standard_normal((40, 5))
    power_L(mat)

    # Densifying a plain matrix transform must reproduce the matrix.
    transform = rr.astransform(mat)
    np.testing.assert_allclose(todense(transform), mat)

    # todense has no implementation for the adjoint of a reshape.
    adj_reshape = adjoint(reshape((30,), (6, 5)))
    assert_raises(NotImplementedError, todense, adj_reshape)
def test_misc():
    """Smoke-test power_L, astransform/todense, and adjoint-of-reshape.

    NOTE(review): this redefines test_misc from earlier in the file; only the
    last definition is collected — confirm the duplication is intentional.
    """
    A = np.random.standard_normal((40, 5))
    power_L(A)

    # Round-trip: astransform then todense yields the original matrix.
    np.testing.assert_allclose(todense(rr.astransform(A)), A)

    # The adjoint of a reshape cannot be densified.
    assert_raises(NotImplementedError,
                  todense,
                  adjoint(reshape((30,), (6, 5))))
def test_adjoint():
    """adjoint(L) swaps the forward and adjoint maps of an affine transform."""
    mat = np.random.standard_normal((20, 30))
    offset = np.random.standard_normal(20)
    L = affine_transform(mat, offset)

    x_in = np.random.standard_normal(30)   # lives in L's input space
    y_out = np.random.standard_normal(20)  # lives in L's output space

    A = adjoint(L)
    # Forward maps of A agree with L's adjoint map (both linear and affine).
    assert_array_almost_equal(A.linear_map(y_out), L.adjoint_map(y_out))
    assert_array_almost_equal(A.affine_map(y_out), L.adjoint_map(y_out))
    # And A's adjoint agrees with L's forward linear map.
    assert_array_almost_equal(A.adjoint_map(x_in), L.linear_map(x_in))
def test_adjoint():
    """adjoint(L) swaps the forward and adjoint maps of an affine transform.

    NOTE(review): this redefines test_adjoint from earlier in the file; only
    the last definition is collected — confirm the duplication is intentional.
    """
    X = np.random.standard_normal((20, 30))
    b = np.random.standard_normal(20)
    L = affine_transform(X, b)

    z = np.random.standard_normal(30)
    w = np.random.standard_normal(20)

    A = adjoint(L)
    # Use almost-equal (as the sibling test_adjoint does): these are float
    # matrix products, so bitwise equality via assert_array_equal is fragile.
    assert_array_almost_equal(A.linear_map(w), L.adjoint_map(w))
    assert_array_almost_equal(A.affine_map(w), L.adjoint_map(w))
    assert_array_almost_equal(A.adjoint_map(z), L.linear_map(z))
def test_choose_parameter(delta=2, p=60):
    """Tune the multiscale penalty parameter and solve at the chosen value.

    Parameters
    ----------
    delta : jump height of the piecewise-constant mean at the midpoint.
    p : length of the simulated signal.
    """
    # Piecewise-constant mean: a jump of `delta` at the midpoint, plus noise.
    signal = np.zeros(p)
    signal[(p // 2):] += delta
    Z = np.random.standard_normal(p) + signal

    p = Z.shape[0]
    M = multiscale(p)
    M.scaling = np.sqrt(M.sizes)

    # Data-driven tuning parameter, folded into per-interval weights.
    lam = choose_tuning_parameter(M)
    weights = (lam + np.sqrt(2 * np.log(p / M.sizes))) / np.sqrt(p)

    # Solve the weighted-l1 penalized least-squares problem on centered data.
    Z0 = Z - Z.mean()
    loss = rr.squared_error(ra.adjoint(M), Z0)
    penalty = rr.weighted_l1norm(weights, lagrange=1.)
    problem = rr.simple_problem(loss, penalty)
    coef = problem.solve()

    active = coef != 0
    if active.sum():
        # Form the design restricted to the selected intervals.
        X = M.form_matrix(M.slices[active])[0]
def test_changepoint():
    """Fit an (unweighted) l1-penalized changepoint model.

    Same setup as test_changepoint_scaled but with a plain l1 penalty at half
    the maximal dual value.
    """
    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    # Noise plus a bump of height 8 on [20, 50) and a global offset of 2.
    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    lammax = np.fabs(X.adjoint_map(Y)).max()
    penalty = rr.l1norm(X.input_shape, lagrange=0.5 * lammax)

    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    # Guard plotting behind INTERACTIVE (and finish with show()), matching
    # the convention used by test_changepoint_scaled: the original plotted
    # unconditionally, which breaks headless test runs.
    if INTERACTIVE:
        plt.scatter(np.arange(p), Y)
        plt.plot(np.arange(p), Yhat)
        plt.show()
def test_changepoint():
    """Fit an (unweighted) l1-penalized changepoint model.

    NOTE(review): this redefines test_changepoint from earlier in the file;
    only the last definition is collected — confirm the duplication is
    intentional.
    """
    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    # Noise plus a bump of height 8 on [20, 50) and a global offset of 2.
    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    lammax = np.fabs(X.adjoint_map(Y)).max()
    penalty = rr.l1norm(X.input_shape, lagrange=0.5 * lammax)

    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    # Guard plotting behind INTERACTIVE, matching the convention used by
    # test_changepoint_scaled: the original called plt.show() unconditionally,
    # which blocks/breaks headless test runs.
    if INTERACTIVE:
        plt.scatter(np.arange(p), Y)
        plt.plot(np.arange(p), Yhat)
        plt.show()