Example #1
import numpy as np
from numpy.testing import assert_allclose, assert_equal

from picard import picard
from picard.densities import Tanh, Exp, Cube


def test_pre_fastica():
    N, T = 3, 1000
    rng = np.random.RandomState(42)
    names = ['tanh', 'cube']
    for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), 'cube']):
        if j == 0:
            S = rng.laplace(size=(N, T))
        else:
            S = rng.uniform(low=-1, high=1, size=(N, T))
        A = rng.randn(N, N)
        X = np.dot(A, S)
        K, W, Y = picard(X.copy(),
                         fun=fun,
                         ortho=False,
                         random_state=0,
                         fastica_it=10)
        if fun == 'tanh':
            fun = Tanh()
        elif fun == 'exp':
            fun = Exp()
        elif fun == 'cube':
            fun = Cube()
        # Get the final gradient norm
        psiY = fun.score_and_der(Y)[0]
        G = np.inner(psiY, Y) / float(T) - np.eye(N)
        err_msg = 'fun %s, gradient norm greater than tol' % names[j]
        assert_allclose(G, np.zeros((N, N)), atol=1e-7, err_msg=err_msg)
        assert_equal(Y.shape, X.shape)
        assert_equal(W.shape, A.shape)
        assert_equal(K.shape, A.shape)
        WA = W.dot(K).dot(A)
        WA = permute(WA)  # Permute and scale
        err_msg = 'fun %s, wrong unmixing matrix' % names[j]
        assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg)
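These tests rely on a helper `permute` that lives in the same test module and is not shown in the snippet: it undoes ICA's inherent permutation and scale ambiguity, so a correct unmixing turns `W.dot(K).dot(A)` into something close to the identity. The `fastica_it=10` argument runs a few FastICA iterations as a warm start before the Picard solver proper. A minimal sketch of such a helper (a hypothetical reimplementation using an assignment solver, not the original's exact code):

import numpy as np
from scipy.optimize import linear_sum_assignment


def permute(A):
    """Reorder and rescale rows of A so that, if A is a scaled permutation
    (as for a correct ICA unmixing), the result is close to np.eye(n)."""
    A = np.array(A, dtype=float)
    # Match each row to one column, maximizing the total matched magnitude.
    rows, cols = linear_sum_assignment(-np.abs(A))
    A = A[np.argsort(cols)]
    # Rescale each row so its diagonal entry is exactly 1.
    A /= np.diag(A)[:, None]
    return A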
Example #2
import numpy as np
from numpy.testing import assert_allclose, assert_equal

from picard import picard
from picard.densities import Tanh, Exp, Cube


def test_picardo():
    N, T = 3, 2000
    rng = np.random.RandomState(4)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    X = np.dot(A, S)
    names = ['tanh', 'exp', 'cube']
    for fastica_it in [None, 2]:
        for fun in names:
            print(fun)
            K, W, Y = picard(X.copy(),
                             fun=fun,
                             ortho=True,
                             random_state=rng,
                             fastica_it=fastica_it,
                             verbose=True)
            name = fun
            if fun == 'tanh':
                fun = Tanh()
            elif fun == 'exp':
                fun = Exp()
            elif fun == 'cube':
                fun = Cube()
            # Get the final gradient norm
            psiY = fun.score_and_der(Y)[0]
            G = np.inner(psiY, Y) / float(T) - np.eye(N)
            G = (G - G.T) / 2.  # take skew-symmetric part
            err_msg = 'fun %s, gradient norm greater than tol' % name
            assert_allclose(G, np.zeros((N, N)), atol=1e-7, err_msg=err_msg)
            assert_equal(Y.shape, X.shape)
            assert_equal(W.shape, A.shape)
            assert_equal(K.shape, A.shape)
            WA = W.dot(K).dot(A)
            WA = permute(WA)  # Permute and scale
            err_msg = 'fun %s, wrong unmixing matrix' % name
            assert_allclose(WA, np.eye(N), rtol=0, atol=0.1, err_msg=err_msg)
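With `ortho=True`, picard runs the Picard-O variant, which constrains the unmixing of the whitened data to be orthogonal; movements along that manifold are generated by skew-symmetric matrices, which is why the test only checks the skew-symmetric part `(G - G.T) / 2` of the relative gradient. A quick way to see the constraint in action (a sketch, not part of the original test):

import numpy as np
from picard import picard

rng = np.random.RandomState(0)
S = rng.laplace(size=(3, 2000))
X = rng.randn(3, 3).dot(S)
K, W, Y = picard(X, ortho=True, random_state=0)
# W acts on whitened data and should come back numerically orthogonal.
print(np.allclose(W.dot(W.T), np.eye(3), atol=1e-6))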
Example #3
import numpy as np
from numpy.testing import assert_allclose

from picard import picard


def test_shift():
    N, T = 5, 1000
    rng = np.random.RandomState(42)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    offset = rng.randn(N)
    X = np.dot(A, S) + offset[:, None]
    _, W, Y, X_mean = picard(X.copy(),
                             ortho=False,
                             whiten=False,
                             return_X_mean=True,
                             random_state=rng)
    assert_allclose(offset, X_mean, rtol=0, atol=0.2)
    WA = W.dot(A)
    WA = permute(WA)
    assert_allclose(WA, np.eye(N), rtol=0, atol=0.2)
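The loose `atol=0.2` on the recovered mean is deliberate: the algorithm estimates `X_mean` as the empirical row mean of `X`, and `mean(X) = offset + A @ mean(S)`, where the Laplace sources' empirical mean over T=1000 samples is small but not zero. A quick check of the size of that bias term, drawing S and A in the same order and with the same seed as the test:

import numpy as np

rng = np.random.RandomState(42)
S = rng.laplace(size=(5, 1000))
A = rng.randn(5, 5)
# The estimated mean absorbs A @ mean(S) on top of the true offset.
print(np.abs(A.dot(S.mean(axis=1))))  # entries typically of order 1e-1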
Example #4
import numpy as np
from numpy.testing import assert_allclose

from picard import picard


def test_extended():
    N, T = 4, 2000
    n = N // 2
    rng = np.random.RandomState(42)

    S = np.concatenate(
        (rng.laplace(size=(n, T)), rng.uniform(low=-1, high=1, size=(n, T))),
        axis=0)
    print(S.shape)
    A = rng.randn(N, N)
    X = np.dot(A, S)
    K, W, Y = picard(X, ortho=False, random_state=0, extended=True)
    assert Y.shape == X.shape
    assert W.shape == A.shape
    assert K.shape == A.shape
    WA = W.dot(K).dot(A)
    WA = permute(WA)  # Permute and scale
    err_msg = 'wrong unmixing matrix'
    assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg)
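`extended=True` switches on the extended variant, which adapts the sign of the nonlinearity per component so that a mix of super-Gaussian and sub-Gaussian sources can be separated. The two source families used above really do sit on opposite sides of Gaussianity, as their excess kurtosis shows (a quick check, not part of the test):

import numpy as np
from scipy.stats import kurtosis

rng = np.random.RandomState(42)
print(kurtosis(rng.laplace(size=100000)))         # ~ +3.0: super-Gaussian
print(kurtosis(rng.uniform(-1, 1, size=100000)))  # ~ -1.2: sub-Gaussian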
Example #5
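This snippet uses a `custom_density` object defined earlier in the original script and not shown here. picard accepts any object exposing `log_lik(Y)` and `score_and_der(Y)`, where the score is the derivative of the log-likelihood and the second return value is the score's own derivative. A minimal sketch of a compatible density, assuming a quartic log-density purely for illustration:

class CustomDensity(object):
    """Hypothetical density with log-likelihood Y ** 4 / 4."""

    def log_lik(self, Y):
        return Y ** 4 / 4

    def score_and_der(self, Y):
        # Score = derivative of log_lik, plus the score's own derivative.
        return Y ** 3, 3 * Y ** 2


custom_density = CustomDensity()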
import numpy as np
import matplotlib.pyplot as plt

from picard import picard

# `permute`, used for the final plot, is the local test helper sketched
# after Example #1.

###############################################################################
# Plot the corresponding functions

x = np.linspace(-2, 2, 100)
log_likelihood = custom_density.log_lik(x)
psi, psi_der = custom_density.score_and_der(x)

names = ['log-likelihood', 'score', 'score derivative']

plt.figure()
for values, name in zip([log_likelihood, psi, psi_der], names):
    plt.plot(x, values, label=name)
plt.legend()
plt.title("Custom density")
plt.show()

###############################################################################
# Run Picard on toy dataset using this density

rng = np.random.RandomState(0)
N, T = 5, 1000
S = rng.laplace(size=(N, T))
A = rng.randn(N, N)
X = np.dot(A, S)
K, W, Y = picard(X, fun=custom_density, random_state=0)
plt.figure()
plt.imshow(permute(W.dot(K).dot(A)), interpolation='nearest')
plt.title('Product between the estimated unmixing matrix and the mixing '
          'matrix')
plt.show()
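If the separation succeeded, the permuted product is close to the identity: a bright diagonal over near-zero off-diagonal entries, mirroring the `assert_allclose(WA, np.eye(N), ...)` checks in the test examples above.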