Example #1
def test_ztz(use_whitening):
    n_atoms = 7
    n_trials = 3
    n_channels = 5
    n_times_valid = 500
    n_times_atom = 10
    n_times = n_times_valid + n_times_atom - 1
    random_state = None

    rng = check_random_state(random_state)

    X = rng.randn(n_trials, n_channels, n_times)
    z = rng.randn(n_trials, n_atoms, n_times_valid)
    D = rng.randn(n_atoms, n_channels, n_times_atom)

    if use_whitening:
        ar_model, X = whitening(X)
        zw = apply_whitening(ar_model, z, mode="full")
        ztz = compute_ztz(zw, n_times_atom)
        # Gradient of 0.5 * ||X_hat||^2 w.r.t. D, computed lag by lag from
        # the autocorrelation ztz of the whitened activations.
        grad = np.zeros(D.shape)
        for t in range(n_times_atom):
            grad[:, :, t] = np.tensordot(ztz[:, :, t:t + n_times_atom],
                                         D[:, :, ::-1],
                                         axes=([1, 2], [0, 2]))
    else:
        ztz = compute_ztz(z, n_times_atom)
        grad = tensordot_convolve(ztz, D)
    # grad is the gradient of 0.5 * ||X_hat||^2, i.e. (Z^T Z) D,
    # so <D, grad> should equal ||X_hat||^2.
    cost = np.dot(D.ravel(), grad.ravel())

    X_hat = construct_X_multi(z, D)
    if use_whitening:
        X_hat = apply_whitening(ar_model, X_hat, mode="full")

    assert np.isclose(cost, np.dot(X_hat.ravel(), X_hat.ravel()))
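The identity asserted above holds because grad equals (Z^T Z) D, so <D, grad> = <ZD, ZD> = ||X_hat||^2. A minimal single-channel, single-atom sketch of the same identity in plain NumPy (illustrative only, independent of the helpers used above):

import numpy as np

rng = np.random.RandomState(0)
n_times_atom, n_times_valid = 10, 500
d = rng.randn(n_times_atom)   # one atom, one channel
z = rng.randn(n_times_valid)  # its activation signal

x_hat = np.convolve(z, d)                    # full convolution: the reconstruction
grad = np.correlate(x_hat, z, mode='valid')  # gradient of 0.5 * ||x_hat||^2 w.r.t. d

# <d, grad> = <z * d, z * d> = ||x_hat||^2
assert np.isclose(np.dot(d, grad), np.dot(x_hat, x_hat))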
Example #2
def test_update_z_multi_decrease_cost_function(loss, solver):
    n_trials, n_channels, n_times = 2, 3, 100
    n_times_atom, n_atoms = 10, 4
    n_times_valid = n_times - n_times_atom + 1
    reg = 0
    loss_params = dict(gamma=1, sakoe_chiba_band=n_times_atom // 2)

    rng = np.random.RandomState(0)
    X = rng.randn(n_trials, n_channels, n_times)
    uv = rng.randn(n_atoms, n_channels + n_times_atom)
    z = rng.randn(n_trials, n_atoms, n_times_valid)

    if loss == 'whitening':
        loss_params['ar_model'], X = whitening(X, ordar=10)

    loss_0 = compute_X_and_objective_multi(X=X, z_hat=z, D_hat=uv, reg=reg,
                                           feasible_evaluation=False,
                                           loss=loss, loss_params=loss_params)

    z_hat, ztz, ztX = update_z_multi(X, uv, reg, z0=z, solver=solver,
                                     loss=loss, loss_params=loss_params,
                                     return_ztz=True)

    loss_1 = compute_X_and_objective_multi(X=X, z_hat=z_hat, D_hat=uv,
                                           reg=reg, feasible_evaluation=False,
                                           loss=loss, loss_params=loss_params)
    # A single update of z must strictly decrease the objective.
    assert loss_1 < loss_0

    if loss == 'l2':
        assert np.allclose(ztz, compute_ztz(z_hat, n_times_atom))
        assert np.allclose(ztX, compute_ztX(z_hat, X))
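The assertion loss_1 < loss_0 reflects the basic property of the z solvers: a proximal or coordinate descent update with a proper step size cannot increase a lasso-type objective. A self-contained toy illustration of that property with one ISTA (proximal gradient) step on a small lasso problem (all names below are illustrative, unrelated to the code above):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(20, 10)
b = rng.randn(20)
reg = 0.1

def objective(z):
    return 0.5 * np.sum((A @ z - b) ** 2) + reg * np.abs(z).sum()

z0 = rng.randn(10)
step = 1.0 / np.linalg.norm(A, ord=2) ** 2   # 1 / Lipschitz constant of the gradient
z1 = z0 - step * A.T @ (A @ z0 - b)          # gradient step on the smooth part
z1 = np.sign(z1) * np.maximum(np.abs(z1) - step * reg, 0)  # soft-thresholding

assert objective(z1) < objective(z0)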
Example #3
def test_gradient_uv(loss):
    # Generate synthetic data
    n_times_atom, n_times = 10, 100
    n_channels = 5
    n_atoms = 2
    n_trials = 3
    loss_params = dict(gamma=1, sakoe_chiba_band=n_times_atom // 2)

    # Unseeded on purpose: a correct gradient passes the check for any draw.
    rng = np.random.RandomState()
    X = rng.normal(size=(n_trials, n_channels, n_times))
    z = rng.normal(size=(n_trials, n_atoms, n_times - n_times_atom + 1))
    uv = rng.normal(size=(n_atoms, n_channels + n_times_atom)).ravel()

    if loss == 'whitening':
        loss_params['ar_model'], X = whitening(X, ordar=10)

    def func(uv0):
        uv0 = uv0.reshape(n_atoms, n_channels + n_times_atom)
        X_hat = construct_X_multi(z, D=uv0, n_channels=n_channels)
        return compute_objective(X, X_hat, loss=loss, loss_params=loss_params)

    def grad(uv0):
        return gradient_uv(uv=uv0,
                           X=X,
                           z=z,
                           flatten=True,
                           loss=loss,
                           loss_params=loss_params)

    error = optimize.check_grad(func, grad, uv, epsilon=2e-8)
    grad_uv = grad(uv)
    n_grad = np.sqrt(np.dot(grad_uv, grad_uv))
    try:
        assert error < 1e-5 * n_grad, "Wrong gradient: {:.4e}".format(error)
    except AssertionError:
        if DEBUG:
            grad_approx = optimize.approx_fprime(uv, func, 2e-8)

            import matplotlib.pyplot as plt
            plt.semilogy(abs(grad_approx - grad_uv))
            plt.figure()
            plt.plot(grad_approx, label="approx")
            plt.plot(grad_uv, '--', label="grad")
            plt.legend()
            plt.show()
        raise

    if loss == 'l2':
        constants = _get_d_update_constants(X, z)
        msg = "Wrong value for zt*X"
        assert np.allclose(
            gradient_uv(0 * uv, X=X, z=z, flatten=True),
            gradient_uv(0 * uv, constants=constants, flatten=True)), msg
        msg = "Wrong value for zt*z"
        assert np.allclose(gradient_uv(uv, X=X, z=z, flatten=True),
                           gradient_uv(uv, constants=constants,
                                       flatten=True)), msg
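Both this test and test_gradient_d below follow the same recipe: scipy.optimize.check_grad returns the l2 norm of the difference between the analytical gradient and a finite-difference estimate, and the assertion scales the tolerance by the gradient's norm so that the criterion is relative. A minimal standalone version of that recipe:

import numpy as np
from scipy import optimize

def func(w):
    return 0.5 * np.dot(w, w)  # f(w) = ||w||^2 / 2

def grad(w):
    return w                   # its exact gradient

w0 = np.random.RandomState(0).randn(5)
error = optimize.check_grad(func, grad, w0, epsilon=2e-8)

# Relative criterion, as in the tests above: compare the finite-difference
# error to the gradient's norm rather than to an absolute threshold.
assert error < 1e-5 * np.linalg.norm(grad(w0))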
Example #4
def test_gradients(loss):
    """Check that the gradients have the correct shape.
    """
    n_trials, n_channels, n_times = 5, 3, 100
    n_atoms, n_times_atom = 10, 15

    n_checks = 5
    if loss == "dtw":
        n_checks = 1

    loss_params = dict(gamma=.01)

    n_times_valid = n_times - n_times_atom + 1

    X = np.random.randn(n_trials, n_channels, n_times)
    z = np.random.randn(n_trials, n_atoms, n_times_valid)

    uv = np.random.randn(n_atoms, n_channels + n_times_atom)
    D = get_D(uv, n_channels)
    if loss == "whitening":
        loss_params['ar_model'], X = whitening(X)

    # Test gradient D
    assert D.shape == _gradient_d(X, z, D, loss, loss_params=loss_params).shape

    def pobj(ds):
        return _objective(X, z, ds.reshape(n_atoms, n_channels, -1), loss,
                          loss_params=loss_params)

    def grad(ds):
        return _gradient_d(X, z, ds, loss=loss, flatten=True,
                           loss_params=loss_params)

    gradient_checker(pobj, grad, np.prod(D.shape), n_checks=n_checks,
                     grad_name="gradient D for loss '{}'".format(loss),
                     rtol=1e-4)

    # Test gradient z
    assert z[0].shape == _gradient_zi(
        X, z, D, loss, loss_params=loss_params).shape

    def pobj(zs):
        return _objective(X[:1], zs.reshape(1, n_atoms, -1), D, loss,
                          loss_params=loss_params)

    def grad(zs):
        return gradient_zi(X[0], zs, D, loss=loss, flatten=True,
                           loss_params=loss_params)

    gradient_checker(pobj, grad, n_atoms * n_times_valid, n_checks=n_checks,
                     debug=True, grad_name="gradient z for loss '{}'"
                     .format(loss), rtol=1e-4)
Example #5
def test_gradient_d(loss):
    # Generate synthetic data
    n_times_atom, n_times = 10, 100
    n_channels = 5
    n_atoms = 2
    n_trials = 3

    # Constant for the DTW loss
    loss_params = dict(gamma=1, sakoe_chiba_band=n_times_atom // 2)

    rng = np.random.RandomState()
    X = rng.normal(size=(n_trials, n_channels, n_times))
    z = rng.normal(size=(n_trials, n_atoms, n_times - n_times_atom + 1))
    d = rng.normal(size=(n_atoms, n_channels, n_times_atom)).ravel()

    if loss == 'whitening':
        loss_params['ar_model'], X = whitening(X, ordar=10)

    def func(d0):
        D0 = d0.reshape(n_atoms, n_channels, n_times_atom)
        X_hat = construct_X_multi(z, D=D0)
        return compute_objective(X, X_hat, loss=loss, loss_params=loss_params)

    def grad(d0):
        return gradient_d(D=d0,
                          X=X,
                          z=z,
                          loss=loss,
                          loss_params=loss_params,
                          flatten=True)

    error = optimize.check_grad(func, grad, d, epsilon=2e-8)
    grad_d = grad(d)
    n_grad = np.sqrt(np.dot(grad_d, grad_d))
    try:
        assert error < 1e-5 * n_grad, "Wrong gradient: {:.4e}".format(error)
    except AssertionError:
        if DEBUG:
            grad_approx = optimize.approx_fprime(d, func, 2e-8)

            import matplotlib.pyplot as plt
            plt.semilogy(abs(grad_approx - grad_d))
            plt.figure()
            plt.plot(grad_approx, label="approx")
            plt.plot(grad_d, '--', label="grad")
            plt.legend()
            plt.show()
        raise
Example #6
def test_consistency(loss, func):
    """Check that the result are the same for the full rank D and rank 1 uv.
    """
    n_trials, n_channels, n_times = 5, 3, 30
    n_atoms, n_times_atom = 4, 7

    loss_params = dict(gamma=.01)

    n_times_valid = n_times - n_times_atom + 1

    X = np.random.randn(n_trials, n_channels, n_times)
    z = np.random.randn(n_trials, n_atoms, n_times_valid)

    uv = np.random.randn(n_atoms, n_channels + n_times_atom)
    D = get_D(uv, n_channels)

    if loss == "whitening":
        loss_params['ar_model'], X = whitening(X)

    val_D = func(X, z, D, loss, loss_params=loss_params)
    val_uv = func(X, z, uv, loss, loss_params=loss_params)
    assert np.allclose(val_D, val_uv)
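This consistency holds because every helper accepts either the full dictionary D, of shape (n_atoms, n_channels, n_times_atom), or its rank-1 parametrization uv, of shape (n_atoms, n_channels + n_times_atom), and expands the latter internally. Assuming get_D follows the usual rank-1 convention (each atom is the outer product of a spatial map u and a temporal pattern v), the expansion amounts to the sketch below; get_D_rank1 is an illustrative name, not the library function:

import numpy as np

def get_D_rank1(uv, n_channels):
    """Expand rank-1 atoms uv = [u | v] into full (atom, channel, time) filters."""
    u = uv[:, :n_channels]   # spatial maps, shape (n_atoms, n_channels)
    v = uv[:, n_channels:]   # temporal patterns, shape (n_atoms, n_times_atom)
    return u[:, :, None] * v[:, None, :]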