Example #1
0
def test_fista_decrease(momentum, max_iter):
    """ Test that the BOLD-TV cost function decreases along FISTA iterates. """
    # HRF kernel (t_r=1.0, 30 frames) and problem dimensions; the valid
    # length follows from the 'valid'-mode convolution with h.
    h = double_gamma_hrf(1.0, 30)
    n_times = 100
    n_times_valid = n_times - len(h) + 1

    # Random observation y and random starting point x0 (single sample).
    y = np.random.randn(1, n_times)
    x0 = np.random.randn(1, n_times_valid)

    # Precomputed correlation terms reused by the gradient: h^T h and h^T y
    # (row-wise valid-mode correlation of each signal with the flipped kernel).
    hth = np.convolve(h[::-1], h)
    htY = np.r_[[np.convolve(h[::-1], y_, mode='valid') for y_ in y]]

    # Step size set from the Lipschitz constant of the smooth term's gradient.
    lipsch_cst = lipsch_cst_from_kernel(h, n_times_valid)
    step_size = 1.0 / lipsch_cst
    lbda = 1.0

    def grad(x):
        # Gradient of the smooth (data-fit) part, using the cached terms.
        return _grad_t_analysis(x, hth, htY)

    def obj(x):
        # Full objective value at x (data fit + lbda-weighted penalty).
        return _obj_t_analysis(x, y, h, lbda=lbda)

    def prox(x, step_size):
        # Row-wise TV proximal operator with the scaled threshold.
        return np.r_[[tv1_1d(x_, lbda * step_size) for x_ in x]]

    x1 = fista(grad,
               obj,
               prox,
               x0,
               momentum=momentum,
               max_iter=max_iter,
               step_size=step_size)

    # The returned iterate must not have a larger cost than the start point.
    assert _obj_t_analysis(x0, y, h, lbda) >= _obj_t_analysis(x1, y, h, lbda)
Example #2
0
def test_loss_coherence():
    """ Check that the network loss agrees with the analytic objective. """
    # Problem sizes: HRF length, valid length, and the induced full length.
    tr, hrf_len, valid_len = 1.0, 30, 100
    full_len = valid_len + hrf_len - 1
    batch = 10
    reg = 0.1

    # Random source/signal pair with matching shapes.
    hrf = double_gamma_hrf(tr, hrf_len)
    u = np.random.randn(batch, valid_len)
    x = np.random.randn(batch, full_len)

    # Network-side loss on tensor inputs vs. reference numpy objective.
    solver = LpgdTautStringHRF(h=hrf, n_times_valid=valid_len, n_layers=10)
    u_t = check_tensor(u)
    x_t = check_tensor(x)
    net_loss = float(solver._loss_fn(x_t, reg, u_t))
    ref_loss = _obj_t_analysis(u, x, hrf, reg)

    np.testing.assert_allclose(net_loss, ref_loss, rtol=1e-3)
                  n_times_valid=n_times_valid,
                  name='Iterative-z',
                  max_iter_z=int(args.iter_mult * args.max_iter_z),
                  solver_type='fista-z-step',
                  verbose=1)
    ta_iter = TA(**params)

    t0 = time.time()
    _, _, _ = ta_iter.prox_t(y_test, args.temp_reg)
    print(f"ta_iterative.prox_t finished : {time.time() - t0:.2f}s")
    loss_ta_iter = ta_iter.l_loss_prox_t

    n_samples = nx * ny * nz
    y_test_ravel = y_test.reshape(n_samples, args.n_time_frames)
    _, u0, _ = init_vuz(H, D, y_test_ravel, args.temp_reg)
    loss_ta_learn = [_obj_t_analysis(u0, y_test_ravel, h, args.temp_reg)]

    init_net_params = None
    params = dict(t_r=t_r,
                  h=h,
                  n_times_valid=n_times_valid,
                  net_solver_training_type='recursive',
                  name='Learned-z',
                  solver_type='learn-z-step',
                  verbose=1,
                  max_iter_training_net=args.max_training_iter)

    for i, n_layers in enumerate(all_layers):

        params['max_iter_z'] = n_layers
Example #4
0
 def obj(x):
     """ Objective value at x with the regularization weight set to zero. """
     return _obj_t_analysis(x, y, h, lbda=0.0)
Example #5
0
 def obj(x):
     """ Objective value at x with the enclosing scope's regularization. """
     return _obj_t_analysis(x, y, h, lbda)