Example #1
def apply_solver(evoked, forward, noise_cov, loose=0.2, depth=0.8, K=2000):
    all_ch_names = evoked.ch_names
    # put the forward solution in fixed orientation if it's not already
    if loose is None and not is_fixed_orient(forward):
        forward = deepcopy(forward)
        forward = mne.convert_forward_solution(forward, force_fixed=True)

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward,
        evoked.info,
        noise_cov,
        pca=False,
        depth=depth,
        loose=loose,
        weights=None,
        weights_min=None)

    n_locations = gain.shape[1]
    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    M = evoked.data[sel]

    # Whiten data
    M = np.dot(whitener, M)

    n_orient = 1 if is_fixed_orient(forward) else 3

    # The value of lambda for which the solution will be all zero
    lambda_max = norm_l2inf(np.dot(gain.T, M), n_orient)

    lambda_ref = 0.1 * lambda_max

    Xs, active_sets = mm_mixed_norm_bayes(M,
                                          gain,
                                          lambda_ref,
                                          n_orient=n_orient,
                                          K=K,
                                          verbose=True)[:2]

    solution_support = np.zeros((K, n_locations))
    stcs, obj_fun = [], []
    for k in range(K):
        X = np.zeros((n_locations, Xs[k].shape[1]))
        X[active_sets[k]] = Xs[k]
        block_norms_new = compute_block_norms(X, n_orient)
        block_norms_new = (block_norms_new > 0.05 * block_norms_new.max())
        solution_support[k, :] = block_norms_new

        stc = _make_sparse_stc(Xs[k],
                               active_sets[k],
                               forward,
                               tmin=0.,
                               tstep=1. / evoked.info['sfreq'])
        stcs.append(stc)
        obj_fun.append(
            energy_l2half_reg(M, gain, stc.data, active_sets[k], lambda_ref,
                              n_orient))
    return solution_support, stcs, obj_fun
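# A minimal usage sketch for the apply_solver function defined above. It is
# only an illustration: the file names follow the standard MNE sample-dataset
# layout and may need to be adapted, and K is kept small to limit runtime.
import mne
from mne.datasets import sample

data_path = str(sample.data_path())
meg_dir = data_path + '/MEG/sample'

evoked = mne.read_evokeds(meg_dir + '/sample_audvis-ave.fif',
                          condition='Left Auditory', baseline=(None, 0))
forward = mne.read_forward_solution(
    meg_dir + '/sample_audvis-meg-eeg-oct-6-fwd.fif')
noise_cov = mne.read_cov(meg_dir + '/sample_audvis-cov.fif')

# Restrict to a short window around the evoked response to keep things fast.
evoked.crop(tmin=0.05, tmax=0.15)

solution_support, stcs, obj_fun = apply_solver(evoked, forward, noise_cov,
                                               loose=0.2, depth=0.8, K=100)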
Example #2
lambda_max = np.max(np.linalg.norm(np.dot(G.T, M), axis=1))
lambda_ref = lambda_percent / 100. * lambda_max

X_mm, active_set_mm, E = \
    iterative_mixed_norm_solver(M, G, lambda_ref, n_mxne_iter=10)

pobj_l2half_X_mm = E[-1]

print("Found support: %s" % np.where(active_set_mm)[0])

###############################################################################
# Run the solver
# --------------

Xs, active_sets, lpp_samples, lpp_Xs, pobj_l2half_Xs = \
    mm_mixed_norm_bayes(M, G, lambda_ref, K=K)

# Plot to check whether we found better local minima than the first result
# found by the iterative MM solver
plt.figure()
plt.hist(pobj_l2half_Xs, bins=20, label="Modes obj.")
plt.axvline(pobj_l2half_X_mm, label="MM obj.", color='k')
plt.legend()
plt.tight_layout()

###############################################################################
# Plot the frequency of the supports
# ----------------------------------

unique_supports = unique_rows(active_sets)
n_modes = len(unique_supports)
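# The excerpt above stops after collecting the unique supports. A minimal
# sketch of one way to plot their frequencies is shown below; it assumes
# `active_sets` is the (K, n_sources) boolean array returned by
# mm_mixed_norm_bayes above and uses np.unique(..., axis=0) in place of the
# unique_rows helper.
import numpy as np
import matplotlib.pyplot as plt

supports, counts = np.unique(active_sets, axis=0, return_counts=True)
print("Number of distinct supports (modes): %d" % len(supports))

# Fraction of the K solutions that fall on each support, most frequent first.
order = np.argsort(counts)[::-1]
plt.figure()
plt.bar(np.arange(len(counts)), counts[order] / float(active_sets.shape[0]))
plt.xlabel('Support (mode), sorted by frequency')
plt.ylabel('Fraction of the K solutions')
plt.tight_layout()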
Example #3
def test_mm_mixed_norm_bayes():
    """Basic test of the mm_mixed_norm_bayes function"""
    # First we define the problem size and the location of the active sources.
    n_features = 16
    n_samples = 24
    n_times = 5

    X_true = np.zeros((n_features, n_times))
    # Active sources at indices 5 and 10
    X_true[5, :] = 2.
    X_true[10, :] = 2.

    # Construction of a covariance matrix
    rng = np.random.RandomState(0)
    # Set the correlation of each simulated source
    corr = [0.6, 0.95]
    cov = []
    for c in corr:
        this_cov = toeplitz(c**np.arange(0, n_features // len(corr)))
        cov.append(this_cov)

    cov = np.array(linalg.block_diag(*cov))

    # Simulation of the design matrix / forward operator
    G = rng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)

    # Simulation of the data with some noise
    M = G.dot(X_true)
    M += 0.1 * np.std(M) * rng.randn(n_samples, n_times)
    n_orient = 1

    # Define the regularization parameter and run the solver
    lambda_max = norm_l2inf(np.dot(G.T, M), n_orient)
    lambda_ref = 0.3 * lambda_max
    K = 10
    random_state = 0  # set random seed to make results replicable
    out = mm_mixed_norm_bayes(M,
                              G,
                              lambda_ref,
                              n_orient=n_orient,
                              K=K,
                              random_state=random_state)

    Xs, active_sets = out[:2]
    lpp_samples, lppMAP, pobj_l2half = out[2:]

    freq_occ = np.mean(active_sets, axis=0)
    assert_equal(np.argsort(freq_occ)[-2:], [9, 5])
    assert len(Xs) == K
    assert lpp_samples.shape == (K, )
    assert pobj_l2half.shape == (K, )
    assert lppMAP.shape == (K, )

    out = mm_mixed_norm_bayes(M,
                              G,
                              lambda_ref,
                              n_orient=n_orient,
                              K=K,
                              return_samples=True,
                              random_state=random_state)

    Xs, active_sets = out[:2]
    lpp_samples, lppMAP, pobj_l2half = out[2:-2]
    X_samples, gamma_samples = out[-2:]

    freq_occ = np.mean(active_sets, axis=0)
    assert_equal(np.argsort(freq_occ)[-2:], [9, 5])
    assert len(Xs) == K
    assert lpp_samples.shape == (K, )
    assert pobj_l2half.shape == (K, )
    assert lppMAP.shape == (K, )
    assert X_samples.shape == (K, n_features, n_times, 2)
    assert gamma_samples.shape == (K, n_features, 2)
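# Standalone sketch of the simulation used in the test above: a block-diagonal
# Toeplitz covariance makes neighbouring columns of G correlated, with the
# strength set by the corresponding entry of `corr`. Dimensions and seed
# mirror the test; this only illustrates the construction.
import numpy as np
from scipy import linalg
from scipy.linalg import toeplitz

n_features = 16
corr = [0.6, 0.95]
blocks = [toeplitz(c ** np.arange(0, n_features // len(corr))) for c in corr]
cov = np.array(linalg.block_diag(*blocks))

print(cov.shape)             # (16, 16)
print(cov[0, 1], cov[8, 9])  # neighbour correlations: 0.6 (first block), 0.95 (second block)

rng = np.random.RandomState(0)
G = rng.multivariate_normal(np.zeros(n_features), cov, size=24)
# Columns within the second block are strongly correlated, which is why the
# recovered support can land on a neighbour of the true source at index 10.
print(np.corrcoef(G[:, 9], G[:, 10])[0, 1])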
Example #4
###############################################################################
# Simulation of the data with some noise
M = G.dot(X_true)
M += 0.3 * np.std(M) * rng.randn(n_samples, n_times)
n_orient = 1

###############################################################################
# Define the regularization parameter and run the solver
# ------------------------------------------------------
lambda_max = norm_l2inf(np.dot(G.T, M), n_orient)

lambda_ref = 0.1 * lambda_max
K = 2000
out = mm_mixed_norm_bayes(M,
                          G,
                          lambda_ref,
                          n_orient=n_orient,
                          K=K,
                          verbose=True)

Xs, active_sets = out[:2]

freq_occ = np.mean(active_sets, axis=0)

###############################################################################
# Plot the covariance to see the correlation of the neighboring
# sources around each simulated one (10 and 30).

plt.matshow(cov)
plt.title('Covariance')

# Plot the active support of the solution
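# A hedged sketch of one way to finish the truncated step above: plot the
# per-source frequency of occurrence and mark the simulated sources. It
# assumes `freq_occ` from the code above; `true_idx` is a placeholder to be
# replaced by the indices actually used to build X_true in this example.
import numpy as np
import matplotlib.pyplot as plt

true_idx = []  # e.g. the indices of the simulated active sources

plt.figure()
plt.stem(np.arange(freq_occ.size), freq_occ)
for idx in true_idx:
    plt.axvline(idx, color='r', linestyle='--')
plt.xlabel('Source index')
plt.ylabel('Frequency of occurrence across the K solutions')
plt.title('Active support of the solutions')
plt.tight_layout()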