def apply_solver(evoked, forward, noise_cov, loose=0.2, depth=0.8, K=2000):
    """Run the Bayesian mixed-norm sampler on evoked data.

    Whitens the sensor data with the noise covariance, draws ``K`` posterior
    samples via ``mm_mixed_norm_bayes`` and summarizes, for every sample,
    which source locations are active.

    Parameters
    ----------
    evoked : instance of mne.Evoked
        The evoked data.
    forward : instance of Forward
        The forward solution.
    noise_cov : instance of Covariance
        The noise covariance.
    loose : float | None
        Loose orientation parameter; ``None`` forces fixed orientation.
    depth : float
        Depth-weighting coefficient.
    K : int
        Number of posterior samples to draw.

    Returns
    -------
    solution_support : ndarray, shape (K, n_locations)
        Per-sample indicator (0/1) of locations whose block norm exceeds
        5% of the sample's maximum block norm.
    stcs : list of SourceEstimate
        One sparse source estimate per sample.
    obj_fun : list of float
        Objective value of each sample.
    """
    ch_names = evoked.ch_names

    # Work on a fixed-orientation copy of the forward when no loose
    # orientation constraint is requested.
    if loose is None and not is_fixed_orient(forward):
        forward = deepcopy(forward)
        forward = mne.convert_forward_solution(forward, force_fixed=True)

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca=False, depth=depth,
        loose=loose, weights=None, weights_min=None)

    n_locations = gain.shape[1]

    # Keep only the channels that made it into the gain matrix, then whiten.
    picks = [ch_names.index(name) for name in gain_info['ch_names']]
    M = np.dot(whitener, evoked.data[picks])

    n_orient = 1 if is_fixed_orient(forward) else 3

    # lambda_max is the smallest regularization for which the solution is
    # all zero; the sampler is run at 10% of that reference.
    lambda_ref = 0.1 * norm_l2inf(np.dot(gain.T, M), n_orient)

    (Xs, active_sets), _, _, _, _ = mm_mixed_norm_bayes(
        M, gain, lambda_ref, n_orient=n_orient, K=K, return_lpp=True)

    solution_support = np.zeros((K, n_locations))
    stcs = []
    obj_fun = []
    for k in range(K):
        # Scatter the sparse sample back to the full source space.
        full_X = np.zeros((n_locations, Xs[k].shape[1]))
        full_X[active_sets[k]] = Xs[k]
        norms = compute_block_norms(full_X, n_orient)
        # A location counts as active when its block norm exceeds 5% of the
        # largest block norm in this sample.
        solution_support[k, :] = norms > 0.05 * norms.max()
        stc = _make_sparse_stc(Xs[k], active_sets[k], forward, tmin=0.,
                               tstep=1. / evoked.info['sfreq'])
        stcs.append(stc)
        obj_fun.append(energy_l2half_reg(M, gain, stc.data, active_sets[k],
                                         lambda_ref, n_orient))

    return solution_support, stcs, obj_fun
def apply_solver_isdr(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8):
    """Apply the ISDR solver to evoked data.

    Selects the channels present in the forward solution, whitens the data
    with the noise covariance, and runs the module-level ``solver_isdr``
    on the (depth-weighted) gain matrix.

    Parameters
    ----------
    solver : callable
        NOTE(review): currently UNUSED — the module-level ``solver_isdr``
        is called instead. Kept in the signature for interface
        compatibility; confirm whether ``solver`` should be invoked.
    evoked : instance of mne.Evoked
        The evoked data.
    forward : instance of Forward
        The forward solution.
    noise_cov : instance of Covariance
        The noise covariance.
    loose : float in [0, 1] | 'auto'
        Loose orientation parameter; 0 forces fixed orientation.
    depth : None | float in [0, 1]
        Depth-weighting coefficient. If None, no depth weighting.

    Returns
    -------
    X : ndarray
        Solver output (source time series; reapplying source weighting and
        building an STC are currently disabled).
    """
    # Import the necessary private functions
    from mne.inverse_sparse.mxne_inverse import \
        (_prepare_gain, _check_loose_forward, is_fixed_orient,
         _reapply_source_weighting, _make_sparse_stc)

    all_ch_names = evoked.ch_names

    loose, forward = _check_loose_forward(loose, forward)

    # put the forward solution in fixed orientation if it's not already
    if loose == 0. and not is_fixed_orient(forward):
        forward = mne.convert_forward_solution(
            forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)

    # Handle depth weighting and whitening (here is no weights)
    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca=False, depth=depth,
        loose=loose, weights=None, weights_min=None)

    # Select channels of interest
    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    M = evoked.data[sel]

    # Whiten data
    M = np.dot(whitener, M).astype(np.double)

    active_set = np.zeros(gain.shape[1], dtype=bool)
    # astype already returns a new contiguous array; the former
    # `gain[:, :]` slice was a redundant no-op view.
    gain = gain.astype(np.double)

    # SC: identity connectivity prior; GA: working copy of the gain matrix.
    SC = np.eye(gain.shape[1]).astype(np.int32)
    GA = gain.copy().astype(np.double)

    X, n_active_set, coef = solver_isdr(M, gain, GA, SC)
    active_set[n_active_set] = True

    # Source-weight reapplication and STC construction are intentionally
    # disabled for now; the raw solver output is returned.
    return X
def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8):
    """Call a custom solver on evoked data.

    This function does all the necessary computation:

    - select, in the forward, the channels available in the data
    - take the noise covariance into account and spatially whiten
    - apply a loose orientation constraint as MNE solvers do
    - weight the columns of the forward operator as in the weighted
      Minimum Norm formulation, to limit the depth bias.

    Parameters
    ----------
    solver : callable
        Takes 3 parameters: data M, gain matrix G, number of dipole
        orientations per location (1 or 3). Returns 2 variables: X, the
        time series of the active dipoles, and an active set, a boolean
        mask telling which dipoles are present in X.
    evoked : instance of mne.Evoked
        The evoked data.
    forward : instance of Forward
        The forward solution.
    noise_cov : instance of Covariance
        The noise covariance.
    loose : float in [0, 1] | 'auto'
        Weights the source variances of the dipole components tangential
        to the cortical surface. 0 gives fixed orientation, 1 free
        orientation. 'auto' maps to 0.2 for surface-oriented source
        spaces and 1.0 for volumic/discrete ones.
    depth : None | float in [0, 1]
        Depth weighting coefficient. If None, no depth weighting.

    Returns
    -------
    stc : instance of SourceEstimate
        The source estimates.
    """
    # Import the necessary private functions
    from mne.inverse_sparse.mxne_inverse import \
        (_prepare_gain, is_fixed_orient, _reapply_source_weighting,
         _make_sparse_stc)

    # Depth weighting and whitening operator (no additional weights).
    forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca=False, depth=depth,
        loose=loose, weights=None, weights_min=None, rank=None)

    # Keep only the channels that made it into the gain matrix.
    names = evoked.ch_names
    picks = [names.index(ch) for ch in gain_info['ch_names']]

    # Spatially whiten the measurements.
    whitened = np.dot(whitener, evoked.data[picks])

    orientations = 1 if is_fixed_orient(forward) else 3
    X, active_set = solver(whitened, gain, orientations)
    X = _reapply_source_weighting(X, source_weighting, active_set)

    return _make_sparse_stc(X, active_set, forward, tmin=evoked.times[0],
                            tstep=1. / evoked.info['sfreq'])
def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8):
    """Call a custom solver on evoked data.

    This function does all the necessary computation:

    - select, in the forward, the channels available in the data
    - take the noise covariance into account and spatially whiten
    - apply a loose orientation constraint as MNE solvers do
    - weight the columns of the forward operator as in the weighted
      Minimum Norm formulation, to limit the depth bias.

    Parameters
    ----------
    solver : callable
        Takes 3 parameters: data M, gain matrix G, number of dipole
        orientations per location (1 or 3). Returns 2 variables: X, the
        time series of the active dipoles, and an active set, a boolean
        mask telling which dipoles are present in X.
    evoked : instance of mne.Evoked
        The evoked data.
    forward : instance of Forward
        The forward solution.
    noise_cov : instance of Covariance
        The noise covariance.
    loose : None | float in [0, 1]
        Weights the source variances of the dipole components defining the
        tangent space of the cortical surfaces. Requires surface-based,
        free-orientation forward solutions.
    depth : None | float in [0, 1]
        Depth weighting coefficient. If None, no depth weighting.

    Returns
    -------
    stc : instance of SourceEstimate
        The source estimates.
    """
    # Import the necessary private functions
    from mne.inverse_sparse.mxne_inverse import \
        (_prepare_gain, _to_fixed_ori, is_fixed_orient,
         _reapply_source_weighting, _make_sparse_stc)

    # Convert a copy of the forward to fixed orientation when no loose
    # constraint is requested.
    if loose is None and not is_fixed_orient(forward):
        forward = forward.copy()
        _to_fixed_ori(forward)

    # Depth weighting and whitening operator (no additional weights).
    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca=False, depth=depth,
        loose=loose, weights=None, weights_min=None)

    # Keep only the channels that made it into the gain matrix.
    channel_names = evoked.ch_names
    picks = [channel_names.index(ch) for ch in gain_info['ch_names']]

    # Spatially whiten the measurements.
    whitened = np.dot(whitener, evoked.data[picks])

    orientations = 1 if is_fixed_orient(forward) else 3
    X, active_set = solver(whitened, gain, orientations)
    X = _reapply_source_weighting(X, source_weighting, active_set,
                                  orientations)

    return _make_sparse_stc(X, active_set, forward, tmin=evoked.times[0],
                            tstep=1. / evoked.info['sfreq'])
] print "fixed orient" print is_fixed_orient(fwd) if not is_fixed_orient(fwd): _to_fixed_ori(fwd) print "difference of G" print np.max(np.abs(fwd['sol']['data'])) / np.min( np.abs(fwd['sol']['data'])) if whiten_flag: pca = True G, G_info, whitener, source_weighting, mask = _prepare_gain( fwd, evoked.info, noise_cov, pca=pca, depth=depth, loose=None, weights=None, weights_min=None) #Sigma_E_chol = np.linalg.cholesky(Sigma_E) #Sigma_E_chol_inv = np.linalg.inv(Sigma_E_chol) #G = np.dot(Sigma_E_chol_inv, G) # after whitening, the noise cov is assumed to identity #Sigma_E = (np.dot(Sigma_E_chol_inv, Sigma_E)).dot(Sigma_E_chol_inv.T) Sigma_E = np.eye(G.shape[0]) M = (np.dot(whitener, M)).transpose([1, 0, 2]) else: G = fwd['sol']['data'][sel, :] G_column_weighting = (np.sum(G**2, axis=0))**(depth / 2) G = G / G_column_weighting
def save_visualized_jt(M, noise_cov_path, evoked_path, Sigma_J_list, ut,
                       ROI_list, n_ROI_valid, subjects_dir, subj, fwd_path,
                       out_stc_name, out_fig_name, whiten_flag, depth=None,
                       force_fixed=True, tmin=0, tstep=0.01):
    """Reconstruct source currents J from sensor data, plot and save them.

    Computes the posterior mean of the dipole currents under a ROI-based
    Gaussian model, compares against a standard MNE solution, plots per-ROI
    time courses, and saves the across-trial std of J as an STC file.

    Parameters
    ----------
    M : ndarray, shape (q, n, T)
        Sensor data: q trials, n channels, T time points (from unpacking).
    noise_cov_path, evoked_path, fwd_path : str
        Paths to the noise covariance, evoked template and forward solution.
    Sigma_J_list : sequence
        Per-ROI source variance, one entry per element of ROI_list.
    ut : ndarray
        ROI-level time courses; assumed (q, n_ROI_valid, T) from usage —
        TODO confirm.
    ROI_list : list of index arrays
        Dipole indices belonging to each ROI.
    n_ROI_valid : int
        Number of "valid" ROIs (the leading entries of ROI_list).
    subjects_dir, subj : str
        FreeSurfer directory/subject; only used by the disabled rendering.
    out_stc_name, out_fig_name : str
        Output names for the saved STC and figures.
    whiten_flag : bool
        If True, whiten data/gain with the noise covariance; otherwise
        apply depth weighting directly on the raw gain matrix.
    depth : float | None
        Depth-weighting exponent; None is treated as 0.0.
    force_fixed : bool
        Force fixed dipole orientations in the forward solution.
    tmin, tstep : float
        Time origin and step for the saved STC and time axis.

    Returns
    -------
    int
        Always 0.

    Notes
    -----
    NOTE(review): `simupath`, `n_ROI` (and `J_true` in a disabled branch)
    are referenced but not defined here — presumably module-level globals;
    otherwise this raises NameError. Verify.
    ut and yt can be for a single time point.
    """
    if depth == None:
        depth = 0.0
    q, n, T = M.shape
    # this function returns a list, take the first element
    evoked = mne.read_evokeds(evoked_path)[0]
    # depth weighting, TO BE MODIFIED
    print force_fixed
    fwd0 = mne.read_forward_solution(fwd_path, force_fixed=force_fixed,
                                     surf_ori=True)
    fwd = copy.deepcopy(fwd0)
    noise_cov = mne.read_cov(noise_cov_path)
    Sigma_E = noise_cov.data
    # in-use masks of the two hemispheres' source spaces
    ind0 = fwd['src'][0]['inuse']
    ind1 = fwd['src'][1]['inuse']
    # positions of dipoles, normalized by the largest squared radius
    rr = np.vstack(
        [fwd['src'][0]['rr'][ind0 == 1, :], fwd['src'][1]['rr'][ind1 == 1, :]])
    rr = rr / np.max(np.sum(rr**2, axis=1))
    # orientation of dipoles
    nn = np.vstack(
        [fwd['src'][0]['nn'][ind0 == 1, :], fwd['src'][1]['nn'][ind1 == 1, :]])
    # number of dipoles
    m = rr.shape[0]
    all_ch_names = evoked.ch_names
    # good (non-bad) channel indices
    sel = [
        l for l in range(len(all_ch_names))
        if all_ch_names[l] not in evoked.info['bads']
    ]
    if force_fixed:
        print "fixed orient"
        print is_fixed_orient(fwd)
        if not is_fixed_orient(fwd):
            _to_fixed_ori(fwd)
        # sanity check: dynamic range of the gain matrix entries
        print "difference of G"
        print np.max(np.abs(fwd['sol']['data'])) / np.min(
            np.abs(fwd['sol']['data']))
    if whiten_flag:
        pca = True
        G, G_info, whitener, source_weighting, mask = _prepare_gain(
            fwd, evoked.info, noise_cov, pca=pca, depth=depth, loose=None,
            weights=None, weights_min=None)
        #Sigma_E_chol = np.linalg.cholesky(Sigma_E)
        #Sigma_E_chol_inv = np.linalg.inv(Sigma_E_chol)
        #G = np.dot(Sigma_E_chol_inv, G)
        # after whitening, the noise cov is assumed to identity
        #Sigma_E = (np.dot(Sigma_E_chol_inv, Sigma_E)).dot(Sigma_E_chol_inv.T)
        Sigma_E = np.eye(G.shape[0])
        # np.dot(2-D, 3-D) contracts M's channel axis -> (n', q, T);
        # transpose back to (q, n', T)
        M = (np.dot(whitener, M)).transpose([1, 0, 2])
    else:
        # no whitening: depth-weight the raw gain matrix column-wise
        G = fwd['sol']['data'][sel, :]
        G_column_weighting = (np.sum(G**2, axis=0))**(depth / 2)
        G = G / G_column_weighting
    # per-dipole prior variance, filled ROI by ROI
    QJ = np.zeros(m)
    for l in range(len(ROI_list)):
        QJ[ROI_list[l]] = Sigma_J_list[l]
    # ROI membership indicator matrix (dipole x ROI)
    L = np.zeros([m, n_ROI_valid])
    for l in range(n_ROI_valid):
        L[ROI_list[l], l] = 1.0
    if False:
        # disabled alternative: compute the inverse operator directly
        inv_Sigma_E = np.linalg.inv(Sigma_E)
        GQE = G.T.dot(inv_Sigma_E)
        GQEG = GQE.dot(G)
        QJ_inv = 1.0 / QJ
        GQEG += np.diag(QJ_inv)
        inv_op = np.linalg.inv(GQEG)
        QJL = (L.T / QJ).T
    # =============debug =========================
    GQJ = G * QJ
    # marginal sensor covariance Q0 = QE + G QJ G'
    Q0 = Sigma_E + (GQJ).dot(G.T)
    invQ0 = np.linalg.inv(Q0)
    chol = np.linalg.cholesky(invQ0)  # NOTE(review): unused — confirm
    # QJ G'(QE+GQJG')^{-1}
    operator_y = GQJ.T.dot(invQ0)
    # I - QJ G' (QE+ GQJG')^{-1} G
    operator_u = (np.eye(m) - operator_y.dot(G)).dot(L)
    # posterior variance: QJ - QJ G' (QE+ GQJG')^{-1} G QJ
    post_var = (np.eye(m) - operator_y.dot(G)) * QJ
    marg_std = np.sqrt(np.diag(post_var))
    if False:
        # disabled diagnostic (J / J_true undefined at this point)
        trial_ind = 0
        time_ind = 0
        plt.errorbar(range(m), J[trial_ind, :, time_ind], 2 * marg_std)
        plt.plot(range(m), J_true[trial_ind, :, time_ind])
    # posterior mean of J, trial by trial
    J = np.zeros([q, m, T])
    for r in range(q):
        #J[r] = inv_op.dot(np.dot(GQE, M[r]) + np.dot(QJL, ut[r]))
        J[r] = operator_y.dot(M[r]) + operator_u.dot(ut[r])
    LU = (np.dot(L, ut)).transpose([1, 0, 2])
    GLU = (np.dot(G.dot(L), ut)).transpose([1, 0, 2])
    plt.plot(J[0, :, 0].ravel(), LU[0, :, 0].ravel(), '.')
    # mne results
    evoked = mne.read_evokeds(evoked_path)[0]
    # depth weighting, TO BE MODIFIED
    noise_cov = mne.read_cov(noise_cov_path)
    ch_names = evoked.info['ch_names']
    # create the epochs first?
    # embed data into the full channel set (bad channels stay zero)
    M_all = np.zeros([q, len(ch_names), T])
    valid_channel_ind = [
        i for i in range(len(ch_names))
        if ch_names[i] not in evoked.info['bads']
    ]
    M_all[:, valid_channel_ind, :] = M.copy()
    events = np.ones([M.shape[0], 3], dtype=np.int)
    epochs = mne.EpochsArray(data=M_all, info=evoked.info, events=events,
                             tmin=evoked.times[0], event_id=None, reject=None)
    method = "MNE"
    lambda2 = 1.0
    # depth=0 means "no depth weighting" here, which MNE expresses as None
    depth0 = None if depth == 0 else depth
    inv_op = mne.minimum_norm.make_inverse_operator(evoked.info, fwd,
                                                    noise_cov, loose=0.0,
                                                    depth=depth0, fixed=True)
    stcs = mne.minimum_norm.apply_inverse_epochs(epochs, inv_op,
                                                 lambda2=lambda2,
                                                 method=method)
    J_mne = np.zeros([q, m, T])
    for r in range(q):
        J_mne[r] = stcs[r].data
    # NOTE(review): W and eta_hat are computed but never used — confirm
    W = J - LU
    eta_hat = (operator_y.dot(M - GLU)).transpose([1, 0, 2])
    # # compute the std of J
    J_std = np.std(J, axis=0)
    u_std = np.std(ut, axis=0)
    J_mne_std = np.std(J_mne, axis=0)
    # NOTE(review): `simupath` not defined in this scope — presumably a
    # module-level global naming the simulation .mat file; verify.
    mat_dict = scipy.io.loadmat(simupath)
    trial_ind = 0
    tmp_J_list = [
        J[trial_ind], J_mne[trial_ind], J_std, J_mne_std,
        mat_dict['J'][trial_ind]
    ]
    tmp_u_list = [
        ut[trial_ind], ut[trial_ind], u_std, u_std, mat_dict['u'][trial_ind]
    ]
    suffix_list = ['trial%d' % trial_ind, "mne", "std", "mnestd", "truth"]
    # some other visualization
    # trial_ind = 0
    # tmp_J_list = [J[trial_ind], mat_dict['J'][trial_ind]]
    # tmp_u_list = [ut[trial_ind], mat_dict['u'][trial_ind]]
    # suffix_list = ['trial%d' % trial_ind, "truth"]
    times_in_ms = (np.arange(tmin, tmin + T * tstep, tstep)) * 1000.0
    # one figure per quantity: estimate, MNE, stds and ground truth
    for ll in range(5):
        tmp_J, tmp_u, suffix = tmp_J_list[ll], tmp_u_list[ll], suffix_list[ll]
        plt.figure()
        # NOTE(review): `n_ROI` not defined in this scope — presumably a
        # module-level global (total number of ROIs); verify.
        for l in range(n_ROI):
            ROI_id = 0
            _ = plt.subplot(n_ROI, 1, l + 1)
            _ = plt.plot(times_in_ms, tmp_J[ROI_list[l], :].T, 'b', alpha=0.1)
            if l < n_ROI_valid:
                _ = plt.plot(times_in_ms, tmp_u[l, :], 'k', lw=2, alpha=1)
                ROI_id = l + 1
            _ = plt.xlabel('time ms')
            _ = plt.title("ROI %d" % ROI_id)
        _ = plt.tight_layout()
        #_ = plt.savefig(out_fig_name + "%s.pdf" %suffix)
    # save as an STC
    vertices_to = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
    stc = mne.SourceEstimate(data=J_std, vertices=vertices_to, tmin=tmin,
                             tstep=tstep)
    stc.save(out_stc_name)
    # render the images (PySurfer rendering currently disabled)
    clim = dict(kind='value', lims=np.array([0.1, 2, 10]) * 1E-10)
    time_seq = np.arange(0, T, 10)
    surface = "inflated"
    # brain = stc.plot(surface= surface, hemi='both', subjects_dir=subjects_dir,
    #                  subject = subj, clim=clim)
    # for k in time_seq:
    #     brain.set_data_time_index(k)
    #     for view in ['ventral']:
    #         brain.show_view(view)
    #         im_name = out_fig_name + "%03dms_%s.pdf" \
    #             %(np.int(np.round(stc.times[k]*1000)), view)
    #         brain.save_image(im_name)
    #     print k
    # brain.close()
    #
    # for hemi in ['lh','rh']:
    #     brain = stc.plot(surface=surface, hemi= hemi, subjects_dir=subjects_dir,
    #                      subject = subj, clim=clim)
    #     for k in time_seq:
    #         brain.set_data_time_index(k)
    #         for view in ['medial','lateral']:
    #             brain.show_view(view)
    #             im_name = out_fig_name + "%03dms_%s_%s.pdf" \
    #                 %(np.int(np.round(stc.times[k]*1000)), view, hemi)
    #             brain.save_image(im_name)
    #     brain.close()
    return 0
def get_estimate_ks(M, ROI_list, n_ROI_valid, fwd_path, evoked_path,
                    noise_cov_path, out_name, prior_Q0=None, prior_Q=None,
                    prior_sigma_J_list=None, prior_A=None, depth=None,
                    MaxIter0=100, MaxIter=50, MaxIter_coarse=10, tol0=1E-4,
                    tol=1E-2, verbose0=True, verbose=False,
                    verbose_coarse=True, L_flag=False, whiten_flag=True,
                    n_ini=0, n_pool=2, flag_A_time_vary=False, use_pool=False,
                    ini_Gamma0_list=None, ini_A_list=None,
                    ini_Gamma_list=None, ini_sigma_J_list=None,
                    force_fixed=True, flag_inst_ini=True, a_ini=0.1):
    """Fit the ROI state-space (Kalman-smoother) model by multi-start EM.

    Builds several initializations (default, least-squares, instantaneous
    model, piecewise-fixed-A), runs a coarse EM (``use_EM``) on each,
    refines the best one, and saves the result to a .mat file.

    Inputs
    ------
    M : ndarray, [q, n_channels, n_times]
        Sensor data.
    ROI_list : list
        ROI indices list.
    fwd_path, evoked_path, noise_cov_path : str
        Full paths of the forward solution, evoked template and noise
        covariance.
    out_name : str
        Full path of the mat file to save.
    prior_Q0, prior_Q, prior_sigma_J_list :
        Not implemented; may be inverse gamma or gamma.
        NOTE: due to scale issues, no depth weighting should be allowed in
        the simulation, because normalizing G strongly violates the source
        generation assumptions.
    prior_A : dict(lambda0=0.0, lambda1=1.0)
    depth : float | None
        Forward weighting parameter; None is treated as 0.0.
    whiten_flag : bool
        If True, whiten the data so the sensor error is identity.
    n_ini : int
        Number of random initializations.
    ini_Gamma0_list, ini_A_list, ini_Gamma_list, ini_sigma_J_list : lists
        Initial values; must all have the same length.

    Notes
    -----
    NOTE(review): relies on module-level names (``mne``, ``copy``, ``np``,
    ``scipy``, ``Pool``, ``use_EM``, ``get_lsq_u``, ``get_param_given_u``,
    ``inst``, ``_prepare_gain``, ``is_fixed_orient``, ``_to_fixed_ori``) —
    all must be imported at module scope.
    """
    if depth == None:
        depth = 0.0
    q, _, T0 = M.shape
    # T state transitions for T0 time points
    T = T0 - 1
    # this function returns a list, take the first element
    evoked = mne.read_evokeds(evoked_path)[0]
    # depth weighting, TO BE MODIFIED
    print force_fixed
    fwd0 = mne.read_forward_solution(fwd_path, force_fixed=force_fixed,
                                     surf_ori=True)
    fwd = copy.deepcopy(fwd0)
    noise_cov = mne.read_cov(noise_cov_path)
    Sigma_E = noise_cov.data
    # in-use masks of the two hemispheres' source spaces
    ind0 = fwd['src'][0]['inuse']
    ind1 = fwd['src'][1]['inuse']
    # positions of dipoles, normalized by the largest squared radius
    rr = np.vstack([fwd['src'][0]['rr'][ind0 == 1, :],
                    fwd['src'][1]['rr'][ind1 == 1, :]])
    rr = rr / np.max(np.sum(rr**2, axis=1))
    # orientations of dipoles
    nn = np.vstack([fwd['src'][0]['nn'][ind0 == 1, :],
                    fwd['src'][1]['nn'][ind1 == 1, :]])
    # number of dipoles
    #m = rr.shape[0]
    all_ch_names = evoked.ch_names
    # good (non-bad) channel indices; only used in the non-whitened branch
    sel = [l for l in range(len(all_ch_names))
           if all_ch_names[l] not in evoked.info['bads']]
    print "fixed orient"
    print is_fixed_orient(fwd)
    if not is_fixed_orient(fwd):
        _to_fixed_ori(fwd)
    # sanity check: dynamic range of the gain matrix entries
    print "difference of G"
    print np.max(np.abs(fwd['sol']['data'])) / np.min(np.abs(fwd['sol']['data']))
    if whiten_flag:
        pca = True
        G, G_info, whitener, source_weighting, mask = _prepare_gain(
            fwd, evoked.info, noise_cov, pca=pca, depth=depth, loose=None,
            weights=None, weights_min=None)
        #Sigma_E_chol = np.linalg.cholesky(Sigma_E)
        #Sigma_E_chol_inv = np.linalg.inv(Sigma_E_chol)
        #G = np.dot(Sigma_E_chol_inv, G)
        # after whitening, the noise cov is assumed to identity
        #Sigma_E = (np.dot(Sigma_E_chol_inv, Sigma_E)).dot(Sigma_E_chol_inv.T)
        Sigma_E = np.eye(G.shape[0])
        # np.dot(2-D, 3-D) -> (n', q, T); transpose back to (q, n', T)
        M = (np.dot(whitener, M)).transpose([1, 0, 2])
    else:
        # no whitening: depth-weight the raw gain matrix column-wise
        G = fwd['sol']['data'][sel, :]
        G_column_weighting = (np.sum(G**2, axis=0))**(depth / 2)
        G = G / G_column_weighting
    # prior for L
    L_list_param = 1.5
    # squared-exponential kernel a exp(-b ||x-y||^2), modulated by nn . nn
    Q_L_list = list()
    for i in range(n_ROI_valid):
        tmp_n = len(ROI_list[i])
        tmp = np.zeros([tmp_n, tmp_n])
        for i0 in range(tmp_n):
            for i1 in range(tmp_n):
                # NOTE(review): rr/nn are indexed with ROI-local i0/i1, not
                # ROI_list[i][i0] — confirm this is intended.
                tmp[i0, i1] = np.dot(nn[i0, :], nn[i1, :]) * np.exp(
                    -L_list_param * (np.sum((rr[i0, :] - rr[i1, :])**2)))
        #print np.linalg.cond(tmp)
        Q_L_list.append(tmp)
    prior_L_precision = copy.deepcopy(Q_L_list)
    for i in range(n_ROI_valid):
        prior_L_precision[i] = np.linalg.inv(Q_L_list[i])
    y_array = M.transpose([0, 2, 1])  # q,T,n
    scale_factor = 1E-9
    p = n_ROI_valid
    L_list_0 = list()
    for i in range(n_ROI_valid):
        L_list_0.append(np.ones(ROI_list[i].size))
    # default param list, A being all zero
    ini_param_list = list()
    Gamma0_0 = np.eye(p) * scale_factor
    Gamma_0 = np.eye(p) * scale_factor
    if flag_A_time_vary:
        A_0 = np.zeros([T, p, p])
        for t in range(T):
            A_0[t] = np.eye(p) * a_ini
    else:
        A_0 = np.eye(p) * a_ini
    sigma_J_list_0 = np.ones(p) * scale_factor
    if ini_Gamma0_list is None:
        ini_Gamma0_list = list()
    if ini_A_list is None:
        ini_A_list = list()
    if ini_Gamma_list is None:
        ini_Gamma_list = list()
    if ini_sigma_J_list is None:
        ini_sigma_J_list = list()
    #if n_ini >= 0, append a new initialization, else do not
    if n_ini >= 0:
        ini_Gamma0_list.append(Gamma0_0)
        ini_A_list.append(A_0)
        ini_Gamma_list.append(Gamma_0)
        ini_sigma_J_list.append(sigma_J_list_0)
    # NOTE(review): ini_param_list re-assigned here (also set above)
    ini_param_list = list()
    for l1 in range(len(ini_Gamma0_list)):
        ini_param_list.append(dict(
            y_array=y_array, G=G, ROI_list=ROI_list, Sigma_E=Sigma_E,
            Gamma0_0=ini_Gamma0_list[l1], A_0=ini_A_list[l1],
            Gamma_0=ini_Gamma_list[l1],
            sigma_J_list_0=ini_sigma_J_list[l1], L_list_0=L_list_0,
            flag_A_time_vary=flag_A_time_vary, prior_Q0=prior_Q0,
            prior_A=prior_A, prior_Q=prior_Q,
            prior_L_precision=prior_L_precision,
            prior_sigma_J_list=prior_sigma_J_list, MaxIter0=MaxIter0,
            tol0=tol0, verbose0=False, MaxIter=MaxIter_coarse, tol=tol,
            verbose=verbose_coarse, L_flag=L_flag))
    # second initialization, least squares
    m = G.shape[1]
    L = np.zeros([m, n_ROI_valid])
    for i in range(n_ROI_valid):
        L[ROI_list[i], i] = L_list_0[i]
    C = G.dot(L)
    R0 = Sigma_E.copy()
    for l in range(len(sigma_J_list_0)):
        R0 += sigma_J_list_0[l]**2 * G[:, ROI_list[l]].dot(G[:, ROI_list[l]].T)
    u_array_hat = get_lsq_u(y_array, R0, C)
    # set priors all to None, avoid coordinate descent to get the global solution
    Gamma0_ls, A_ls, Gamma_ls = get_param_given_u(
        u_array_hat, Gamma0_0, A_0, Gamma_0,
        flag_A_time_vary=flag_A_time_vary, prior_Q0=None, prior_A=None,
        prior_Q=None, MaxIter0=MaxIter0, tol0=tol0, verbose0=verbose0,
        MaxIter=MaxIter, tol=tol, verbose=verbose)
    # debug
    print "Gamma0_ls and Gamma_ls"
    print Gamma0_ls
    print Gamma_ls
    if n_ini >= 0:
        ini_param_list.append(dict(
            y_array=y_array, G=G, ROI_list=ROI_list, Sigma_E=Sigma_E,
            Gamma0_0=Gamma0_ls, A_0=A_ls, Gamma_0=Gamma_ls,
            sigma_J_list_0=sigma_J_list_0, L_list_0=L_list_0,
            flag_A_time_vary=flag_A_time_vary, prior_Q0=prior_Q0,
            prior_A=prior_A, prior_Q=prior_Q,
            prior_L_precision=prior_L_precision,
            prior_sigma_J_list=prior_sigma_J_list, MaxIter0=MaxIter0,
            tol0=tol0, verbose0=False, MaxIter=MaxIter_coarse, tol=tol,
            verbose=verbose_coarse, L_flag=L_flag))
    if flag_inst_ini:
        # run the instantaneous model to get initialization for Q and sigma_J_list
        print "initilization using my instantaneous model"
        t_ind = 1
        MMT = M[:, :, t_ind].T.dot(M[:, :, t_ind])
        Qu0 = np.eye(p) * scale_factor**2
        Sigma_J_list0 = np.ones(len(ROI_list)) * scale_factor**2
        # these params are not used by the chosen flags below
        alpha, beta = 1.0, 1.0; nu = p + 1; V_inv = np.eye(p) * 1E-4; eps = 1E-13;
        inv_Q_L_list = list()
        for i in range(n_ROI_valid):
            inv_Q_L_list.append(np.eye(len(ROI_list[i])))
        Qu_hat0, Sigma_J_list_hat0, L_list_hat, obj = inst.get_map_coor_descent(
            Qu0, Sigma_J_list0, L_list_0, ROI_list, G, MMT, q, Sigma_E, nu,
            V_inv, inv_Q_L_list, alpha, beta, prior_Q=False,
            prior_Sigma_J=False, prior_L=False, Q_flag=True,
            Sigma_J_flag=True, L_flag=False, tau=0.8, step_ini=1.0,
            MaxIter=MaxIter, tol=tol, eps=eps, verbose=verbose,
            verbose0=verbose0, MaxIter0=MaxIter0, tol0=tol0)
        print Qu_hat0, Sigma_J_list_hat0
        ini_param_list.append(dict(
            y_array=y_array, G=G, ROI_list=ROI_list, Sigma_E=Sigma_E,
            Gamma0_0=np.linalg.cholesky(Qu_hat0), A_0=A_0,
            Gamma_0=np.linalg.cholesky(Qu_hat0),
            sigma_J_list_0=np.sqrt(Sigma_J_list_hat0), L_list_0=L_list_0,
            flag_A_time_vary=flag_A_time_vary, prior_Q0=prior_Q0,
            prior_A=prior_A, prior_Q=prior_Q,
            prior_L_precision=prior_L_precision,
            prior_sigma_J_list=prior_sigma_J_list, MaxIter0=MaxIter0,
            tol0=tol0, verbose0=False, MaxIter=MaxIter_coarse, tol=tol,
            verbose=verbose_coarse, L_flag=L_flag))
    if n_ini > 0 and flag_A_time_vary:
        # cut the time into n_ini segments evenly, compute the fixed A,
        # and then concatenate them
        time_ind_dict_list = list()  # each element is a dict with l and time_ind
        for l in range(n_ini):
            # 1+2+..+ n_ini
            # segment y_array!
            n_time_per_segment = (T + 1) // (l + 1)
            if l == 0:
                time_ind_dict_list.append(dict(l=l, time_ind=range(T + 1)))
            else:
                for l0 in range(l):
                    time_ind = range(l0 * n_time_per_segment,
                                     (l0 + 1) * n_time_per_segment + 1)
                    time_ind_dict_list.append(dict(l=l, time_ind=time_ind))
                # final segment: from the last boundary (l0 leaks from the
                # loop above) to the end
                time_ind_dict_list.append(dict(
                    l=l, time_ind=range((l0 + 1) * n_time_per_segment, T + 1)))
        # Gamma0, Gamma0_0, L_list_0, sigma_J_list_0 are already defined
        tmp_A0 = np.eye(p) * 0.9
        ini_param_fixed_A_list = list()
        for l0 in range(len(time_ind_dict_list)):
            print l0
            y_array_tmp = y_array[:, time_ind_dict_list[l0]['time_ind'], :]
            print y_array_tmp.shape
            ini_param_fixed_A_list.append(dict(
                y_array=y_array_tmp, G=G, ROI_list=ROI_list, Sigma_E=Sigma_E,
                Gamma0_0=Gamma0_0, A_0=tmp_A0, Gamma_0=Gamma_0,
                sigma_J_list_0=sigma_J_list_0, L_list_0=L_list_0,
                flag_A_time_vary=False, prior_Q0=prior_Q0, prior_A=prior_A,
                prior_Q=prior_Q, prior_L_precision=prior_L_precision,
                prior_sigma_J_list=prior_sigma_J_list, MaxIter0=MaxIter0,
                tol0=tol0, verbose0=False, MaxIter=MaxIter_coarse, tol=tol,
                verbose=verbose_coarse, L_flag=L_flag))
        # solve the individual segments
        if use_pool:
            pool = Pool(n_pool)
            result_fixed_list = pool.map(use_EM, ini_param_fixed_A_list)
            pool.close()
        else:
            result_fixed_list = list()
            for l0 in range(len(ini_param_fixed_A_list)):
                print "fixed %d th ini_param" % l0
                result_fixed_list.append(use_EM(ini_param_fixed_A_list[l0]))
        # combine new A0_piecewise, add them to param list
        for l in range(n_ini):
            relevant_ind = [l0 for l0 in range(len(time_ind_dict_list))
                            if time_ind_dict_list[l0]['l'] == l]
            tmp_A0 = np.zeros([T, p, p])
            tmp_Q0 = np.zeros([p, p])
            for l0 in relevant_ind:
                tmp_time_ind = time_ind_dict_list[l0]['time_ind']
                for t0 in tmp_time_ind[1::]:
                    tmp_A0[t0 - 1, :, :] = result_fixed_list[l0]['A']
                tmp_Gamma = result_fixed_list[l0]['Gamma']
                # NOTE(review): Gamma.dot(Gamma), not Gamma.dot(Gamma.T) —
                # confirm this is intended for reconstructing Q.
                tmp_Q0 += tmp_Gamma.dot(tmp_Gamma)
            tmp_Q0 /= np.float(len(relevant_ind))
            ini_param_list.append(dict(
                y_array=y_array, G=G, ROI_list=ROI_list, Sigma_E=Sigma_E,
                Gamma0_0=Gamma0_0, A_0=tmp_A0,
                Gamma_0=np.linalg.cholesky(tmp_Q0),
                sigma_J_list_0=sigma_J_list_0, L_list_0=L_list_0,
                flag_A_time_vary=flag_A_time_vary, prior_Q0=prior_Q0,
                prior_A=prior_A, prior_Q=prior_Q,
                prior_L_precision=prior_L_precision,
                prior_sigma_J_list=prior_sigma_J_list, MaxIter0=MaxIter0,
                tol0=tol0, verbose0=False, MaxIter=MaxIter_coarse, tol=tol,
                verbose=False, L_flag=L_flag))
    # after obtaining the multiple starting points, solve them with a few iterations
    # try parallel processing
    print "optimizing %d initializations" % len(ini_param_list)
    if use_pool:
        print "using pool"
        pool = Pool(n_pool)
        result_list = pool.map(use_EM, ini_param_list)
        pool.close()
    else:
        result_list = list()
        for l in range(len(ini_param_list)):
            result_list.append(use_EM(ini_param_list[l]))
    # pick the coarse run with the smallest objective
    obj_all = np.zeros(len(result_list))
    for l in range(len(result_list)):
        obj_all[l] = result_list[l]['obj']
    print obj_all
    i_star = np.argmin(obj_all)
    # refine the best coarse solution with the full iteration budget
    ini_param = dict(
        y_array=y_array, G=G, ROI_list=ROI_list, Sigma_E=Sigma_E,
        Gamma0_0=result_list[i_star]['Gamma0'],
        A_0=result_list[i_star]['A'],
        Gamma_0=result_list[i_star]['Gamma'],
        sigma_J_list_0=result_list[i_star]['sigma_J_list'],
        L_list_0=result_list[i_star]['L_list'],
        flag_A_time_vary=flag_A_time_vary, prior_Q0=prior_Q0,
        prior_A=prior_A, prior_Q=prior_Q,
        prior_L_precision=prior_L_precision,
        prior_sigma_J_list=prior_sigma_J_list, MaxIter0=MaxIter0, tol0=tol0,
        verbose0=verbose0, MaxIter=MaxIter, tol=tol, verbose=verbose,
        L_flag=L_flag)
    result0 = use_EM(ini_param)
    Gamma0_hat, A_hat, Gamma_hat = \
        result0['Gamma0'], result0['A'], result0['Gamma']
    sigma_J_list_hat = result0['sigma_J_list']
    L_list_hat = result0['L_list']
    print result0['obj']
    # convert Cholesky factors back to covariances and save
    result = dict(Q0_hat=Gamma0_hat.dot(Gamma0_hat.T),
                  Q_hat=Gamma_hat.dot(Gamma_hat.T), A_hat=A_hat,
                  Sigma_J_list_hat=sigma_J_list_hat**2,
                  L_list_hat=L_list_hat,
                  u_array_hat=result0['u_t_T_array'], obj=result0['obj'])
    scipy.io.savemat(out_name, result)
def get_STFT_R_solution(evoked_list, X, fwd_list0, G_ind, noise_cov,
                        label_list, GroupWeight_Param, active_set_z0,
                        alpha_seq, beta_seq, gamma_seq,
                        loose=None, depth=0.0,
                        maxit=500, tol=1e-4, wsize=16, tstep=4,
                        window=0.02, L2_option=0, delta_seq=None,
                        coef_non_zero_mat=None, Z0_l2=None,
                        Maxit_J=10, Incre_Group_Numb=50, dual_tol=0.01,
                        Flag_backtrack=True, L0=1.0, eta=1.5,
                        Flag_verbose=False, Flag_nonROI_L2=False):
    '''
    Compute the L21 (group-sparse) or L2 inverse solution of the STFT
    regression.

    Parameters
    ----------
    evoked_list : list of Evoked objects, one per trial.
    X : [n_trials, p] design matrix of the regression.
    fwd_list0 : list of forward solution objects, one per run.
    G_ind : [n_trials,] run index of each trial, starting from zero.
    noise_cov : the noise covariance object.
    label_list : a list of labels or ROIs. It can be None; in that case
        each individual dipole is one group, GroupWeight_Param becomes
        invalid (penalty alpha is applied to every dipole) and
        Flag_nonROI_L2 is forced to False.
    GroupWeight_Param : ratio of weights within ROIs / outside ROIs.
        Group weight = 1 / n_dipoles in the group, times this ratio for
        ROI groups, then normalized to sum to one.
    active_set_z0 : [n_dipoles,] boolean, the initial active set.
    alpha_seq : tuning sequence for alpha (the group penalty).
    beta_seq : tuning sequence for beta (penalty for a single STFT basis
        function).
    gamma_seq : tuning sequence for gamma.
    loose, depth : the loose and depth parameters for the source space.
        depth in [0, 1] normalizes the forward matrix by dividing each
        column with (np.sum(G**2, axis=0))**depth, so that deeper source
        points can have larger influence. For depth weighting to be valid,
        the input forward objects must not have fixed orientation.
    maxit : maximum number of iterations.
    tol : numerical tolerance of the optimization.
    wsize : window size of the STFT.
    tstep : time step of the STFT.
    window : windowing applied to the data, just to remove edge effects.
    L2_option : 0, only compute the L21 solution;
        1, after computing the L21 solution, use it as the active set and
        get an L2 solution (if delta_seq is provided, run cross validation
        to pick the best tuning parameter);
        2, only compute the L2 solution; coef_non_zero_mat must not be
        None for this option, and active_set_z0 / active_t_ind must
        correspond to the active set.
    delta_seq : tuning sequence for the L2 solution; if None, a default
        value will be used.
    coef_non_zero_mat : [active_set.sum(), n_coefs*p] boolean matrix,
        active set of coefficients, e.g. coef_non_zero_mat = np.abs(Z) > 0.
    Z0_l2 : the same size as coef_non_zero_mat, initial value for the L2
        problem.
    Maxit_J : when solving the L21 problem, maximum number of greedy steps
        to take in the active-set greedy method.
    Incre_Group_Numb : when solving the L21 problem, in the greedy step,
        each time include this number of first-level groups.
    dual_tol : when solving the L21 problem, stop the greedy method if the
        violation of KKT is smaller than this value.
    Flag_backtrack, L0, eta : parameters for backtracking line search.
    Flag_verbose : whether to print the optimization details of solving
        the L21 problem.
    Flag_nonROI_L2 : if True, all dipoles outside the ROIs are one large
        group.

    Returns
    -------
    Z_full : [n_active_dipoles, n_coefs*p] complex matrix, the regression
        results.
    active_set : [n_dipoles,] boolean array, dipole active set.
    active_t_ind : [n_step,] boolean array, temporal active set; should be
        a full True vector.
    alpha_star, beta_star, gamma_star, delta_star : the selected tuning
        parameters (None where not applicable).
    cv_MSE_lasso, cv_MSE_L2 : cross-validation errors (0 when no CV ran).
    '''
    # =========================================================================
    # some parameters to prepare the forward solution
    weights, weights_min, pca = None, None, True
    all_ch_names = evoked_list[0].ch_names
    info = evoked_list[0].info
    n_trials = len(evoked_list)
    # put the forward solution in fixed orientation if it's not already
    n_runs = len(np.unique(G_ind))
    G_list = list()
    whitener_list = list()
    # work on a copy so the caller's forward objects are not modified in place
    fwd_list = deepcopy(fwd_list0)
    for run_id in range(n_runs):
        if loose is None and not is_fixed_orient(fwd_list[run_id]):
            # follow the tf_mixed_norm
            _to_fixed_ori(fwd_list[run_id])
        # mask should be None (no weights are passed, so no source masking)
        gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
            fwd_list[run_id], info, noise_cov, pca,
            depth, loose, weights, weights_min)
        G_list.append(gain)
        whitener_list.append(whitener)
    # to debug
    # print np.linalg.norm(G_list[0]-G_list[1])/np.linalg.norm(G_list[0])
    # print np.linalg.norm(whitener_list[0]-whitener_list[1])
    # the whitener is the same across runs
    # apply the window to the data (taper the edges of each trial)
    if window is not None:
        for r in range(n_trials):
            evoked_list[r] = _window_evoked(evoked_list[r], window)
    # prepare the sensor data
    # NOTE(review): sel / gain_info come from the LAST run's _prepare_gain;
    # this assumes all runs share the same channel selection — confirm.
    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    _, n_times = evoked_list[0].data[sel].shape
    n_sensors = G_list[0].shape[0]
    # M holds the whitened sensor data for all trials
    # (np.float is a deprecated numpy alias for the builtin float)
    M = np.zeros([n_sensors, n_times, n_trials], dtype=np.float)
    # Whiten data
    logger.info('Accessing and Whitening data matrix.')
    # deal with SSP
    # the projector information should be applied to Y
    info = evoked_list[0].info
    # all forward solutions must have the same channels;
    # if there are bad channels, make sure to remove them for all trials
    # before using this function
    fwd_ch_names = [c['ch_name'] for c in fwd_list[0]['info']['chs']]
    ch_names = [c['ch_name'] for c in info['chs']
                if (c['ch_name'] not in info['bads']
                    and c['ch_name'] not in noise_cov['bads'])
                and (c['ch_name'] in fwd_ch_names
                     and c['ch_name'] in noise_cov.ch_names)]
    # ?? There is no projection in the 0.11 version, should I remove this too
    # proj should be None, since the projection should be applied after epoching
    proj, _, _ = mne.io.proj.make_projector(info['projs'], ch_names)
    for r in range(n_trials):
        # whitened, projected data for trial r (Python 2 builtin reduce)
        M[:, :, r] = reduce(np.dot, [whitener, proj, evoked_list[r].data[sel]])
    # =========================================================================
    # Create group information
    src = fwd_list[0]['src']
    n_dip_per_pos = 1 if is_fixed_orient(fwd_list[0]) else 3
    # number of actual nodes, each node can be associated with 3 dipoles
    n_dipoles = G_list[0].shape[1]//n_dip_per_pos
    ## this function is only for n_dip_per_pos == 1
    #if n_dip_per_pos != 1:
    #    raise ValueError("n_orientation must be 1 for now!")
    ##
    if label_list is None:
        nROI = 0
        Flag_nonROI_L2 = False
    else:
        label_ind = list()
        for label in label_list:
            # get the column index corresponding to the ROI
            _, tmp_sel = label_src_vertno_sel(label, src)
            label_ind.append(tmp_sel)
        nROI = len(label_ind)
    DipoleGroup = list()
    # isinROI marks which dipoles belong to any ROI
    # (np.bool / np.int below are deprecated numpy aliases of the builtins)
    isinROI = np.zeros(n_dipoles, dtype=np.bool)
    if n_dip_per_pos == 1:
        # fixed orientation: one column per source location
        for i in range(nROI):
            DipoleGroup.append((np.array(label_ind[i])).astype(np.int))
            isinROI[label_ind[i]] = True
        # dipoles outside the ROIs
        notinROI_ind = np.nonzero(isinROI == 0)[0]
        if Flag_nonROI_L2:
            # all non-ROI dipoles form one large group
            DipoleGroup.append(notinROI_ind.astype(np.int))
        else:
            # each non-ROI dipole is its own group
            for i in range(len(notinROI_ind)):
                DipoleGroup.append(np.array([notinROI_ind[i]]))
    else:
        # free orientation: three consecutive columns per source location
        for i in range(nROI):
            tmp_ind = np.array(label_ind[i])
            tmp_ind = np.hstack([tmp_ind*3, tmp_ind*3+1, tmp_ind*3+2])
            DipoleGroup.append(tmp_ind.astype(np.int))
            isinROI[tmp_ind] = True
        # dipoles outside the ROIs
        notinROI_ind = np.nonzero(isinROI == 0)[0]
        if Flag_nonROI_L2:
            DipoleGroup.append(notinROI_ind.astype(np.int))
        else:
            for i in range(len(notinROI_ind)):
                DipoleGroup.append(np.array([3*notinROI_ind[i],
                                             3*notinROI_ind[i]+1,
                                             3*notinROI_ind[i]+2]).astype(np.int))
    # Group weights, weighted by number of dipoles in the group
    DipoleGroupWeight = 1.0/np.array([len(x) for x in DipoleGroup])
    # up-weight (or down-weight) the ROI groups, then renormalize
    DipoleGroupWeight[0:nROI] *= GroupWeight_Param
    DipoleGroupWeight /= DipoleGroupWeight.sum()
    # =========================================================================
    # STFT constants
    n_step = int(np.ceil(n_times/float(tstep)))
    n_freq = wsize// 2+1
    n_coefs = n_step*n_freq
    p = X.shape[1]
    # =========================================================================
    # Scaling to make setting of alpha easy, modified from tf_mixed_norm in v0.11
    alpha_max = norm_l2inf(np.dot(G_list[0].T, M[:, :, 0]), n_dip_per_pos,
                           copy=False)
    alpha_max *= 0.01
    for run_id in range(n_runs):
        G_list[run_id] /= alpha_max
    # mne v0.11 tf_mixed_norm, "gain /= alpha_max
    #                           source_weighting /= alpha_max"
    # so maybe the physical meaning of source_weighting changed to its inverse
    # i.e. G_tilde = G*source_weighting
    # for MNE0.8, I used
    #source_weighting *= alpha_max
    source_weighting /= alpha_max
    # alternating trial indices define the two CV folds
    cv_partition_ind = np.zeros(n_trials)
    cv_partition_ind[1::2] = 1
    cv_MSE_lasso, cv_MSE_L2 = 0, 0
    # =========================================================================
    if L2_option == 0 or L2_option == 1:
        # compute the L21 solution
        # setting the initial values, make sure ROIs are in the initial active set
        isinROI_ind = np.nonzero(isinROI)[0]
        if n_dip_per_pos == 1:
            active_set_z0[isinROI_ind] = True
        else:
            active_set_z0[3*isinROI_ind] = True
            active_set_z0[3*isinROI_ind+1] = True
            active_set_z0[3*isinROI_ind+2] = True
        # group-level initial active set: a group is active if any of its
        # dipoles is active
        active_set_J_ini = np.zeros(len(DipoleGroup), dtype=np.bool)
        for l in range(len(DipoleGroup)):
            if np.sum(active_set_z0[DipoleGroup[l]]) > 0:
                active_set_J_ini[l] = True
        # if alpha and beta are sequences, use cross validation to select the best
        if len(alpha_seq) > 1 or len(beta_seq) > 1 or len(gamma_seq) > 1:
            print "select alpha,beta and gamma"
            alpha_star, beta_star, gamma_star, cv_MSE_lasso = \
                L21solver.select_alpha_beta_gamma_stft_tree_group_cv_active_set(
                    M, G_list, G_ind, X, active_set_J_ini,
                    DipoleGroup, DipoleGroupWeight,
                    alpha_seq, beta_seq, gamma_seq, cv_partition_ind,
                    n_orient=n_dip_per_pos, wsize=wsize, tstep=tstep,
                    maxit=maxit, tol=tol, Maxit_J=Maxit_J,
                    Incre_Group_Numb=Incre_Group_Numb, dual_tol=dual_tol,
                    Flag_backtrack=Flag_backtrack, L0=L0, eta=eta,
                    Flag_verbose=Flag_verbose)
        else:
            alpha_star, beta_star, gamma_star = alpha_seq[0], beta_seq[0], gamma_seq[0]
        # randomly initialize Z0, make sure the imaginary part is zero
        Z0 = np.zeros([active_set_z0.sum(), n_coefs*p])*1j \
            + np.random.randn(active_set_z0.sum(), n_coefs*p)*1E-20
        tmp_result = L21solver.solve_stft_regression_tree_group_active_set(
            M, G_list, G_ind, X,
            alpha_star, beta_star, gamma_star,
            DipoleGroup, DipoleGroupWeight,
            Z0, active_set_z0, active_set_J_ini,
            n_orient=n_dip_per_pos, wsize=wsize, tstep=tstep,
            maxit=maxit, tol=tol,
            Maxit_J=Maxit_J, Incre_Group_Numb=Incre_Group_Numb,
            dual_tol=dual_tol, Flag_backtrack=Flag_backtrack, L0=L0, eta=eta,
            Flag_verbose=Flag_verbose)
        if tmp_result is None:
            raise Exception("No active dipoles found. alpha is too big.")
        Z = tmp_result['Z']
        active_set = tmp_result['active_set']
        active_t_ind = np.ones(n_step, dtype=np.bool)
        # the following part is copied from tf_mixed_norm in v0.11
        # NOTE(review): mask comes from _prepare_gain with weights=None,
        # so it is presumably always None here and this branch is inert —
        # confirm against _prepare_gain.
        if mask is not None:
            active_set_tmp = np.zeros(len(mask), dtype=np.bool)
            active_set_tmp[mask] = active_set
            active_set = active_set_tmp
            del active_set_tmp
    # =====================================================================
    delta_star = None
    # even if L2_option == 0, we will still return an empty delta_star
    # re-run the regression with a given active set
    if L2_option == 1 or L2_option == 2:
        # if only L2 solution is needed, do some initialization,
        if L2_option == 2:
            if coef_non_zero_mat is None:
                raise ValueError("if L2_option == 2, coef_non_zero_mat must not be empty!")
            active_set = active_set_z0.copy()
            active_t_ind = np.ones(n_step, dtype=np.bool)
            if Z0_l2 is None:
                # make sure the imaginary part is zero
                Z = np.zeros([active_set_z0.sum(), n_coefs*p])*1j \
                    + np.random.randn(active_set_z0.sum(), n_coefs*p)*1E-20
            else:
                Z = Z0_l2
            alpha_star, beta_star, gamma_star = None, None, None
        if L2_option == 1:
            # refit on the support found by the L21 stage
            coef_non_zero_mat = np.abs(Z) > 0
        if delta_seq is None:
            delta_seq = np.array([1E-12, 1E-10, 1E-8])
        if len(delta_seq) > 1:
            # cross-validate delta on the current active set
            Z0 = Z.copy()
            Z0 = Z0[:, np.tile(active_t_ind, p*n_freq)]
            delta_star, cv_MSE_L2 = L2solver.select_delta_stft_regression_cv(
                M, G_list, G_ind, X, Z0,
                active_set, active_t_ind, coef_non_zero_mat,
                delta_seq, cv_partition_ind,
                wsize=wsize, tstep=tstep, maxit=maxit, tol=tol,
                Flag_backtrack=Flag_backtrack, L0=L0, eta=eta,
                Flag_verbose=Flag_verbose)
        else:
            delta_star = delta_seq[0]
        # L2 optimization
        Z, obj = L2solver.solve_stft_regression_L2_tsparse(
            M, G_list, G_ind, X, Z,
            active_set, active_t_ind, coef_non_zero_mat,
            wsize=wsize, tstep=tstep, delta=delta_star,
            maxit=maxit, tol=tol,
            Flag_backtrack=Flag_backtrack, L0=L0, eta=eta,
            Flag_verbose=Flag_verbose)
    # =========================================================================
    # reweighting should be done after the debiasing!!!
    # Reapply weights to have correct unit, To Be modified
    # it seems that in MNE0.11, source_weighting is the inverse of the
    # original source weighting
    # MNE 0.8 (verified in their 0.81 code
    # "X /= source_weighting[active_set][:, None]")
    #Z /= source_weighting[active_set][:, None]
    # MNE 0.11
    Z = _reapply_source_weighting(Z, source_weighting, active_set,
                                  n_dip_per_pos)
    # expand Z back to the full [dipole, regressor, freq, step] layout, then
    # flatten to [n_active_dipoles, n_coefs*p]
    # (np.complex is a deprecated numpy alias for the builtin complex)
    Z_full = np.zeros([active_set.sum(), p, n_freq, n_step], dtype=np.complex)
    Z_full[:, :, :, active_t_ind] = np.reshape(
        Z, [active_set.sum(), p, n_freq, active_t_ind.sum()])
    Z_full = np.reshape(Z_full, [active_set.sum(), -1])
    # do not compute stc_list
    # tmin = evoked_list[0].times[0]
    # stc_tstep = 1.0 / info['sfreq']
    # stc_list = list()
    # for r in range(n_trials):
    #     tmp_stc_data = np.zeros([active_set.sum(), n_times])
    #     tmp_Z = np.zeros([active_set.sum(), n_coefs], dtype=np.complex)
    #     for i in range(p):
    #         tmp_Z += Z_full[:, i*n_coefs:(i+1)*n_coefs] * X[r, i]
    #     # if it is a trial-by-trial model, add the model for the single trial
    #     tmp_stc_data = phiT(tmp_Z)
    #     tmp_stc = _make_sparse_stc(tmp_stc_data, active_set,
    #                                fwd_list[G_ind[r]], tmin, stc_tstep)
    #     stc_list.append(tmp_stc)
    # logger.info('[done]')
    return Z_full, active_set, active_t_ind, alpha_star, beta_star, \
        gamma_star, delta_star, cv_MSE_lasso, cv_MSE_L2
# Handling average file evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0)) evoked.crop(tmin=0.04, tmax=0.340) evoked = evoked.pick_types(eeg=True, meg=True) # Handling forward solution forward = mne.read_forward_solution(fwd_fname) all_ch_names = evoked.ch_names loose, forward = _check_loose_forward(loose, forward) # Handle depth weighting and whitening (here is no weights) X, X_info, whitener, _, _ = _prepare_gain(forward, evoked.info, noise_cov, pca=False, depth=depth, loose=loose, weights=None, weights_min=None) # Select channels of interest sel = [all_ch_names.index(name) for name in X_info['ch_names']] Y = evoked.data[sel] # Whiten data Y = np.dot(whitener, Y) savemat('meg_Xy_new.mat', dict(X=X, Y=Y))