Example #1
def save_regression_outputs(subject, epochs, suffix, results_path,
                            regressors_names, betas, scores):
    """
    This function saves in the results_path the regression score, betas and residuals.
    """
    results_path = results_path + '/' + subject + '/'
    utils.create_folder(results_path)
    np.save(op.join(results_path, 'scores--' + suffix[:-1] + '.npy'), scores)

    # save betas and residuals
    residuals = epochs.get_data()
    for ii, name_reg in enumerate(regressors_names):
        beta = epochs.average().copy()
        beta._data = np.asarray(betas[ii, :, :])
        beta.save(
            op.join(results_path,
                    'beta_' + name_reg + '--' + suffix[:-1] + '-ave.fif'))
        residuals = residuals - np.asarray([
            epochs.metadata[name_reg].values[i] * beta._data
            for i in range(len(epochs))
        ])

    residual_epochs = epochs.copy()
    residual_epochs._data = residuals
    residual_epochs.save(op.join(results_path, 'residuals' + '--' +
                                 suffix[:-1] + '-epo.fif'),
                         overwrite=True)
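The loop above subtracts, for each epoch, the regressor value times its beta map. A minimal NumPy sketch of the same computation on synthetic shapes (names and sizes are illustrative, not from the original pipeline):

import numpy as np

# synthetic example: 4 epochs, 2 channels, 3 time points, 2 regressors
rng = np.random.default_rng(0)
Y = rng.normal(size=(4, 2, 3))   # epochs.get_data(): (epochs, channels, times)
X = rng.normal(size=(4, 2))      # per-epoch regressor values, one column per regressor
B = rng.normal(size=(2, 2, 3))   # betas: one (channels, times) map per regressor

# residuals = Y - sum_i x_i * beta_i, i.e. the loop above in one einsum
explained = np.einsum('er,rct->ect', X, B)
residuals = Y - explained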
Example #2
def save_regression_outputs(subject, epochs, suffix, results_path,
                            regressors_names, betas, scores):
    """
    This function saves in the results_path the regression score, betas and residuals.
    """
    results_path = results_path + '/' + subject + '/'
    utils.create_folder(results_path)
    np.save(op.join(results_path, 'scores--' + suffix[:-1] + '.npy'), scores)

    # save betas and residuals
    residuals = epochs.get_data()

    # --- 'explained' will hold the explained signal of each non-intercept regressor ---
    explained = []

    for ii, name_reg in enumerate(regressors_names):
        beta = epochs.average().copy()
        beta._data = np.asarray(betas[ii, :, :])
        beta.save(
            op.join(results_path,
                    'beta_' + name_reg + '--' + suffix[:-1] + '-ave.fif'))

        # --- explained signal for this regressor: one (channels x times) map per epoch ---
        explained_signal = np.asarray([
            epochs.metadata[name_reg].values[i] * beta._data
            for i in range(len(epochs))
        ])
        if name_reg == 'Intercept':
            intercept = explained_signal
        else:
            explained.append(explained_signal)

    if 'Intercept' in regressors_names:
        residuals = residuals - intercept - np.sum(explained, axis=0)
        intercept_epochs = epochs.copy()
        intercept_epochs._data = intercept
        intercept_epochs.save(op.join(
            results_path, 'intercept' + '--' + suffix[:-1] + '-epo.fif'),
                              overwrite=True)
    else:
        residuals = residuals - np.sum(explained, axis=0)

    epochs.save(op.join(results_path,
                        'epochs' + '--' + suffix[:-1] + '-epo.fif'),
                overwrite=True)

    residual_epochs = epochs.copy()
    residual_epochs._data = residuals
    residual_epochs.save(op.join(results_path, 'residuals' + '--' +
                                 suffix[:-1] + '-epo.fif'),
                         overwrite=True)

    explained_signal_epochs = epochs.copy()
    # note: the saved explained signal is the *mean* across non-intercept
    # regressors, whereas the residuals above subtract their *sum*
    explained_signal_epochs._data = np.mean(explained, axis=0)
    explained_signal_epochs.save(op.join(
        results_path, 'explained_signal' + '--' + suffix[:-1] + '-epo.fif'),
                                 overwrite=True)
Example #3
def run_linear_regression_surprises(subject,
                                    omega_list,
                                    clean=False,
                                    decim=None,
                                    prefix='',
                                    Ridge=False,
                                    hfilter=20):

    epochs = epoching_funcs.load_epochs_items(subject, cleaned=clean)
    epochs.pick_types(meg=True, eeg=True)
    if hfilter is not None:
        epochs.filter(None, hfilter)

    if decim is not None:
        epochs.decimate(decim)

    metadata = epoching_funcs.update_metadata(subject,
                                              clean=clean,
                                              new_field_name=None,
                                              new_field_values=None)
    epochs.metadata = metadata
    df = epochs.metadata
    epochs.metadata = df.assign(Intercept=1)
    r2_surprise = {omega: [] for omega in omega_list}
    r2_surprise['times'] = epochs.times
    # keep only the epochs with a defined (non-NaN) surprise value
    epochs_for_reg = epochs[np.where(
        ~np.isnan(epochs.metadata["surprise_1"].values))[0]]
    epochs_for_reg = epochs_for_reg["SequenceID != 1"]
    epochs_for_reg_normalized = normalize_data(epochs_for_reg)

    out_path = op.join(config.result_path, 'TP_effects', 'surprise_omegas',
                       subject)
    utils.create_folder(out_path)
    if not Ridge:
        for omega in omega_list:
            print("==== running the regression for omega %i =======" % omega)
            surprise_name = "surprise_%.005f" % omega
            r2_surprise[omega] = linear_regression_from_sklearn(
                epochs_for_reg_normalized, surprise_name)
        # ===== save all the regression results =========
        fname = prefix + 'results_surprise.npy'
        np.save(op.join(out_path, fname), r2_surprise)

    else:
        surprise_names = ["surprise_%i" % omega for omega in omega_list]
        results_ridge = multi_ridge_regression_allIO(epochs_for_reg_normalized,
                                                     surprise_names)
        fname = prefix + 'results_Ridge_surprise.npy'
        np.save(op.join(out_path, fname), results_ridge)

    return True
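A hypothetical invocation (the subject ID and omega grid are illustrative assumptions): for each omega, the normalized epochs are regressed on "surprise_<omega>" and the r2 dict is saved to <result_path>/TP_effects/surprise_omegas/<subject>/results_surprise.npy.

run_linear_regression_surprises('sub01-ab_123456',
                                omega_list=[1, 2, 4, 8, 16],
                                clean=True, decim=4, hfilter=20)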
Example #4
def _compute_and_save_dissimilarity(epochs1, epochs2, subdir, subj_id, metric):

    print('\n\nComputing {:} dissimilarity (metric={:})...'.format(
        subdir, metric))

    dissim = umne.rsa.gen_observed_dissimilarity(epochs1,
                                                 epochs2,
                                                 metric=metric,
                                                 sliding_window_size=25,
                                                 sliding_window_step=4)

    filename = fn_template.dissim.format(subdir, metric, subj_id)
    utils.create_folder(op.split(filename)[0] + '/')
    print('Saving the dissimilarity matrix to {:}'.format(filename))
    dissim.save(filename)
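A hypothetical call, assuming epochs1 and epochs2 are two mne.Epochs splits of the same items (e.g. two halves of the runs) and that fn_template.dissim is a format string with slots for subdir, metric and subject; the metric name is also an assumption:

_compute_and_save_dissimilarity(epochs_half1, epochs_half2,
                                subdir='Viol', subj_id='sub01',
                                metric='spearmanr')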
Example #5
def merge_individual_regression_results(regressors_names,
                                        epochs_fname,
                                        filter_name,
                                        suffix=''):
    """
    This function loads individual regression results (betas, computed by 'compute_regression' function)
     and saves them as an epochs object, with Nsubjects betas, per regressor
    :param regressors_names: regressors used in the regression (required to find path and files)
    :epochs_fname: '' empty unless regresssions was conducted with the residuals of a previous regression
    :param filter_name: 'Stand', 'Viol', 'StandMultiStructure', 'Hab', 'Stand_excluseRA', 'Viol_excluseRA', 'StandMultiStructure_excluseRA', 'Hab_excluseRA'
    """

    # Results path
    results_path = op.join(config.result_path, 'linear_models', filter_name)
    to_append_to_results_path = ''
    for name in regressors_names:
        to_append_to_results_path += '_' + name
    if epochs_fname != '':
        results_path = op.abspath(
            op.join(results_path, 'from_' + epochs_fname + '--'))
        results_path = results_path + to_append_to_results_path[1:]
    else:
        results_path = op.join(results_path, to_append_to_results_path[1:])

    # Load data from all subjects
    tmpdat = dict()
    for name in regressors_names:
        tmpdat[name], path_evo = evoked_funcs.load_evoked(
            'all', filter_name='beta_' + name + suffix, root_path=results_path)

    # Store as epochs objects (one per regressor, one beta map per subject)
    epo = dict()
    for name in regressors_names:
        dat = tmpdat[name][next(iter(tmpdat[name]))]
        epo[name] = mne.EpochsArray(
            np.asarray([dat[i][0].data for i in range(len(dat))]),
            dat[0][0].info, tmin=np.round(dat[0][0].times[0], 3))

    # Save group fif files
    out_path = op.join(results_path, 'group')
    utils.create_folder(out_path)
    for name in regressors_names:
        epo[name].save(op.join(out_path, name + suffix + '_epo.fif'),
                       overwrite=True)
Example #6
def save_evoked_levels_regressors(epochs, subject, regressors_names,
                                  results_path, suffix):
    """
    This function computes and saves the regression results when regressing on the epochs (or residuals if specified in epochs_fname)
    :param epochs: subject's NIP
    :param regressors_names: List of fieds that exist in the metadata of the epochs
    """

    for reg_name in regressors_names:
        save_reg_levels_evoked_path = results_path + subject + op.sep + reg_name + '_evo/'
        utils.create_folder(save_reg_levels_evoked_path)
        # --- these are the different values of the regressor ----
        levels = np.unique(epochs.metadata[reg_name])
        if len(levels) > 10:
            bins = np.linspace(np.min(levels), np.max(levels),
                               6)  # 6 edges -> 5 bins (changed from 11, to recompute)
            for ii in range(5):
                epochs["%s >= %0.02f and %s < %0.02f" %
                       (reg_name, bins[ii], reg_name, bins[ii + 1])].average(
                       ).save(save_reg_levels_evoked_path + str(ii) + '-' +
                              suffix[:-1] + '-ave.fif')
        else:
            for k, lev in enumerate(levels):
                # epochs["%s == %s"%(reg_name,lev)].average().save(save_reg_levels_evoked_path+op.sep+str(np.round(lev,2))+'-'+suffix[:-1]+'-ave.fif')
                epochs["%s == %s" %
                       (reg_name,
                        lev)].average().save(save_reg_levels_evoked_path +
                                             op.sep + str(k) + '-' +
                                             suffix[:-1] + '-ave.fif')

    save_reg_levels_evoked_path = results_path + subject + '/SequenceID_evo/'
    utils.create_folder(save_reg_levels_evoked_path)
    levels = np.unique(epochs.metadata['SequenceID'])
    for lev in levels:
        epochs["%s == %s" %
               ('SequenceID',
                lev)].average().save(save_reg_levels_evoked_path + op.sep +
                                     str(np.round(lev, 2)) + '-' +
                                     suffix[:-1] + '-ave.fif')

    return True
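For continuous regressors (more than 10 unique values), the function above splits the value range into 5 bins using 6 edges. A small self-contained sketch of that binning logic (the regressor name is illustrative):

import numpy as np

levels = np.linspace(0., 2., 50)                       # >10 unique values
bins = np.linspace(np.min(levels), np.max(levels), 6)  # 6 edges -> 5 bins
for ii in range(5):
    print("%s >= %0.02f and %s < %0.02f" %
          ('surprise', bins[ii], 'surprise', bins[ii + 1]))
# note: the top edge is exclusive, so epochs exactly at the maximum value
# fall outside the last bin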
Example #7
def localize_standard_VS_deviant_code(subject, n_permutations=2000,
                                      n_channels=30, select_grad=False,
                                      cleaned=True):

    # ----------- load the epochs ---------------
    epochs = epoching_funcs.load_epochs_items(subject, cleaned=cleaned)
    epochs.pick_types(meg=True)

    # ----------- balance the positions of the standards and the deviants -------
    # 'local' - just make sure we have the same number of standards and deviants for a given
    #     position. This may end up with 1 standard/deviant for position 9 and 4 for the others.
    epochs_balanced = epoching_funcs.balance_epochs_violation_positions(epochs,balance_param="local")
    # ----------- do a sliding window to smooth the data -------
    epochs_balanced = epoching_funcs.sliding_window(epochs_balanced)

    # =============================================================================================
    toi = 0.165
    epochs_for_decoding = epochs_balanced.copy().crop(tmin=toi, tmax=toi)
    training_inds, testing_inds = SVM_funcs.train_test_different_blocks(epochs_for_decoding, return_per_seq=False)
    y_violornot = np.asarray(epochs_for_decoding.metadata['ViolationOrNot'].values)
    labels_train = [y_violornot[training_inds[i]] for i in range(2)]
    labels_test = [y_violornot[testing_inds[i]] for i in range(2)]

    performance_loc = compute_sensor_weights_decoder(
        epochs_for_decoding, SVM_funcs.SVM_decoder(), training_inds,
        labels_train, testing_inds, labels_test, None, None, n_permutations,
        n_channels, select_grad=select_grad)

    suffix = ''
    if select_grad:
        suffix = 'only_grad'

    save_path = config.result_path + '/localization/Standard_VS_Deviant/'
    utils.create_folder(save_path)
    save_path_subject = save_path + subject + '/'+suffix
    utils.create_folder(save_path_subject)

    np.save(save_path_subject + 'results' + str(n_permutations) + '_permut' +
            str(n_channels) + '_chans' + '_' + str(round(toi * 1000)) + '.npy',
            performance_loc)
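A hypothetical call (the subject ID is illustrative); with the defaults it writes results2000_permut30_chans_165.npy under <result_path>/localization/Standard_VS_Deviant/<subject>/:

localize_standard_VS_deviant_code('sub01-ab_123456',
                                  n_permutations=2000, n_channels=30)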
Example #8
def plot_weights_maps(analysis_name='Standard_VS_Deviant',
                      results_name='results.npy', suffix='',
                      chan_types=['mag'], chance=None, vmin=None, vmax=None,
                      font_size=8):

    save_path = config.fig_path+'/localization/'+analysis_name+'/'
    utils.create_folder(save_path)
    epoch = sensor_weights_all_subj_as_epo(analysis_name=analysis_name,results_name=results_name)

    for chans in chan_types:
        fig = plt.figure(figsize=(3.,2.2))
        layout = mne.find_layout(epoch.info,ch_type=chans)
        data_to_plot = np.squeeze(epoch.copy().pick_types(meg=chans).average()._data)
        offset = 0. if chance is None else chance
        if chans == 'grad':  # was "'grad' in chan_types", which misfires when several types are plotted
            # gradiometer pairs share a location: keep every other sensor
            idx = range(0, len(layout.pos), 2)
            plt.scatter(np.asarray([layout.pos[i, 0] for i in idx]),
                        [layout.pos[i, 1] for i in idx],
                        c=[data_to_plot[i] - offset for i in idx],
                        s=30, vmin=vmin, vmax=vmax)
        else:
            plt.scatter(layout.pos[:, 0], layout.pos[:, 1],
                        c=data_to_plot - offset, s=30, vmin=vmin, vmax=vmax)

        # plt.title(analysis_name+chans)
        plt.gca().get_xaxis().set_visible(False)
        plt.gca().get_yaxis().set_visible(False)
        plt.axis('off')
        cbar = plt.colorbar()
        cbar.ax.tick_params(labelsize=font_size)
        plt.gcf().savefig(save_path+chans+suffix+'.png')
        plt.gcf().savefig(save_path+chans+suffix+'.svg')
        plt.gcf().show()
        fig = plt.gcf()
        plt.close('all')

    return fig
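A hypothetical call that would plot the output saved by localize_standard_VS_deviant_code above (the chance level and color limits are assumptions):

fig = plot_weights_maps(analysis_name='Standard_VS_Deviant',
                        results_name='results2000_permut30_chans_165.npy',
                        chan_types=['mag'], chance=0.5,
                        vmin=-0.05, vmax=0.05)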
Example #9
def regression_group_analysis(regressors_names,
                              epochs_fname,
                              filter_name,
                              suffix='',
                              Do3Dplot=True,
                              ch_types=['mag'],
                              suffix_evoked=''):
    """
    This function loads individual regression results merged as epochs arrays (with 'merge_individual_regression_results' function)
     and compute group level statistics (with various figures)
    :param regressors_names: regressors used in the regression (required to find path and files)
    :epochs_fname: '' empty unless regresssions was conducted with the residuals of a previous regression
    :param filter_name: 'Stand', 'Viol', 'StandMultiStructure', 'Hab', 'Stand_excluseRA', 'Viol_excluseRA', 'StandMultiStructure_excluseRA', 'Hab_excluseRA'
    :param suffix: '' or 'remapped_mtg' or 'remapped_gtm'
    :param Do3Dplot: create the sources figures (may not work, depending of the computer config)
    regressors_names = reg_names
    epochs_fname = ''
    filter_name = 'Hab'
    suffix='--remapped_mtgclean'
    Do3Dplot=False
    ch_types = ['mag']

    """

    # ===================== LOAD GROUP REGRESSION RESULTS & SET PATHS ==================== #

    # Results (data) path
    results_path = op.join(config.result_path, 'linear_models', filter_name)
    to_append_to_results_path = ''
    for name in regressors_names:
        to_append_to_results_path += '_' + name
    if epochs_fname != '':
        results_path = op.abspath(
            op.join(results_path, 'from_' + epochs_fname + '--'))
        results_path = results_path + to_append_to_results_path[1:]
    else:
        results_path = op.join(results_path, to_append_to_results_path[1:])
    results_path = op.join(results_path, 'group')

    # Load data
    betas = dict()
    for name in regressors_names:
        betas[name] = mne.read_epochs(
            op.join(results_path, name + suffix + '_epo.fif'))
        print('There are ' + str(len(betas[name])) + ' betas for ' + name +
              suffix)

    # Results figures path
    fig_path = op.join(results_path, 'figures')
    utils.create_folder(fig_path)

    # Analysis name
    analysis_name = ''
    for name in regressors_names:
        analysis_name += '_' + name
    analysis_name = analysis_name[1:]

    # ====================== PLOT THE GROUP-AVERAGED SOURCES OF THE BETAS  ===================== #
    if Do3Dplot:
        all_stcs, all_betasevoked = linear_reg_funcs.plot_average_betas_with_sources(
            betas, analysis_name, fig_path, remap_grads=suffix)

    # ================= PLOT THE HEATMAPS OF THE GROUP-AVERAGED BETAS / CHANNEL ================ #
    linear_reg_funcs.plot_betas_heatmaps(betas,
                                         ch_types,
                                         fig_path,
                                         suffix=suffix)

    # =========================== PLOT THE BUTTERFLY OF THE REGRESSORS ========================== #
    linear_reg_funcs.plot_betas_butterfly(betas,
                                          ch_types,
                                          fig_path,
                                          suffix=suffix)

    # =========================================================== #
    # Group stats
    # =========================================================== #
    import matplotlib.pyplot as plt
    savepath = op.join(fig_path, 'Stats')
    utils.create_folder(savepath)
    nperm = 5000  # number of permutations
    threshold = None  # If threshold is None, t-threshold equivalent to p < 0.05 (if t-statistic)
    p_threshold = 0.05
    tmin = 0.000  # timewindow to test (crop data)
    tmax = 0.350  # timewindow to test (crop data)
    for ch_type in ch_types:
        for x, regressor_name in enumerate(betas.keys()):
            data_stat = copy.deepcopy(betas[regressor_name])
            data_stat.crop(tmin=tmin, tmax=tmax)  # crop

            print('\n\n' + regressor_name + ', ch_type ' + ch_type)
            cluster_stats, data_array_chtype, _ = stats_funcs.run_cluster_permutation_test_1samp(
                data_stat,
                ch_type=ch_type,
                nperm=nperm,
                threshold=threshold,
                n_jobs=6,
                tail=0)
            cluster_info = stats_funcs.extract_info_cluster(
                cluster_stats, p_threshold, data_stat, data_array_chtype,
                ch_type)

            # Significant clusters
            T_obs, clusters, p_values, _ = cluster_stats
            good_cluster_inds = np.where(p_values < p_threshold)[0]
            print("Good clusters: %s" % good_cluster_inds)

            # PLOT CLUSTERS
            if len(good_cluster_inds) > 0:
                figname_initial = op.join(
                    savepath, analysis_name + '_' + regressor_name +
                    '_stats_' + ch_type + suffix)
                stats_funcs.plot_clusters(cluster_info,
                                          ch_type,
                                          T_obs_max=5.,
                                          fname=regressor_name,
                                          figname_initial=figname_initial,
                                          filter_smooth=False)

            if Do3Dplot:
                # SOURCES FIGURES FROM CLUSTERS TIME WINDOWS
                if len(good_cluster_inds) > 0:
                    # Group mean stc (all_stcs loaded before)
                    n_subjects = len(all_stcs[regressor_name])
                    mean_stc = all_stcs[regressor_name][0].copy(
                    )  # get copy of first instance
                    for sub in range(1, n_subjects):
                        mean_stc._data += all_stcs[regressor_name][sub].data
                    mean_stc._data /= n_subjects

                    for i_clu in range(cluster_info['ncluster']):
                        cinfo = cluster_info[i_clu]
                        twin_min = cinfo['sig_times'][0] / 1000
                        twin_max = cinfo['sig_times'][-1] / 1000
                        stc_timewin = mean_stc.copy()
                        stc_timewin.crop(tmin=twin_min, tmax=twin_max)
                        stc_timewin = stc_timewin.mean()
                        # max_t_val = mean_stc.get_peak()[1]
                        brain = stc_timewin.plot(views=['lat'],
                                                 surface='inflated',
                                                 hemi='split',
                                                 size=(1200, 600),
                                                 subject='fsaverage',
                                                 clim='auto',
                                                 subjects_dir=op.join(
                                                     config.root_path, 'data',
                                                     'MRI', 'fs_converted'),
                                                 smoothing_steps=5,
                                                 time_viewer=False)
                        screenshot = brain.screenshot()
                        brain.close()
                        nonwhite_pix = (screenshot != 255).any(-1)
                        nonwhite_row = nonwhite_pix.any(1)
                        nonwhite_col = nonwhite_pix.any(0)
                        cropped_screenshot = screenshot[
                            nonwhite_row][:, nonwhite_col]
                        plt.close('all')
                        fig = plt.imshow(cropped_screenshot)
                        plt.axis('off')
                        info = analysis_name + '_' + regressor_name + ' [%d - %d ms]' % (
                            twin_min * 1000, twin_max * 1000)
                        # figname_initial = savepath + op.sep + analysis_name + '_' + regressor_name + '_stats_' + ch_type
                        plt.title(info)
                        plt.savefig(op.join(savepath,
                                            info + suffix + '_sources.png'),
                                    bbox_inches='tight',
                                    dpi=600)
                        plt.close('all')

            # =========================================================== #
            # ==========  cluster evoked data plot --> per regressor level
            # =========================================================== #

            if len(good_cluster_inds) > 0 and regressor_name != 'Intercept':
                # ------------------ LOAD THE EVOKED FOR THE CURRENT CONDITION ------------ #
                path = op.abspath(op.join(results_path, os.pardir))
                subpath = regressor_name + '_evo'
                evoked_reg = evoked_funcs.load_regression_evoked(
                    subject='all',
                    path=path,
                    subpath=subpath,
                    filter=suffix_evoked)

                # ----------------- PLOTS ----------------- #
                for i_clu, clu_idx in enumerate(good_cluster_inds):
                    cinfo = cluster_info[i_clu]
                    fig = stats_funcs.plot_clusters_evo(
                        evoked_reg,
                        cinfo,
                        ch_type,
                        i_clu,
                        analysis_name=analysis_name + '_' + regressor_name,
                        filter_smooth=False,
                        legend=True,
                        blackfig=False)
                    fig_name = savepath + op.sep + analysis_name + '_' + regressor_name + '_stats_' + ch_type + '_clust_' + str(
                        i_clu + 1) + suffix + '_evo.jpg'
                    print('Saving ' + fig_name)
                    fig.savefig(fig_name,
                                dpi=300,
                                facecolor=fig.get_facecolor(),
                                edgecolor='none')
                    plt.close('all')

            # =========================================================== #
            # ==========  cluster evoked data plot --> per sequence
            # =========================================================== #
            if len(good_cluster_inds) > 0:
                # ------------------ LOAD THE EVOKED FOR EACH SEQUENCE ------------ #
                path = op.abspath(op.join(results_path, os.pardir))
                subpath = 'SequenceID' + '_evo'
                evoked_reg = evoked_funcs.load_regression_evoked(
                    subject='all', path=path, subpath=subpath)

                # ----------------- PLOTS ----------------- #
                for i_clu, clu_idx in enumerate(good_cluster_inds):
                    cinfo = cluster_info[i_clu]
                    fig = stats_funcs.plot_clusters_evo(
                        evoked_reg,
                        cinfo,
                        ch_type,
                        i_clu,
                        analysis_name=analysis_name + '_eachSeq',
                        filter_smooth=False,
                        legend=True,
                        blackfig=False)
                    fig_name = savepath + op.sep + analysis_name + '_' + regressor_name + '_stats_' + ch_type + '_clust_' + str(
                        i_clu + 1) + suffix + '_eachSeq_evo.jpg'
                    print('Saving ' + fig_name)
                    fig.savefig(fig_name,
                                dpi=300,
                                facecolor=fig.get_facecolor(),
                                edgecolor='none')
                    fig = stats_funcs.plot_clusters_evo_bars(
                        evoked_reg,
                        cinfo,
                        ch_type,
                        i_clu,
                        analysis_name=analysis_name + '_eachSeq',
                        filter_smooth=False,
                        legend=False,
                        blackfig=False)
                    fig_name = savepath + op.sep + analysis_name + '_' + regressor_name + '_stats_' + ch_type + '_clust_' + str(
                        i_clu + 1) + suffix + '_eachSeq_evo_bars.jpg'
                    print('Saving ' + fig_name)
                    fig.savefig(fig_name,
                                dpi=300,
                                facecolor=fig.get_facecolor(),
                                edgecolor='none')
                    plt.close('all')

            # =========================================================== #
            # ==========  heatmap betas plot
            # =========================================================== #
            if len(good_cluster_inds) > 0 and regressor_name != 'Intercept':
                linear_reg_funcs.plot_betas_heatmaps_with_clusters(
                    analysis_name, betas, ch_type, regressor_name,
                    cluster_info, good_cluster_inds, savepath, suffix)
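A typical invocation, mirroring the example values given in the docstring (the regressor list itself is an assumption):

regression_group_analysis(['Intercept', 'Complexity'],
                          epochs_fname='',
                          filter_name='Hab',
                          suffix='--remapped_mtgclean',
                          Do3Dplot=False,
                          ch_types=['mag'])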
Example #10
def run_epochs(subject,
               epoch_on_first_element,
               baseline=True,
               tmin=None,
               tmax=None,
               whattoreturn=None):

    # Set this param to True to also run AutoReject 'local' when config.autoreject = True

    ARlocal = False

    print("Processing subject: %s" % subject)
    meg_subject_dir = op.join(config.meg_dir, subject)
    run_info_subject_dir = op.join(config.run_info_dir, subject)
    raw_list = list()
    events_list = list()

    if config.noEEG:
        output_dir = op.join(meg_subject_dir, 'noEEG')
        utils.create_folder(output_dir)
    else:
        output_dir = meg_subject_dir

    print("  Loading raw data")
    runs = config.runs_dict[subject]
    for run in runs:
        extension = run + '_ica_raw'
        print(extension)
        raw_fname_in = op.join(meg_subject_dir,
                               config.base_fname.format(**locals()))
        raw = mne.io.read_raw_fif(raw_fname_in, preload=True)

        # ---------------------------------------------------------------------------------------------------------------- #
        # RESAMPLING EACH RUN BEFORE CONCAT & EPOCHING
        # Resampling the raw data while keeping events from original raw data, to avoid potential loss of
        # events when downsampling: https://www.nmr.mgh.harvard.edu/mne/dev/auto_examples/preprocessing/plot_resample.html
        # Find events
        events = mne.find_events(raw,
                                 stim_channel=config.stim_channel,
                                 consecutive=True,
                                 min_duration=config.min_event_duration,
                                 shortest_event=config.shortest_event)

        print('  Downsampling raw data')
        raw, events = raw.resample(config.resample_sfreq,
                                   npad='auto',
                                   events=events)

        times_between_events_and_end = (raw.last_samp -
                                        events[:, 0]) / raw.info['sfreq']
        if np.sum(times_between_events_and_end < 0.6) > 0:
            print("=== some events are too close to the end ====")

        if len(events) != 46 * 16:
            raise Exception('We expected %i events but we got %i' %
                            (46 * 16, len(events)))

        raw_list.append(raw)
        # ---------------------------------------------------------------------------------------------------------------- #

    if subject == 'sub08-cc_150418':
        # For this participant, we had some problems when concatenating the raws for run08. The error message said that raw08._cals didn't match the other ones.
        # We saw that it is the 'calibration' for the channel EOG061 that was different with respect to run09._cals.
        raw_list[7]._cals = raw_list[8]._cals
        print(
            'Warning: corrected an issue with subject08 run08 ica_raw data file...'
        )

    print('Concatenating runs')
    raw = mne.concatenate_raws(raw_list)
    # raw.set_annotations(None)
    if "eeg" in config.ch_types:
        raw.set_eeg_reference(projection=True)
    del raw_list

    # Save resampled, concatenated runs (in case we need it)
    # print('Saving concatenated runs')
    # fname = op.join(meg_subject_dir, subject + '_allruns_final_raw.fif')
    # raw.save(fname, overwrite=True)

    if config.noEEG:
        picks = mne.pick_types(raw.info,
                               meg=True,
                               eeg=False,
                               stim=True,
                               eog=True,
                               exclude=())
    else:
        picks = mne.pick_types(raw.info,
                               meg=True,
                               eeg=True,
                               stim=True,
                               eog=True,
                               exclude=())

    # Construct metadata from csv events file
    metadata = convert_csv_info_to_metadata(run_info_subject_dir)
    metadata_pandas = pd.DataFrame.from_dict(metadata, orient='index')
    metadata_pandas = pd.DataFrame.transpose(metadata_pandas)

    # ====== Epoching the data
    print('  Epoching')

    # Events
    events = mne.find_events(raw,
                             stim_channel=config.stim_channel,
                             consecutive=True,
                             min_duration=config.min_event_duration,
                             shortest_event=config.shortest_event)

    if epoch_on_first_element:
        # fosca 06012020
        if tmin is None:
            tmin = -0.200
        if tmax is None:
            tmax = 0.25 * 17
        if (baseline is None) or (baseline is False):
            baseline = None
        else:
            baseline = (tmin, 0)
        for k in range(len(events)):
            events[k, 2] = k % 16 + 1
        epochs = mne.Epochs(raw,
                            events, {'sequence_starts': 1},
                            tmin,
                            tmax,
                            proj=True,
                            picks=picks,
                            baseline=baseline,
                            preload=False,
                            decim=config.decim,
                            reject=None)
        epochs.metadata = metadata_pandas[metadata_pandas['StimPosition'] ==
                                          1.0]
    else:
        if tmin is None:
            tmin = -0.050
        if tmax is None:
            tmax = 0.600
        if (baseline is None) or (baseline is False):
            baseline = None
        else:
            baseline = (tmin, 0)

        epochs = mne.Epochs(raw,
                            events,
                            None,
                            tmin,
                            tmax,
                            proj=True,
                            picks=picks,
                            baseline=baseline,
                            preload=False,
                            decim=config.decim,
                            reject=None)

        # Add metadata to epochs
        epochs.metadata = metadata_pandas

    # Save epochs (before AutoReject)

    if whattoreturn is None:
        print('  Writing epochs to disk')
        if epoch_on_first_element:
            extension = subject + '_1st_element_epo'
        else:
            extension = subject + '_epo'
        epochs_fname = op.join(output_dir,
                               config.base_fname.format(**locals()))
        print("Output: ", epochs_fname)
        epochs.save(epochs_fname, overwrite=True)
    elif whattoreturn == '':
        epochs.load_data()
        return epochs
    else:
        print("=== we continue on the autoreject part ===")

    if config.autoreject:
        epochs.load_data()
        # Running AutoReject "global" (https://autoreject.github.io) -> just get the thresholds
        from autoreject import AutoReject, get_rejection_threshold
        reject = get_rejection_threshold(epochs, ch_types=config.ch_types)
        epochsARglob = epochs.copy().drop_bad(reject=reject)
        print('  Writing "AR global" cleaned epochs to disk')
        if epoch_on_first_element:
            extension = subject + '_1st_element_ARglob_epo'
        else:
            extension = subject + '_ARglob_epo'
        epochs_fname = op.join(output_dir,
                               config.base_fname.format(**locals()))
        if whattoreturn is None:
            print("Output: ", epochs_fname)
            epochsARglob.save(epochs_fname, overwrite=True)
            pickle.dump(
                reject, open(epochs_fname[:-4] + '_ARglob_thresholds.obj',
                             'wb'))
        elif whattoreturn == 'ARglobal':
            return epochsARglob
        else:
            print("==== continue to ARlocal ====")

        # Running AutoReject "local" (https://autoreject.github.io)
        if ARlocal:
            ar = AutoReject()
            epochsAR, reject_log = ar.fit_transform(epochs, return_log=True)
            print('  Writing "AR local" cleaned epochs to disk')
            if epoch_on_first_element:
                extension = subject + '_1st_element_clean_epo'
            else:
                extension = subject + '_clean_epo'
            epochs_fname = op.join(output_dir,
                                   config.base_fname.format(**locals()))
            if whattoreturn is None:
                print("Output: ", epochs_fname)
                epochsAR.save(epochs_fname, overwrite=True)
                # Save autoreject reject_log
                pickle.dump(
                    reject_log,
                    open(epochs_fname[:-4] + '_reject_local_log.obj', 'wb'))
            else:
                return epochsAR
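Two hypothetical calls (the subject ID is illustrative): the first writes the item-level epochs to disk, the second returns the sequence-onset epochs in memory instead of saving them:

run_epochs('sub01-ab_123456', epoch_on_first_element=False)
epochs_seq = run_epochs('sub01-ab_123456', epoch_on_first_element=True,
                        whattoreturn='')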
Example #11
def create_qsub(function_name,
                folder_name,
                suffix_name,
                sublist_subjects=None,
                queue='Unicog_long'):
    import subprocess
    import os, sys, glob

    # ==========================================================================================
    # ============= ============= create the jobs ============================= ============= ==
    # ==========================================================================================

    ########################################################################
    # List of parameters to be parallelized
    ListSubject = config.subjects_list
    if sublist_subjects is not None:
        ListSubject = sublist_subjects

    ########################################################################
    # Initialize job files and names


    wkdir = config.cluster_path
    base_path = config.scripts_path
    initbody = 'import sys \n'
    initbody = initbody + "sys.path.append(" + "'" + base_path + "')\n"
    initbody = initbody + 'from ABseq_func import cluster_funcs\n'

    # Write actual job files
    Listfile, ListJobName = [], []

    for s, subject in enumerate(ListSubject):
        print(subject)

        body = initbody + "cluster_funcs.%s('%s')" % (function_name, subject)

        jobname = suffix_name + '_' + subject

        ListJobName.append(jobname)

        # Write jobs in a dedicated folder
        path_jobs = wkdir + '/generated_jobs/' + folder_name + '/'
        utils.create_folder(path_jobs)
        name_file = path_jobs + jobname + '.py'
        Listfile.append(name_file)

        with open(name_file, 'w') as python_file:
            python_file.write(body)

    # ============== Loop over your jobs ===========

    jobs_path = config.cluster_path + "/generated_jobs/"
    results_path = config.cluster_path + "/results_qsub/"
    utils.create_folder(results_path + folder_name)
    list_scripts = sorted(glob.glob(jobs_path + folder_name + "/*.py"))

    # Loop over your jobs

    for i in list_scripts:
        # Customize your options here
        file_name = os.path.split(i)
        job_name = "%s" % file_name[1]

        walltime = "48:00:00"  # "24:00:00"
        if 'short' in queue:
            walltime = "2:00:00"  # "24:00:00"

        processors = "nodes=1:ppn=1"
        command = "python %s" % i
        standard_output = "/std_%s" % file_name[1]
        error_output = "/err_%s" % file_name[1]
        name_file = "/qsub_cmd_%s" % file_name[1]

        job_string = """#!/bin/bash
        #PBS -N %s
        #PBS -q %s 
        #PBS -l walltime=%s
        #PBS -l %s
        #PBS -o %s
        #PBS -e %s 
        cd %s
        %s""" % (job_name, queue, walltime, processors,
                 results_path + folder_name + standard_output, results_path +
                 folder_name + error_output, results_path, command)

        # job_file = jobs_path + folder_name + '/' + name_file
        job_file = jobs_path + name_file
        with open(job_file, "w") as job_script:
            job_script.write(job_string)

        # Send job_string to qsub
        cmd = "qsub %s" % (job_file)
        subprocess.call(cmd, shell=True)
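For reference, each generated job file contains only an import preamble plus a single call; a sketch of the expected contents for a hypothetical subject and function (the path and function name are placeholders):

# <cluster_path>/generated_jobs/<folder_name>/<suffix_name>_sub01-ab_123456.py
import sys
sys.path.append('/path/to/scripts')           # config.scripts_path
from ABseq_func import cluster_funcs
cluster_funcs.epoch_items('sub01-ab_123456')  # "%s('%s')" % (function_name, subject)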
Example #12
evokeds = [
    ev.filter(l_freq=None, h_freq=30).apply_baseline(
        (-0.050, 0)).crop(0.0, 0.25) for ev in evokeds
]

for alpha in [0.3, 0.5, 0.8]:
    print('Computing group inverse with alpha=0.%i' % int(alpha * 10))
    stcs = compute_group_inverse(fwds,
                                 evokeds,
                                 noise_covs,
                                 method='multitasklasso',
                                 spatiotemporal=True,
                                 alpha=alpha)

    save_lasso_res_path = config.result_path + '/groupmne/lasso/'
    utils.create_folder(save_lasso_res_path)
    np.save(save_lasso_res_path + '/stcs_alpha0%i.npy' % int(alpha * 10), stcs)
    # sstcs = np.load(save_lasso_res_path+'/stcs_alpha0%i.npy'%int(alpha*10),allow_pickle=True)

############################################
# Visualize stcs
save_lasso_res_path = config.result_path + '/groupmne/lasso/'
alpha = 0.8
stcs = np.load(save_lasso_res_path + '/stcs_alpha0%i.npy' % int(alpha * 10),
               allow_pickle=True)
# data = np.average([s.data for s in stcs], axis=0)
data = np.average([abs(s.data) for s in stcs],
                  axis=0)  # with absolute values !!
stc = mne.SourceEstimate(data, stcs[0].vertices, stcs[0].tmin, stcs[0].tstep,
                         stcs[0].subject)
stc.plot(subject='fsaverage', hemi='split', views=['lat'],
         subjects_dir=op.join(config.root_path, 'data', 'MRI', 'fs_converted'),
         time_viewer=True)  # the call was truncated; these arguments are assumptions mirroring example #9
Example #13
analysis_name = "SequenceID_StimPosition_Complexity_RepeatAlter_ChunkBeginning_ChunkEnd_OpenedChunks_ChunkDepth_ChunkNumber_WithinChunkPosition_ClosedChunks_no_baseline"
analysis_name = "StimID_SequenceID_StimPosition_Complexity_RepeatAlter_ChunkBeginning_ChunkEnd_OpenedChunks_ChunkDepth_ChunkNumber_WithinChunkPosition_ClosedChunks_no_baseline"
analysis_name = "_no_baseline_all_dataStimID_SequenceID_StimPosition_Complexity_RepeatAlter_ChunkBeginning_ChunkEnd_OpenedChunks_ChunkDepth_ChunkNumber_WithinChunkPosition_ClosedChunks_no_baseline"

# ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======
#                                            LOOKING AT PREDICTORS
# ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======  ======

diss_matrix, md, md2, dis, times = rsa_funcs.Predictor_dissimilarity_matrix_and_md(
    analysis_name)

#  --- Visualize the predictor matrices ---

# tick_filter=lambda md: md['StimPosition'] == 1
save_regressors_path = config.result_path + "/rsa/dissim/" + analysis_name + '/regressors_matrix/'
utils.create_folder(save_regressors_path)

for key in diss_matrix.keys():
    viz_predictor_mats(getattr(dis, key), md, md2=md2)
    plt.gcf().savefig(save_regressors_path + key + '.png')
    plt.close('all')

# --- Determine which regressors are too correlated ---

correlation_matrix = np.zeros(
    (len(diss_matrix.keys()), len(diss_matrix.keys())))

for k, key1 in enumerate(diss_matrix.keys()):
    for l, key2 in enumerate(diss_matrix.keys()):
        r = np.corrcoef([
            np.reshape(diss_matrix[key1].data, diss_matrix[key1].data.size),
            np.reshape(diss_matrix[key2].data, diss_matrix[key2].data.size)
        ])
        # completion of the truncated snippet: store the pairwise Pearson r
        correlation_matrix[k, l] = r[0, 1]