Example #1
def writing_ply_files(subject, surface_type, lab, facL_lab, vtx, vtxL, labels,
                      hemi, output_fol):
    # Vertices for the current label
    nV = vtx.shape[0]
    facL_lab_flat = utils.list_flatten(facL_lab)
    if len(facL_lab_flat) == 0:
        print("Cant write {}, no vertices!".format(labels[lab]))
        return True
    vidx = list(set(facL_lab_flat))
    vtxL[lab] = vtx[vidx]
    # Reindex the faces
    tmp = np.zeros(nV, dtype=int)
    tmp[vidx] = np.arange(len(vidx))
    try:
        facL_lab = np.reshape(tmp[facL_lab_flat],
                              (len(facL_lab), len(facL_lab[0])))
    except Exception:
        print(traceback.format_exc())
        dumps_fol = utils.make_dir(op.join(MMVT_DIR, subject, 'dumps'))
        utils.save((lab, facL_lab, vtx, vtxL, labels, hemi, output_fol),
                   op.join(dumps_fol,
                           'parcelate_cortex_writing_ply_files.pkl'))
        return False

    # Save the resulting surface
    label_name = '{}-{}.ply'.format(
        lu.get_label_hemi_invariant_name(labels[lab].name), hemi)
    # print('Writing {}'.format(op.join(output_fol, label_name)))
    # todo: add distance between hemis if inflated like with the activity surfaces
    if surface_type == 'inflated':
        verts_offset = 55 if hemi == 'rh' else -55
        vtxL[lab][:, 0] = vtxL[lab][:, 0] + verts_offset
    utils.write_ply_file(vtxL[lab], facL_lab, op.join(output_fol, label_name),
                         True)
    return op.isfile(op.join(output_fol, label_name))
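A minimal, self-contained sketch of the face-reindexing trick used above: a lookup array maps full-surface vertex indices to the label's compact indices. The arrays are made up for illustration; only numpy is needed.

import numpy as np

verts = np.random.rand(10, 3)             # full-surface vertex coordinates
faces = np.array([[2, 5, 7], [5, 7, 9]])  # label faces indexing the full surface
vidx = sorted(set(faces.ravel()))         # vertices actually used by the label
lookup = np.zeros(len(verts), dtype=int)  # old vertex index -> compact index
lookup[vidx] = np.arange(len(vidx))
label_verts = verts[vidx]
label_faces = lookup[faces]               # faces now index into label_verts
assert np.allclose(verts[faces], label_verts[label_faces])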
Example #2
def calc_vertices_lookup_tables(subject, modality, window, inverse_method,
                                labels, inv):
    output_fname = op.join(MMVT_DIR, subject, 'vertices_lookup_tables.pkl')
    # if op.isfile(output_fname):
    #     vertices_ind_to_no_lookup, vertices_no_to_ind_lookup, vertices_labels_lookup = utils.load(output_fname)
    #     return vertices_lookup, vertices_labels_lookup
    root_dir = op.join(EEG_DIR if modality == 'eeg' else MEG_DIR, subject)
    powers_fol = op.join(
        root_dir,
        '{}-epilepsy-{}-{}-{}-induced_power'.format(subject, inverse_method,
                                                    modality, window))
    powers_files = glob.glob(
        op.join(powers_fol, 'epilepsy_*_induced_power.npy'))
    start_ind = 0
    vertice_label = None
    vertices_ind_to_no_lookup = {}
    vertices_labels_lookup = {}
    for file_ind, powers_fname in enumerate(powers_files):
        label_name = utils.namebase(powers_fname).split('_')[1]
        label = [l for l in labels if l.name == label_name][0]
        vertno, src_sel = mne.minimum_norm.inverse.label_src_vertno_sel(
            label, inv['src'])
        for vert_ind, vert_num in zip(
                range(start_ind, start_ind + len(src_sel)),
                vertno[0] if label.hemi == 'lh' else vertno[1]):
            vertices_ind_to_no_lookup[vert_ind] = vert_num
            vertices_labels_lookup[vert_ind] = label
        # if start_ind <= vertices_ind < start_ind + len(src_sel):
        #     vertice_label = label
        #     break
        start_ind += len(src_sel)
    utils.save((vertices_ind_to_no_lookup, vertices_labels_lookup),
               output_fname)
    return vertices_ind_to_no_lookup, vertices_labels_lookup
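The lookup construction above relies on each label occupying a contiguous block of rows in the stacked power matrix. A toy version of that running-index bookkeeping, with made-up label names and vertex numbers, looks like this:

blocks = {'labelA-lh': [10, 11, 12], 'labelB-lh': [40, 41]}  # vertno per label
ind_to_vertno, ind_to_label = {}, {}
start = 0
for label_name, vertno in blocks.items():
    for row, v in zip(range(start, start + len(vertno)), vertno):
        ind_to_vertno[row] = v
        ind_to_label[row] = label_name
    start += len(vertno)
print(ind_to_vertno, ind_to_label)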
Example #3
def morph_labels(morph_from_subject, morph_to_subject, atlas, hemi, n_jobs=1):
    labels_fol = op.join(SUBJECTS_DIR, morph_to_subject, 'label')
    labels_fname = op.join(labels_fol,
                           '{}.{}.{}.pkl'.format(hemi, atlas, morph_from_subject))
    annot_file = op.join(SUBJECTS_DIR, morph_from_subject, 'label',
                         '{}.{}.annot'.format(hemi, atlas))
    if not op.isfile(annot_file):
        print("Can't find the annot file in {}!".format(annot_file))
        return []
    if not op.isfile(labels_fname):
        labels = mne.read_labels_from_annot(morph_from_subject,
                                            atlas,
                                            subjects_dir=SUBJECTS_DIR,
                                            hemi=hemi)
        if morph_from_subject != morph_to_subject:
            morphed_labels = []
            for label in labels:
                label.values.fill(1.0)
                morphed_label = label.morph(morph_from_subject,
                                            morph_to_subject, 5, None,
                                            SUBJECTS_DIR, n_jobs)
                morphed_labels.append(morphed_label)
            labels = morphed_labels
        utils.save(labels, labels_fname)
    else:
        labels = utils.load(labels_fname)
    return labels
Example #4
def calc_labels_center_of_mass(subject, atlas, read_from_annotation=True, surf_name='pial', labels_fol='', labels=None):
    import csv
    # if (read_from_annotation):
    #     labels = mne.read_labels_from_annot(subject, atlas, 'both', surf_name, subjects_dir=SUBJECTS_DIR)
    #     if len(labels) == 0:
    #         print('No labels were found in {} annotation file!'.format(atlas))
    # else:
    #     labels = []
    #     if labels_fol == '':
    #         labels_fol = op.join(SUBJECTS_DIR, subject, 'label', atlas)
    #     for label_file in glob.glob(op.join(labels_fol, '*.label')):
    #         label = mne.read_label(label_file)
    #         labels.append(label)
    #     if len(labels) == 0:
    #         print('No labels were found in {}!'.format(labels_fol))
    labels = lu.read_labels(subject, SUBJECTS_DIR, atlas)
    if len(labels) > 0:
        center_of_mass = lu.calc_center_of_mass(labels)
        with open(op.join(SUBJECTS_DIR, subject, 'label', '{}_center_of_mass.csv'.format(atlas)), 'w') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            for label in labels:
                writer.writerow([label.name, *center_of_mass[label.name]])
        com_fname = op.join(SUBJECTS_DIR, subject, 'label', '{}_center_of_mass.pkl'.format(atlas))
        blend_fname = op.join(MMVT_DIR, subject, '{}_center_of_mass.pkl'.format(atlas))
        utils.save(center_of_mass, com_fname)
        shutil.copyfile(com_fname, blend_fname)
    return len(labels) > 0 and op.isfile(com_fname) and op.isfile(blend_fname)
Example #5
def calc_eeg_mesh_verts_sensors(subject, sensors_verts, helmet_verts, modality='eeg'):
    from scipy.spatial.distance import cdist
    dists = cdist(sensors_verts, helmet_verts)
    max_dist = np.max(np.min(dists, axis=1))
    if max_dist > 0.01:
        raise Exception('Wrong distances!')
    eeg_helmet_indices = np.argmin(dists, axis=1)
    utils.save(eeg_helmet_indices, op.join(MMVT_DIR, subject, modality, '{}_vertices_sensors.pkl'.format(modality)))
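A stand-alone sketch of the nearest-vertex mapping computed above, using random points in place of real sensor and helmet meshes (scipy and numpy only):

import numpy as np
from scipy.spatial.distance import cdist

sensors = np.random.rand(5, 3)
helmet = np.random.rand(100, 3)
dists = cdist(sensors, helmet)         # (n_sensors, n_helmet_verts)
nearest = np.argmin(dists, axis=1)     # helmet vertex closest to each sensor
max_err = np.min(dists, axis=1).max()  # worst-case sensor-to-mesh distance
print(nearest, max_err)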
Example #6
def convert_dipoles_to_mri_space(subject, dipoles, overwrite=False):
    '''
    Convert dipole positions and moments from head coordinates to MRI (surface RAS) coordinates.
    :param dipoles: dict mapping dipole name -> list of dipole tuples (begin/end time, position, moment, GOF)
    :param overwrite: overwrite an existing output file
    :return: True if the output pickle exists
    '''
    output_fname = op.join(utils.make_dir(op.join(MMVT_DIR, subject, 'meg')), 'dipoles.pkl')
    if op.isfile(output_fname) and not overwrite:
        return True
    # If the trans file doesn't exist, you should calculate it using mne-python / MNE-analyzer
    trans_file = meg.find_trans_file(subject=subject)
    head_mri_trans = mne.transforms.read_trans(trans_file)
    head_mri_trans = mne.transforms._ensure_trans(head_mri_trans, 'head', 'mri')

    mri_dipoles = defaultdict(list)
    for dipole_name, cluster_dipoles in dipoles.items():
        for dipole in cluster_dipoles:
            # begin end(ms)  X (mm)  Y (mm)  Z (mm)  Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm)  g(%)
            begin_t, end_t, x, y, z, q, qx, qy, qz, gf = dipole
            mri_pos = mne.transforms.apply_trans(head_mri_trans, [np.array([x, y, z]) * 1e-3])[0]
            dir_xyz = mne.transforms.apply_trans(head_mri_trans, [np.array([qx, qy, qz]) / q])[0]
            print('{}: loc:{} dir:{}'.format(dipole_name, mri_pos, dir_xyz))
            mri_dipoles[dipole_name].append([begin_t, end_t, *mri_pos, q, *dir_xyz, gf])
    print('Saving dipoles in {}'.format(output_fname))
    utils.save(mri_dipoles, output_fname)
    return op.isfile(output_fname)
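To exercise the head-to-MRI conversion without a real -trans.fif file, the sketch below applies an identity transform to one made-up dipole position; in practice the transform comes from mne.transforms.read_trans as above.

import numpy as np
import mne

head_mri_t = mne.transforms.Transform('head', 'mri', np.eye(4))  # stand-in for the real trans
xyz_mm = np.array([[25.0, -10.0, 60.0]])                         # dipole position in mm, head coords
mri_pos = mne.transforms.apply_trans(head_mri_t, xyz_mm * 1e-3)  # meters, MRI coords
print(mri_pos)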
Example #8
def find_clusters(subject,
                  contrast_name,
                  t_val,
                  atlas,
                  volume_name,
                  input_fol='',
                  load_from_annotation=True,
                  n_jobs=1):
    if input_fol == '':
        input_fol = op.join(BLENDER_ROOT_DIR, subject, 'fmri')
    contrast, connectivity, verts = init_clusters(subject, volume_name,
                                                  input_fol)
    clusters_labels = dict(threshold=t_val, values=[])
    for hemi in utils.HEMIS:
        clusters, _ = mne_clusters._find_clusters(
            contrast[hemi], t_val, connectivity=connectivity[hemi])
        # blobs_output_fname = op.join(input_fol, 'blobs_{}_{}.npy'.format(contrast_name, hemi))
        # print('Saving blobs: {}'.format(blobs_output_fname))
        # save_clusters_for_blender(clusters, contrast[hemi], blobs_output_fname)
        clusters_labels_hemi = find_clusters_overlapped_labeles(
            subject, clusters, contrast[hemi], atlas, hemi, verts[hemi],
            load_from_annotation, n_jobs)
        if clusters_labels_hemi is None:
            print("Can't find clusters in {}!".format(hemi))
        else:
            clusters_labels['values'].extend(clusters_labels_hemi)
    # todo: should be pkl, not npy
    clusters_labels_output_fname = op.join(
        BLENDER_ROOT_DIR, subject, 'fmri',
        'clusters_labels_{}.pkl'.format(volume_name))
    print('Saving clusters labels: {}'.format(clusters_labels_output_fname))
    utils.save(clusters_labels, clusters_labels_output_fname)
def calc_ana(overwrite=False, only_linda=False):
    good_subjects_fname = op.join(root_path, 'good_subjects.npz')
    ana_results_fname = op.join(root_path, 'ana_results.pkl')
    if not op.isfile(ana_results_fname) or not op.isfile(
            good_subjects_fname) or overwrite:
        laterality, to_use, TR, values, all_subjects = read_scoring()
        if only_linda:
            subject_list = get_linda_subjects()
            inds = np.where(np.in1d(all_subjects, subject_list))[0]
            print(inds)
            good_subjects = all_subjects[inds]
            master_grouping = (np.sum(
                (values <= 5).astype(int), axis=1) > 0).astype(int)
            subject_groups = master_grouping[inds]
            disturbed_inds = np.array(np.where(subject_groups == 1)[0])
            preserved_inds = np.array(np.where(subject_groups == 0)[0])
            # laterality = ['L'] * len(good_subjects)
            bad_indices, labels = check_subjects_labels(
                good_subjects, check_labels_indices=False)
        else:
            good_subjects, good_subjects_inds, labels = find_good_inds(
                all_subjects, only_left, TR, fast_TR, to_use, laterality)
            disturbed_inds, preserved_inds = calc_disturbed_preserved_inds(
                good_subjects_inds, values)
        dFC_res, std_mean_res, stat_conn_res = get_subjects_dFC(good_subjects)
        utils.save((dFC_res, std_mean_res, stat_conn_res, disturbed_inds,
                    preserved_inds, good_subjects, labels, laterality),
                   ana_results_fname)
    else:
        (dFC_res, std_mean_res, stat_conn_res, disturbed_inds, preserved_inds, good_subjects, labels, laterality) = \
            utils.load(ana_results_fname)
    print('disturbed_inds: {}'.format(disturbed_inds))
    print('preserved_inds: {}'.format(preserved_inds))
    return dFC_res, std_mean_res, stat_conn_res, disturbed_inds, preserved_inds, good_subjects, labels, laterality
def read_morphed_electrodes(electrodes,
                            template_system,
                            subjects_dir,
                            mmvt_dir,
                            overwrite=False):
    subject_to = 'fsaverage5' if template_system == 'ras' else 'colin27' if template_system == 'mni' else template_system
    output_fname = op.join(mmvt_dir, subject_to, 'electrodes',
                           'template_electrodes.pkl')
    if op.isfile(output_fname) and not overwrite:
        return
    t1_header = nib.load(op.join(subjects_dir, subject_to, 'mri',
                                 'T1.mgz')).header
    trans = t1_header.get_vox2ras_tkr()
    template_electrodes = defaultdict(list)
    bad_subjects, good_subjects = [], []
    for subject in electrodes.keys():
        if subject == subject_to:
            continue
        input_fname = op.join(subjects_dir, subject, 'electrodes',
                              f'stim_electrodes_to_{subject_to}.txt')
        if not op.isfile(input_fname):
            bad_subjects.append(subject)
            continue
        print('Reading {} ({})'.format(
            input_fname, utils.file_modification_time(input_fname)))
        vox = np.genfromtxt(input_fname, dtype=float, delimiter=' ')
        tkregs = apply_trans(trans, vox)
        for tkreg, (elc_name, _) in zip(tkregs, electrodes[subject]):
            template_electrodes[subject].append(
                (f'{subject}_{elc_name}', tkreg))
        good_subjects.append(subject)
    utils.save(template_electrodes, output_fname)
    print('read_morphed_electrodes: {}'.format(op.isfile(output_fname)))
    print('good subjects: {}'.format(good_subjects))
    print('bad subjects: {}'.format(bad_subjects))
Example #11
def calc_dipoles_rois(subject, atlas='laus125', overwrite=False, n_jobs=4):
    links_dir = utils.get_links_dir()
    subjects_dir = utils.get_link_dir(links_dir, 'subjects')
    mmvt_dir = utils.get_link_dir(links_dir, 'mmvt')
    diploes_rois_output_fname = op.join(mmvt_dir, subject, 'meg', 'dipoles_rois.pkl')
    if op.isfile(diploes_rois_output_fname) and not overwrite:
        diploes_rois = utils.load(diploes_rois_output_fname)
        for dip in diploes_rois.keys():
            diploes_rois[dip]['cortical_probs'] *= 1/sum(diploes_rois[dip]['cortical_probs'])
            diploes_rois[dip]['subcortical_probs'] = []
            diploes_rois[dip]['subcortical_rois'] = []
        # coritcal_labels = set(utils.flat_list_of_lists([diploes_rois[k]['cortical_rois'] for k in diploes_rois.keys()]))
        utils.save(diploes_rois, diploes_rois_output_fname)
        return True

    diploes_input_fname = op.join(mmvt_dir, subject, 'meg', 'dipoles.pkl')
    if not op.isfile(diploes_input_fname):
        print('No dipoles file!')
        return False

    labels = lu.read_labels(subject, subjects_dir, atlas, n_jobs=n_jobs)
    labels = list([{'name': label.name, 'hemi': label.hemi, 'vertices': label.vertices}
                   for label in labels])
    if len(labels) == 0:
        print('Can\'t find the labels for atlas {}!'.format(atlas))
        return False

    # find the find_rois package
    mmvt_code_fol = utils.get_mmvt_code_root()
    ela_code_fol = op.join(utils.get_parent_fol(mmvt_code_fol), 'electrodes_rois')
    if not op.isdir(ela_code_fol) or not op.isfile(op.join(ela_code_fol, 'find_rois', 'main.py')):
        print("Can't find ELA folder!")
        print('git pull https://github.com/pelednoam/electrodes_rois.git')
        return False

    # load the find_rois package
    try:
        import sys
        if ela_code_fol not in sys.path:
            sys.path.append(ela_code_fol)
        from find_rois import main as ela
    except Exception:
        print('Can\'t load find_rois package!')
        utils.print_last_error_line()
        return False

    dipoles_dict = utils.load(diploes_input_fname)
    diploles_names, dipoles_pos = [], []
    for cluster_name, dipoles in dipoles_dict.items():
        for begin_t, _, x, y, z, _, _, _, _, _ in dipoles:
            dipole_name = '{}_{}'.format(cluster_name, begin_t) if len(dipoles) > 1 else cluster_name
            diploles_names.append(dipole_name.replace(' ', ''))
            dipoles_pos.append([k * 1e3 for k in [x, y, z]])
    dipoles_rois = ela.identify_roi_from_atlas(
        atlas, labels, diploles_names, dipoles_pos, approx=3, elc_length=0, hit_only_cortex=True,
        subjects_dir=subjects_dir, subject=subject, n_jobs=n_jobs)
    # Convert the list to a dict
    dipoles_rois_dict = {roi['name']: roi for roi in dipoles_rois}
    utils.save(dipoles_rois_dict, diploes_rois_output_fname)
Example #12
def save_matlab_labels_vertices(subject, aparc_name):
    for hemi in HEMIS:
        matlab_fname = op.join(SUBJECTS_DIR, subject, 'label', '{}.{}.annot_labels.m'.format(hemi, aparc_name))
        if op.isfile(matlab_fname):
            labels_dic = matlab_utils.matlab_cell_arrays_to_dict(matlab_fname)
            utils.save(labels_dic, op.join(MMVT_DIR, subject, 'labels_dic_{}_{}.pkl'.format(aparc_name, hemi)))
        else:
            return False
    return True
Example #13
def _calc_func_rois_vertives_lookup_parallel(labels_fol):
    lookup_fname = op.join(labels_fol, 'vertices_lookup.pkl')
    labels = [mne.read_label(label_fname) for label_fname in glob.glob(op.join(labels_fol, '*.label'))]
    lookup = {'rh': {}, 'lh': {}}
    for label in labels:
        for vertice in label.vertices:
            lookup[label.hemi][vertice] = label.name
    print('Saving {}'.format(lookup_fname))
    utils.save(lookup, lookup_fname)
Example #15
def read_morphed_electrodes(subjects_electrodes,
                            subject_to='colin27',
                            bipolar=True,
                            prefix='morphed_'):
    fol = utils.make_dir(op.join(MMVT_DIR, subject_to, 'electrodes'))
    bipolar_output_fname = op.join(
        fol, '{}electrodes_bipolar_positions.npz'.format(prefix))
    monopolar_output_fname = op.join(
        fol, '{}electrodes_positions.npz'.format(prefix))
    bad_electrodes, bad_subjects = [], set()
    template_bipolar_electrodes, template_electrodes = defaultdict(
        list), defaultdict(list)
    morphed_electrodes_fname = op.join(MMVT_DIR, subject_to, 'electrodes',
                                       'morphed_electrodes.pkl')
    if False:  #op.isfile(morphed_electrodes_fname):
        template_bipolar_electrodes, template_electrodes = utils.load(
            morphed_electrodes_fname)
    else:
        for subject, electodes_names in subjects_electrodes.items():
            electrodes_pos = {}
            for elecs_bipolar_names in electodes_names:
                electrodes_found = True
                for elec_name in elecs_bipolar_names:
                    elec_input_fname = op.join(
                        MMVT_DIR, subject, 'electrodes', 'ela_morphed',
                        '{}_ela_morphed.npz'.format(elec_name))
                    if not op.isfile(elec_input_fname):
                        print('{} {} not found!'.format(subject, elec_name))
                        bad_electrodes.append('{}_{}'.format(
                            subject, elec_name))
                        bad_subjects.add(subject)
                        electrodes_found = False
                        break
                    else:
                        d = np.load(elec_input_fname)
                    electrodes_pos[elec_name] = d['pos']
                    template_electrodes[subject].append((elec_name, d['pos']))
                if not electrodes_found:
                    continue
                elc1, elc2 = elecs_bipolar_names
                (group, num1), (_, num2) = utils.elec_group_number(
                    elc1), utils.elec_group_number(elc2)
                if num1 > num2:
                    elc1, elc2 = elc2, elc1
                    num1, num2 = num2, num1
                bipolar_elec_name = '{}{}-{}'.format(group, num2, num1)
                pos1, pos2 = electrodes_pos[elc1], electrodes_pos[elc2]
                bipolar_pos = pos1 + (pos2 - pos1) / 2
                template_bipolar_electrodes[subject].append(
                    (bipolar_elec_name, bipolar_pos))
        utils.save((template_bipolar_electrodes, template_electrodes),
                   morphed_electrodes_fname)

    save_electrodes(template_electrodes, monopolar_output_fname)
    save_electrodes(template_bipolar_electrodes, bipolar_output_fname)
    print('Bad subjects:')
    print(bad_subjects)
    return monopolar_output_fname, bipolar_output_fname
Example #16
def calc_scan_rescan_diff(subject, do_plot_hist=True, overwrite=False):
    means_input_fnames = [
        op.join(RESULTS_FOL, 'aparc_aseg_hists', subject, scan_rescan,
                'aparc_values.pkl') for scan_rescan in SCAN_RESCAN
    ]
    if not all([op.isfile(fname) for fname in means_input_fnames]):
        print('calc_scan_rescan_diff ({}): not all input files exist!'.format(subject))
        return
    means_diff_fname = op.join(RESULTS_FOL, 'aparc_aseg_hists', subject,
                               'aparc_values_diffs.pkl')
    mmvt_file_name = '{}_ASL_scan_rescan_diffs'.format(subject)
    mmvt_output_fname = op.join(
        utils.make_dir(op.join(MMVT_DIR, 'fsaverage', 'labels',
                               'labels_data')),
        '{}.npz'.format(mmvt_file_name))
    if op.isfile(mmvt_output_fname) and op.isfile(
            means_diff_fname) and not overwrite:
        print('calc_scan_rescan_diff: files exist for {}'.format(subject))
        return True
    scan_means, rescan_means = [
        utils.load(fname) for fname in means_input_fnames
    ]
    region_names = scan_means.keys()
    diffs = {
        region: scan_means.get(region, 0) - rescan_means.get(region, 0)
        for region in region_names
    }
    utils.save(diffs, means_diff_fname)
    data = np.array([diffs[region_name] for region_name in region_names])
    labels_names = [get_aparc_label_name(region) for region in region_names]
    minmax = utils.calc_abs_minmax(data)

    figure_output_fname = op.join(RESULTS_FOL, 'aparc_aseg_hists', subject,
                                  'labels_scan_rescan_diffs.jpg')
    if do_plot_hist and (not op.isfile(figure_output_fname) or overwrite):
        fig = plt.figure()
        # ax = fig.add_subplot(111)
        x = range(len(labels_names))
        plt.bar(x, data)
        # plt.xticks(x, labels_names, rotation=90)
        plt.title('{} scan-rescan ASL diff'.format(subject))
        plt.ylabel('ASL diff')
        print('Saving bar plot in {}'.format(figure_output_fname))
        plt.savefig(figure_output_fname)
        plt.close()

    np.savez(mmvt_output_fname,
             names=labels_names,
             atlas='aparc',
             data=data,
             title=mmvt_file_name,
             data_min=-minmax,
             data_max=minmax,
             cmap='BuPu-YlOrRd')
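The .npz payload written at the end follows a simple names/data/atlas layout; a toy round-trip with made-up label names and values might look like this:

import numpy as np

np.savez('demo_labels_data.npz',
         names=['precentral-lh', 'precentral-rh'], atlas='aparc',
         data=np.array([0.3, -0.2]), title='demo_ASL_scan_rescan_diffs',
         data_min=-0.3, data_max=0.3, cmap='BuPu-YlOrRd')
d = np.load('demo_labels_data.npz')
print(d.files, d['data'])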
Example #17
def find_functional_rois(subject,
                         ictal_clips,
                         modality,
                         seizure_times,
                         atlas,
                         min_cluster_size,
                         inverse_method,
                         overwrite=False,
                         n_jobs=4):
    fwd_usingMEG, fwd_usingEEG = meg.get_fwd_flags(modality)
    modality_fol = op.join(MMVT_DIR, subject, meg.modality_fol(modality))
    stcs_fol = op.join(modality_fol,
                       'ictal-{}-zvals-stcs'.format(inverse_method))
    ictlas_fname = op.join(
        modality_fol, '{}-epilepsy-{}-{}-amplitude-zvals-ictals.pkl'.format(
            subject, inverse_method, modality))
    # Make sure we have a morph map, and if not, create it here, and not in the parallel function
    mne.surface.read_morph_map(subject, subject, subjects_dir=SUBJECTS_DIR)
    connectivity = anat.load_connectivity(subject)
    if overwrite:
        utils.delete_folder_files(op.join(modality_fol, 'clusters'))
    if op.isfile(ictlas_fname):
        ictals = utils.load(ictlas_fname)
    else:
        params = [(subject, clip_fname, inverse_method, modality,
                   seizure_times, stcs_fol, n_jobs)
                  for clip_fname in ictal_clips]
        ictals = utils.run_parallel(_calc_ictal_and_baseline_parallel, params,
                                    n_jobs)
        utils.save(ictals, ictlas_fname)
    for stc_name, ictal_stc, mean_baseline in ictals:
        max_ictal = ictal_stc.data.max()
        if max_ictal < mean_baseline:
            print('max ictal ({}) < mean baseline ({})!'.format(
                max_ictal, mean_baseline))
            continue
        meg.find_functional_rois_in_stc(subject,
                                        subject,
                                        atlas,
                                        utils.namebase(stc_name),
                                        mean_baseline,
                                        threshold_is_precentile=False,
                                        extract_time_series_for_clusters=False,
                                        time_index=0,
                                        min_cluster_size=min_cluster_size,
                                        min_cluster_max=mean_baseline,
                                        fwd_usingMEG=fwd_usingMEG,
                                        fwd_usingEEG=fwd_usingEEG,
                                        stc_t_smooth=ictal_stc,
                                        modality=modality,
                                        connectivity=connectivity,
                                        n_jobs=n_jobs)
Example #18
def load_coherence_meta_data_from_matlab(subject, matlab_electrodes_data_file):
    input_file = op.join(SUBJECTS_DIR, subject, 'electrodes',
                         matlab_electrodes_data_file)
    d = utils.Bag(sio.loadmat(input_file))
    d['electrodes'] = [e[0][0].astype(str) for e in d['electrodes']]
    for f in ['Tdurr', 'Toffset', 'dt']:
        d[f] = d[f][0][0]
    meta_data = {
        f: d[f]
        for f in d.keys() if f in ['Tdurr', 'Toffset', 'dt', 'electrodes']
    }
    utils.save(meta_data,
               op.join(SUBJECTS_DIR, subject, 'electrodes_coh_meta_data.pkl'))
Example #19
def create_spatial_connectivity(subject):
    try:
        connectivity_per_hemi = {}
        for hemi in utils.HEMIS:
            d = np.load(op.join(SUBJECTS_DIR, subject, 'mmvt', '{}.pial.npz'.format(hemi)))
            connectivity_per_hemi[hemi] = mne.spatial_tris_connectivity(d['faces'])
        utils.save(connectivity_per_hemi, op.join(MMVT_DIR, subject, 'spatial_connectivity.pkl'))
        success = True
    except Exception:
        print('Error in create_spatial_connectivity!')
        print(traceback.format_exc())
        success = False
    return success
def read_morphed_electrodes(electrodes,
                            template_system,
                            subjects_dir,
                            mmvt_dir,
                            overwrite=False):
    subject_to = 'fsaverage' if template_system == 'ras' else 'colin27' if template_system == 'mni' else template_system
    fol = utils.make_dir(op.join(mmvt_dir, subject_to, 'electrodes'))
    output_fname = op.join(fol, 'template_electrodes.pkl')
    if op.isfile(output_fname) and not overwrite:
        return
    subject_to_mri = subject_to  #'cvs_avg35_inMNI152' if subject_to == 'fsaverage' else subject_to
    t1_header = nib.load(op.join(subjects_dir, subject_to_mri, 'mri',
                                 'T1.mgz')).header
    brain_mask_fname = op.join(subjects_dir, subject_to_mri, 'mri',
                               'brainmask.mgz')
    brain_mask = nib.load(brain_mask_fname).get_data() if op.isfile(
        brain_mask_fname) else None
    trans = t1_header.get_vox2ras_tkr()
    template_electrodes = defaultdict(list)
    bad_subjects, good_subjects = [], []
    for subject in electrodes.keys():
        if subject == subject_to:
            continue
        morphed_electrodes_file_name = 'electrodes_morph_to_{}.txt'.format(
            subject_to)
        input_fname = op.join(MMVT_DIR, subject, 'electrodes',
                              morphed_electrodes_file_name)
        if not op.isfile(input_fname):
            print(
                'read_morphed_electrodes: Can\'t find {}!'.format(input_fname))
            bad_subjects.append(subject)
            continue
        print('Reading {} ({})'.format(
            input_fname, utils.file_modification_time(input_fname)))
        voxels = np.genfromtxt(input_fname, dtype=float, delimiter=' ')
        electrodes_names = [elc_name for (elc_name, _) in electrodes[subject]]
        if subject_to == 'fsaverage':
            voxels = tut.mni152_mni305(voxels)
        check_if_electrodes_inside_the_brain(subject, voxels, electrodes_names,
                                             brain_mask)
        write_morphed_electrodes_vox_into_csv(subject, subject_to, voxels,
                                              electrodes_names)
        tkregs = apply_trans(trans, voxels)
        for tkreg, (elc_name, _) in zip(tkregs, electrodes[subject]):
            template_electrodes[subject].append(
                ('{}_{}'.format(subject, elc_name), tkreg))
        good_subjects.append(subject)
    utils.save(template_electrodes, output_fname)
    print('read_morphed_electrodes: {}'.format(op.isfile(output_fname)))
    print('good subjects: {}'.format(good_subjects))
    print('bad subjects: {}'.format(bad_subjects))
Example #21
def load_tracula_trk(subject):
    tracks_fols = utils.get_subfolders(op.join(DTI_DIR, subject, 'dpath'))
    output_fol = op.join(BLENDER_ROOT_DIR, subject, 'dti', 'tracula')
    utils.make_dir(output_fol)
    for track_fol in tracks_fols:
        track_fol_name = os.path.basename(track_fol)
        print('Reading {}'.format(track_fol_name))
        track_gen, hdr = nib.trackvis.read(op.join(track_fol, 'path.pd.trk'), as_generator=True, points_space='rasmm')
        hdr = convert_header(hdr)
        vox2ras_trans = get_vox2ras_trans(subject)
        tracks = read_tracks(track_gen, hdr, vox2ras_trans)
        output_fname = op.join(output_fol, '{}.pkl'.format(track_fol_name))
        utils.save(tracks, output_fname)
        print('Save in {}'.format(output_fname))
Example #22
def load_tracula_trk(subject):
    tracks_fols = utils.get_subfolders(op.join(DTI_DIR, subject, 'dpath'))
    output_fol = op.join(BLENDER_ROOT_DIR, subject, 'dti', 'tracula')
    utils.make_dir(output_fol)
    for track_fol in tracks_fols:
        track_fol_name = os.path.basename(track_fol)
        print('Reading {}'.format(track_fol_name))
        track_gen, hdr = nib.trackvis.read(op.join(track_fol, 'path.pd.trk'),
                                           as_generator=True,
                                           points_space='rasmm')
        hdr = convert_header(hdr)
        vox2ras_trans = get_vox2ras_trans(subject)
        tracks = read_tracks(track_gen, hdr, vox2ras_trans)
        output_fname = op.join(output_fol, '{}.pkl'.format(track_fol_name))
        utils.save(tracks, output_fname)
        print('Save in {}'.format(output_fname))
Example #23
def find_clusters_tval_hist(subject, contrast_name, output_fol, input_fol='', n_jobs=1):
    contrast, connectivity, _ = init_clusters(subject, contrast_name, input_fol)
    clusters = {}
    tval_values = np.arange(2, 20, 0.1)
    now = time.time()
    for ind, tval in enumerate(tval_values):
        try:
            # utils.time_to_go(now, ind, len(tval_values), 5)
            clusters[tval] = {}
            for hemi in utils.HEMIS:
                clusters[tval][hemi], _ = mne_clusters._find_clusters(
                    contrast[hemi], tval, connectivity=connectivity[hemi])
            print('tval: {:.2f}, len rh: {}, lh: {}'.format(tval, max(map(len, clusters[tval]['rh'])),
                                                        max(map(len, clusters[tval]['lh']))))
        except Exception:
            print('error with tval {}'.format(tval))
    utils.save(clusters, op.join(output_fol, 'clusters_tval_hist.pkl'))
Example #24
def trans_tal_coords(files, template='colin27', overwrite=False):
    output_fol = utils.make_dir(op.join(MMVT_DIR, template, 'rois_peaks'))
    output_fname = op.join(output_fol, 'rois.pkl')
    if not op.isfile(output_fname) or overwrite:
        rois = get_tal_coordaintes(files)
        utils.save(rois, output_fname)
    else:
        rois = utils.load(output_fname)
    for roi in rois.keys():
        csv_fname = op.join(output_fol, '{}_mni.csv'.format(roi))
        csv_tal_fname = op.join(output_fol, '{}_tal.csv'.format(roi))
        with open(csv_fname, 'w') as csv_file_mni, open(csv_tal_fname,
                                                        'w') as csv_file_tal:
            csv_mni_writer = csv.writer(csv_file_mni, delimiter=',')
            csv_tal_writer = csv.writer(csv_file_tal, delimiter=',')
            for mni, tal in zip(rois[roi]['mni'], rois[roi]['tal']):
                csv_mni_writer.writerow(mni)
                csv_tal_writer.writerow(tal)
Example #25
def sort_electrodes_groups(subject, bipolar, do_plot=True):
    from sklearn.decomposition import PCA
    electrodes, pos = read_electrodes_file(subject, bipolar)
    first_electrodes, first_pos, elc_pos_groups = find_first_electrode_per_group(electrodes, pos, bipolar)
    pca = PCA(n_components=2)
    pca.fit(first_pos)
    transformed_pos = pca.transform(pos)
    # transformed_pos_3d = PCA(n_components=3).fit(first_pos).transform(pos)
    transformed_first_pos = pca.transform(first_pos)
    groups_hemi = find_groups_hemi(electrodes, transformed_pos, bipolar)
    sorted_groups = sort_groups(first_electrodes, transformed_first_pos, groups_hemi, bipolar)
    print(sorted_groups)
    utils.save(sorted_groups, op.join(MMVT_DIR, subject, 'electrodes', 'sorted_groups.pkl'))
    if do_plot:
        # utils.plot_3d_scatter(pos, names=electrodes.tolist(), labels=first_electrodes.values())
        # electrodes_3d_scatter_plot(pos, first_pos)
        first_electrodes_names = list(first_electrodes.values())
        utils.plot_2d_scatter(transformed_first_pos, names=first_electrodes_names)
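A minimal sketch of the PCA step above: fit on one representative position per electrode group, then project all positions onto that 2-D plane. Random coordinates stand in for real electrode positions.

import numpy as np
from sklearn.decomposition import PCA

first_pos = np.random.rand(8, 3)   # one representative position per group
all_pos = np.random.rand(40, 3)    # all electrode positions
pca = PCA(n_components=2).fit(first_pos)
pos_2d = pca.transform(all_pos)    # project everything onto the group plane
print(pos_2d.shape)                # (40, 2)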
Example #26
def read_hippocampus_volumes(overwrite=False):
    output_fname = op.join(HOME_FOL, 'hippocampus_volumes.pkl')
    if not op.isfile(output_fname) or overwrite:
        subjects = get_subjects()
        all_volumes = {'rh': [], 'lh': []}
        for subject in tqdm(subjects):
            volumes = read_subject_hippocampus_volumes(subject)
            if volumes is None:
                continue
            for hemi in utils.HEMIS:
                all_volumes[hemi].append(volumes[hemi])
        utils.save(all_volumes, output_fname)
    else:
        all_volumes = utils.load(output_fname)
    for hemi in utils.HEMIS:
        x = np.array(all_volumes[hemi])
        plt.hist(np.diff(x))
        plt.title(hemi)
        plt.show()
Example #27
def save_labels_vertices(subject, aparc_name):
    annot_fname_temp = op.join(SUBJECTS_DIR, subject, 'label', '{}.{}.annot'.format('{hemi}', aparc_name))
    if not utils.hemi_files_exists(annot_fname_temp):
        pass
    labels_fnames = glob.glob(op.join(SUBJECTS_DIR, subject, 'label', aparc_name, '*.label'))
    if len(labels_fnames) > 0:
        labels = []
        for label_fname in labels_fnames:
            label = mne.read_label(label_fname)
            labels.append(label)
    else:
        # Read from the annotation file
        labels = utils.read_labels_from_annot(subject, aparc_name, SUBJECTS_DIR)
    labels_names, labels_vertices = defaultdict(list), defaultdict(list)
    for label in labels:
        labels_names[label.hemi].append(label.name)
        labels_vertices[label.hemi].append(label.vertices)
    output_fname = op.join(MMVT_DIR, subject, 'labels_vertices_{}.pkl'.format(aparc_name))
    utils.save((labels_names, labels_vertices), output_fname)
    return op.isfile(output_fname)
Example #28
def calc_labels_center_of_mass(subject, atlas, read_from_annotation=True, surf_name='pial', labels_fol='', labels=None):
    import csv
    labels = lu.read_labels(subject, SUBJECTS_DIR, atlas)
    if len(labels) > 0:
        if np.all(labels[0].pos == 0):
            verts = {}
            for hemi in utils.HEMIS:
                verts[hemi], _ = utils.read_pial_npz(subject, MMVT_DIR, hemi)
            for label in labels:
                label.pos = verts[label.hemi][label.vertices]
        center_of_mass = lu.calc_center_of_mass(labels)
        with open(op.join(SUBJECTS_DIR, subject, 'label', '{}_center_of_mass.csv'.format(atlas)), 'w') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            for label in labels:
                writer.writerow([label.name, *center_of_mass[label.name]])
        com_fname = op.join(SUBJECTS_DIR, subject, 'label', '{}_center_of_mass.pkl'.format(atlas))
        blend_fname = op.join(MMVT_DIR, subject, '{}_center_of_mass.pkl'.format(atlas))
        utils.save(center_of_mass, com_fname)
        shutil.copyfile(com_fname, blend_fname)
    return len(labels) > 0 and op.isfile(com_fname) and op.isfile(blend_fname)
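For intuition, once label.pos is filled from the pial surface, a label's center of mass presumably reduces to the mean of its vertex positions (lu.calc_center_of_mass is not shown here); a toy version:

import numpy as np

verts = np.random.rand(100, 3)             # pial vertices of one hemisphere
label_vertices = np.array([3, 7, 42, 55])  # vertex indices belonging to a label
label_pos = verts[label_vertices]          # what label.pos is filled with above
print(label_pos.mean(axis=0))              # the label's center of mass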
Example #30
def find_clusters(subject, contrast_name, t_val, atlas, volume_name, input_fol='', load_from_annotation=True, n_jobs=1):
    if input_fol == '':
        input_fol = op.join(BLENDER_ROOT_DIR, subject, 'fmri')
    contrast, connectivity, verts = init_clusters(subject, volume_name, input_fol)
    clusters_labels = dict(threshold=t_val, values=[])
    for hemi in utils.HEMIS:
        clusters, _ = mne_clusters._find_clusters(contrast[hemi], t_val, connectivity=connectivity[hemi])
        # blobs_output_fname = op.join(input_fol, 'blobs_{}_{}.npy'.format(contrast_name, hemi))
        # print('Saving blobs: {}'.format(blobs_output_fname))
        # save_clusters_for_blender(clusters, contrast[hemi], blobs_output_fname)
        clusters_labels_hemi = find_clusters_overlapped_labeles(
            subject, clusters, contrast[hemi], atlas, hemi, verts[hemi], load_from_annotation, n_jobs)
        if clusters_labels_hemi is None:
            print("Can't find clusters in {}!".format(hemi))
        else:
            clusters_labels['values'].extend(clusters_labels_hemi)
    # todo: should be pkl, not npy
    clusters_labels_output_fname = op.join(
        BLENDER_ROOT_DIR, subject, 'fmri', 'clusters_labels_{}.pkl'.format(volume_name))
    print('Saving clusters labels: {}'.format(clusters_labels_output_fname))
    utils.save(clusters_labels, clusters_labels_output_fname)
Example #31
def create_spatial_connectivity(subject):
    try:
        verts_neighbors_fname = op.join(MMVT_DIR, subject, 'verts_neighbors_{hemi}.pkl')
        connectivity_fname = op.join(MMVT_DIR, subject, 'spatial_connectivity.pkl')
        if utils.both_hemi_files_exist(verts_neighbors_fname) and op.isfile(connectivity_fname):
            return True
        connectivity_per_hemi = {}
        for hemi in utils.HEMIS:
            neighbors = defaultdict(list)
            d = np.load(op.join(MMVT_DIR, subject, 'surf', '{}.pial.npz'.format(hemi)))
            connectivity_per_hemi[hemi] = mne.spatial_tris_connectivity(d['faces'])
            rows, cols = connectivity_per_hemi[hemi].nonzero()
            for ind in range(len(rows)):
                neighbors[rows[ind]].append(cols[ind])
            utils.save(neighbors, verts_neighbors_fname.format(hemi=hemi))
        utils.save(connectivity_per_hemi, connectivity_fname)
        success = True
    except Exception:
        print('Error in create_spatial_connectivity!')
        print(traceback.format_exc())
        success = False
    return success
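The neighbors lookup is just the sparse adjacency matrix unpacked into a dict. The sketch below builds an equivalent adjacency directly from a toy faces array so it runs without surface files; mne.spatial_tris_connectivity produces the same kind of matrix.

from collections import defaultdict
import numpy as np
from scipy import sparse

faces = np.array([[0, 1, 2], [1, 2, 3]])
edges = np.vstack([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [0, 2]]])
rows = np.r_[edges[:, 0], edges[:, 1]]
cols = np.r_[edges[:, 1], edges[:, 0]]
adj = sparse.coo_matrix((np.ones(len(rows)), (rows, cols)), shape=(4, 4)).tocsr()
neighbors = defaultdict(list)
for r, c in zip(*adj.nonzero()):
    neighbors[r].append(c)
print(dict(neighbors))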
Example #33
def calc_accumulate_stc_as_time(subject, ictal_clips, modality, seizure_times, windows_length, windows_shift,
                                mean_baseline, inverse_method, n_jobs):
    modality_fol = op.join(MMVT_DIR, subject, meg.modality_fol(modality))
    stcs_fol = utils.make_dir(op.join(modality_fol, 'time_accumulate'))
    windows = calc_windows(seizure_times, windows_length, windows_shift)
    for ictal_clip in ictal_clips:
        output_fname = op.join(stcs_fol, '{}_labels_times.txt'.format(utils.namebase(ictal_clip)))
        output_str = '{}:\n'.format(utils.namebase(ictal_clip))
        for from_t, to_t in windows:
            ictals = calc_accumulate_stc(
                subject, [ictal_clip], modality, (from_t, to_t), mean_baseline, inverse_method, False, True, n_jobs)
            ictal_fname, stc_name, ictal_stc, mean_baseline, labels_times = ictals[0]
            utils.save(labels_times, op.join(stcs_fol, '{}_{:.2f}_{:.2f}_labels_times.pkl'.format(
                utils.namebase(stc_name), from_t, to_t)))
            for label_name, label_time in labels_times.items():
                output_str += '{}: {:.4f}\n'.format('_'.join(label_name.split('_')[-2:]), label_time)
        # stc_output_fname = op.join(stcs_fol, '{}-time-acc'.format(utils.namebase(stc_name)))
        # print('Saving accumulate stc: {}'.format(stc_output_fname))
        # ictal_stc.save(stc_output_fname)
        print('Saving {}'.format(output_fname))
        with open(output_fname, 'w') as output_file:
            print(output_str, file=output_file)
def calc_mann_whitney_results(dFC_res,
                              std_mean_res,
                              stat_conn_res,
                              disturbed_inds,
                              preserved_inds,
                              good_subjects,
                              labels,
                              laterality,
                              switch=True):
    good_subjects_fname = op.join(root_path, 'good_subjects.npz')
    mann_whitney_results_fname = op.join(root_path, 'mann_whitney_results.pkl')
    # good_subjects, disturbed_inds, preserved_inds, laterality = take_only_linda_subjects(
    #     good_subjects, disturbed_inds, preserved_inds, laterality)
    if True:  # op.isfile(mann_whitney_results_fname):
        mann_whitney_results = {}
        res, res_name = std_mean_res, 'std_mean_res'
        # for res, res_name in zip([dFC_res, std_mean_res], ['dFC_res', 'std_mean_res']): # stat_conn_res
        # for res, res_name in zip([std_mean_res], ['std_mean_res']):  # stat_conn_res
        if switch:
            res, rois_inds = switch_laterality(res, good_subjects, labels,
                                               laterality)
        else:
            rois_inds = find_labels_inds(labels)
        mann_whitney_results[res_name] = run_stat(res, disturbed_inds,
                                                  preserved_inds)
        print(mann_whitney_results[res_name])
        plot_comparisson_bars(res, res_name, labels[rois_inds], disturbed_inds,
                              preserved_inds, mann_whitney_results[res_name])
        utils.save(mann_whitney_results, mann_whitney_results_fname)
        np.savez(good_subjects_fname,
                 good_subjects=good_subjects,
                 labels=labels)
    else:
        mann_whitney_results = utils.load(mann_whitney_results_fname)
        d = np.load(good_subjects_fname)
        good_subjects = d['good_subjects']
        labels = d['labels']
    return mann_whitney_results, good_subjects, labels
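run_stat is not shown here; presumably it comes down to a Mann-Whitney U test between the disturbed and preserved groups, roughly along these lines (random data, scipy only):

import numpy as np
from scipy.stats import mannwhitneyu

disturbed = np.random.rand(12)   # metric values for the disturbed group
preserved = np.random.rand(15)   # metric values for the preserved group
stat, p = mannwhitneyu(disturbed, preserved, alternative='two-sided')
print(stat, p)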
Example #35
def read_eeg_sensors_layout(mri_subject):
    if not op.isfile(meg.INFO):
        raw = mne.io.read_raw_fif(meg.RAW)
        info = raw.info
        utils.save(info, meg.INFO)
    else:
        info = utils.load(meg.INFO)
    eeg_picks = mne.io.pick.pick_types(info, meg=False, eeg=True)
    eeg_pos = np.array([info['chs'][k]['loc'][:3] for k in eeg_picks])
    eeg_names = np.array([info['ch_names'][k] for k in eeg_picks])
    fol = op.join(MMVT_DIR, mri_subject, 'eeg')
    utils.make_dir(fol)
    output_fname = op.join(fol, 'eeg_positions.npz')
    if len(eeg_pos) > 0:
        trans_files = glob.glob(op.join(SUBJECTS_MRI_DIR, '*COR*.fif'))
        if len(trans_files) == 1:
            trans = mne.transforms.read_trans(trans_files[0])
            head_mri_t = mne.transforms._ensure_trans(trans, 'head', 'mri')
            eeg_pos = mne.transforms.apply_trans(head_mri_t, eeg_pos)
            eeg_pos *= 1000
            np.savez(output_fname, pos=eeg_pos, names=eeg_names)
            return True
    return False
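A self-contained way to exercise the same picking logic without a recorded raw file is to build an Info with a standard montage (channel names below are arbitrary; requires a reasonably recent MNE for Info.set_montage):

import numpy as np
import mne

info = mne.create_info(['Fz', 'Cz', 'Pz'], sfreq=1000., ch_types='eeg')
info.set_montage(mne.channels.make_standard_montage('standard_1020'))
picks = mne.pick_types(info, meg=False, eeg=True)
pos = np.array([info['chs'][k]['loc'][:3] for k in picks])
names = [info['ch_names'][k] for k in picks]
print(names, pos)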
Example #36
def find_clusters_tval_hist(subject,
                            contrast_name,
                            output_fol,
                            input_fol='',
                            n_jobs=1):
    contrast, connectivity, _ = init_clusters(subject, contrast_name,
                                              input_fol)
    clusters = {}
    tval_values = np.arange(2, 20, 0.1)
    now = time.time()
    for ind, tval in enumerate(tval_values):
        try:
            # utils.time_to_go(now, ind, len(tval_values), 5)
            clusters[tval] = {}
            for hemi in utils.HEMIS:
                clusters[tval][hemi], _ = mne_clusters._find_clusters(
                    contrast[hemi], tval, connectivity=connectivity[hemi])
            print('tval: {:.2f}, len rh: {}, lh: {}'.format(
                tval, max(map(len, clusters[tval]['rh'])),
                max(map(len, clusters[tval]['lh']))))
        except Exception:
            print('error with tval {}'.format(tval))
    utils.save(clusters, op.join(output_fol, 'clusters_tval_hist.pkl'))
Example #37
def trans_tal_coords(roi,
                     file_name,
                     subjects_dir,
                     template='colin27',
                     overwrite=False):
    subjects = {}
    output_fol = utils.make_dir(op.join(MMVT_DIR, template, 'rois_peaks'))
    csv_fname = op.join(output_fol, '{}.csv'.format(roi))
    pkl_fname = op.join(output_fol, '{}.pkl'.format(roi))
    if op.isfile(pkl_fname) and op.isfile(csv_fname) and not overwrite:
        print('Data already exist for {}'.format(roi))
        return
    driver = tu.yale_get_driver()
    files = list(utils.find_recursive(subjects_dir, file_name))
    for fname in tqdm(files):
        lines = list(utils.csv_file_reader(fname, delimiter=' '))
        subject = utils.namebase(utils.get_parent_fol(fname, 3))
        subjects[subject] = {}
        if len(lines) == 0:
            print('{} is empty!'.format(fname))
            subjects[subject]['error'] = '{} is empty!'.format(fname)
            continue
        elif len(lines) > 1:
            print('More than one line in {}!'.format(fname))
            subjects[subject] = '>1'
            continue
        tal = [int(float(v)) for v in lines[0] if utils.is_float(v)]
        subjects[subject]['tal'] = tal
        subjects[subject]['mni'] = tu.yale_tal2mni(tal, driver)
    del driver
    print(subjects)
    with open(csv_fname, 'w') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        for subject, subject_data in subjects.items():
            if 'mni' in subject_data:
                csv_writer.writerow(subjects[subject]['mni'])
    utils.save(subjects, pkl_fname)
Example #38
def plot_norm_data(x_cond,
                   x_baseline,
                   con_names,
                   condition,
                   threshold,
                   nodes_names,
                   stc_data,
                   stc_times,
                   windows_len=100,
                   windows_shift=10,
                   figures_fol='',
                   ax=None,
                   nodes_names_includes_hemi=False):
    # con_norm = x_cond - x_baseline
    # con_norm = x_cond - x_cond[:, :200].mean(axis=1, keepdims=True)
    # baseline_std = np.std(x_baseline, axis=1, keepdims=True)
    # baseline_mean = np.mean(x_baseline, axis=1, keepdims=True)
    windows_num = x_cond.shape[1]
    dt = (stc_times[-1] - stc_times[windows_len]) / windows_num
    time = np.arange(stc_times[windows_len], stc_times[-1], dt)[:-1]
    t0, t1 = np.where(time > -0.1)[0][0], np.where(time > 1)[0][0]

    # baseline_mean = np.max(x_cond[:, :t0], axis=1, keepdims=True)
    # baseline_std = np.std(x_cond[:, :t0], axis=1, keepdims=True)

    # con_norm = (x_cond - baseline_mean)  / baseline_std
    con_norm = x_cond - x_baseline
    fig_fname = op.join(figures_fol, 'ictal-baseline',
                        '{}-connectivity-ictal-baseline.jpg'.format(condition))
    connection_fname = utils.change_fname_extension(fig_fname, 'pkl')

    norm = {}
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    conn_conditions = list(product(['within', 'between'], utils.HEMIS))
    colors = ['c', 'b', 'k', 'm']
    lines, labels = [], []
    no_ord_con_names = [con_name.split(' ')[0] for con_name in con_names]

    connections = []
    for conn_type, color in zip(conn_conditions, colors):
        mask = epi_utils.filter_connections(
            con_norm,
            no_ord_con_names,
            threshold,
            nodes_names,
            conn_type,
            use_abs=False,
            nodes_names_includes_hemi=nodes_names_includes_hemi)
        if sum(mask) == 0:
            print('{} no connections {}'.format(condition, conn_type))
            continue
        else:
            print('{}: {} connection for {} {}'.format(condition, sum(mask),
                                                       conn_type[0],
                                                       conn_type[1]))
        names = np.array(con_names)[mask]
        norm[conn_type] = con_norm[mask]
        # print('windows num: {} windows length: {:.2f}ms windows shift: {:2f}ms'.format(
        #     windows_num, (stc_times[windows_len] - stc_times[0]) * 1000, dt * 1000))
        marker = '+' if conn_type[0] == 'within' else 'x'
        label_title = ' '.join(
            conn_type) if conn_type[0] == 'within' else '{} to {}'.format(
                *conn_type)
        first = True
        for k in range(norm[conn_type].shape[0]):
            first_sec_max = norm[conn_type][k][t0:t1].max()
            if first_sec_max > 2:
                # if conn_type[0] == 'between':
                first_sec_max_t = norm[conn_type][k][t0:t1].argmax()
                connections.append((time[first_sec_max_t + t0], label_title,
                                    first_sec_max, names[k]))
                l = ax.scatter(time, norm[conn_type][k],
                               color=color)  #, marker=marker) # .max(0)
                if first:
                    lines.append(l)
                    labels.append(label_title)
                    first = False
        conn_type = (conn_type[0],
                     'right') if conn_type[1] == 'rh' else (conn_type[0],
                                                            'left')

    connections = sorted(connections)
    for con in connections:
        print(con)
    utils.save(connections, connection_fname)
    if stc_data is not None:
        ax2 = ax.twinx()
        l = ax2.plot(stc_times[windows_len:],
                     stc_data[windows_len:].T,
                     'y--',
                     alpha=0.2)  # stc_data[:-100].T
        lines.append(l[0])
        labels.append('Source normalized activity')
        # ax2.set_ylim([0.5, 4.5])
        # ax2.set_xlim([])
        # ax2.set_yticks(range(1, 5))
        ax2.set_ylabel('Source z-values', fontsize=12)
    # ax.set_xticks(time)
    # xticklabels = ['{}-{}'.format(t, t + windows_shift) for t in time]
    # xticklabels[2] = '{}\nonset'.format(xticklabels[2])
    # ax.set_xticklabels(xticklabels, rotation=30)
    ax.set_ylabel('Causality: Interictals\n minus Baseline', fontsize=12)
    # ax.set_yticks([0, 0.5])
    ax.set_ylim(bottom=0)  #, 0.7])
    # ax.axvline(x=x_axis[10], color='r', linestyle='--')
    plt.title('{} ictal-baseline ({} connections)'.format(
        condition, x_cond.shape[0]))

    # labs = [*conn_conditions, 'Source normalized activity']
    # ax.legend([l1[conn_conditions[k]][0] for k in range(4)] + l2, labs, loc=0)
    # ax.legend([l1[conn_conditions[0]]] + [l1[conn_conditions[1]]] + l2, labs, loc=0)
    ax.legend(lines, labels, loc='upper right')  #loc=0)
    plt.axvline(x=0, linestyle='--', color='k')
    # if ax is None:
    if figures_fol != '':
        plt.savefig(fig_fname, dpi=300)
        print('Figure was saved in {}'.format(fig_fname))
        plt.close()
    else:
        plt.show()
Example #39
def normalize_connectivity(subject, ictals_clips, modality, atlas, divide_by_baseline_std, threshold,
                           reduce_to_3d, time_axis=None, overwrite=False, n_jobs=6):
    # https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.ttest_1samp.html
    import scipy.stats # t, p = scipy.stats.ttest_1samp
    import matplotlib.pyplot as plt
    calc_method = 'baseline_correction'
    top_k = 5
    include = None  # ('superiorfrontal', 'parstriangularis', 'rostralmiddlefrontal')  # , 'insula')
    baseline_con_fname = op.join(MMVT_DIR, subject, 'connectivity', '{}_baseline_{}_gc.npz'.format(modality, atlas))
    connectivity_template = op.join(MMVT_DIR, subject, 'connectivity', '{}_all_{}_{}_gc.npz'.format(
        modality, '{clip_name}', atlas))
    figures_fol = utils.make_dir(op.join(MMVT_DIR, subject, 'figures', 'gc'))
    if not op.isfile(baseline_con_fname) or overwrite:
        baseline_con_values1, baseline_con_values2 = calc_baseline_connectivity(ictals_clips, connectivity_template)
        print('Saving baseline connectivity {}'.format(baseline_con_fname))
        np.savez(baseline_con_fname, con_values=baseline_con_values1, con_values2=baseline_con_values2)
    else:
        print('Loading baseline connectivity {}'.format(baseline_con_fname))
        d_baseline = np.load(baseline_con_fname)
        baseline_con_values1, baseline_con_values2 = d_baseline['con_values'], d_baseline['con_values2']

    for clip_fname in ictals_clips['ictal']:
        clip_name = utils.namebase(clip_fname)
        print('\n\nAnalyzing {}'.format(clip_name))
        output_fname = op.join(MMVT_DIR, subject, 'connectivity', '{}_{}_{}_sig_con.pkl'.format(
            modality, clip_name, atlas))
        if False:  # caching disabled here; the intended check was: op.isfile(output_fname) and not overwrite
            sig_con1, sig_con2, names1, names2 = utils.load(output_fname)
        else:
            con_ictal_fname = connectivity_template.format(clip_name=clip_name)
            d_ictal = utils.Bag(np.load(con_ictal_fname, allow_pickle=True))
            con_values1 = connectivity.find_best_ord(d_ictal.con_values, False)
            con_values2 = connectivity.find_best_ord(d_ictal.con_values2, False)
            # names = np.concatenate((d_cond['con_names'][mask1], d_cond['con_names2'][mask2]))
            C, T = con_values1.shape
            # Independent list per time point ([[]] * T would alias a single shared list)
            sig_con1, sig_con2, names1, names2 = ([[] for _ in range(T)] for _ in range(4))
            for t in range(T):
                # Compute both masks before zeroing, so the second comparison is
                # not run against values that were already set to zero
                inds = np.where(con_values1[:, t] < con_values2[:, t])
                inds2 = np.where(con_values2[:, t] < con_values1[:, t])
                con_values1[inds, t] = 0
                con_values2[inds2, t] = 0

                if calc_method == 'ttest_1samp':
                    res1 = scipy.stats.ttest_1samp(baseline_con_values1, con_values1[:, t], axis=1)[0]
                    res2 = scipy.stats.ttest_1samp(baseline_con_values2, con_values2[:, t], axis=1)[0]
                elif calc_method == 'zvals':
                    res1 = (con_values1[:, t] - baseline_con_values1.mean(1)) / baseline_con_values1.std(1)
                    res2 = (con_values2[:, t] - baseline_con_values2.mean(1)) / baseline_con_values2.std(1)
                elif calc_method == 'baseline_correction':
                    res1 = (con_values1[:, t] - baseline_con_values1.mean(1))
                    res2 = (con_values2[:, t] - baseline_con_values2.mean(1))
                if include is None:
                    mask1 = np.where(res1 > sorted(res1)[-top_k])[0]
                    mask2 = np.where(res2 > sorted(res2)[-top_k])[0]
                else:
                    mask1 = np.where(res1 > 0)[0] # sorted(ttest_res1)[-top_k])[0]
                    mask2 = np.where(res2 > 0)[0] #sorted(ttest_res2)[-top_k])[0]
                sig_con1[t] = res1[mask1]
                sig_con2[t] = res2[mask2]
                names1[t] = d_ictal['con_names'][mask1]
                names2[t] = d_ictal['con_names2'][mask2]  # con_names2 pairs with con_values2 (see the commented concatenation above)
                # print('Time {}, x->y {} connections > {}, y->x {} connections > {}'.format(
                #     t, len(sig_con1[t]), p_val_threshold, len(sig_con2[t]), p_val_threshold))
            print('Saving results in {}'.format(output_fname))
            utils.save((sig_con1, sig_con2, names1, names2), output_fname)
        plots.plot_pvalues(clip_name, time_axis, sig_con1, sig_con2, names1, names2, include, figures_fol)
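# --- Hedged addition (not part of the original example) ---
# normalize_connectivity() supports three ways of contrasting ictal connectivity
# against the baseline distribution (ttest_1samp, zvals, baseline_correction).
# The sketch below mirrors those three branches on plain arrays so the
# differences are easy to see; the shapes (n_connections x n_baseline_windows)
# are illustrative assumptions, not the project's actual data layout.
import numpy as np
import scipy.stats

def contrast_against_baseline_sketch(ictal_t, baseline, calc_method='baseline_correction'):
    # ictal_t: (n_connections,) connectivity values at one time window
    # baseline: (n_connections, n_baseline_windows) baseline connectivity values
    if calc_method == 'ttest_1samp':
        # t-statistic of the baseline samples against the ictal values, as in the branch above
        return scipy.stats.ttest_1samp(baseline, ictal_t, axis=1)[0]
    elif calc_method == 'zvals':
        return (ictal_t - baseline.mean(axis=1)) / baseline.std(axis=1)
    elif calc_method == 'baseline_correction':
        return ictal_t - baseline.mean(axis=1)
    raise ValueError('Unknown calc_method: {}'.format(calc_method))

# Example usage with random data:
# res = contrast_against_baseline_sketch(np.random.rand(30), np.random.rand(30, 50), 'zvals')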
Example #40
0
def save_to_mmvt(subject, tracks, header, tracks_name):
    dti_fol = utils.make_dir(op.join(MMVT_DIR, subject, 'dti'))
    np.save(op.join(dti_fol, '{}_tracks.npy'.format(tracks_name)), tracks)
    utils.save(header, op.join(dti_fol, '{}_header.pkl'.format(tracks_name)))
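# --- Hedged addition (not part of the original example) ---
# Counterpart sketch for reading back what save_to_mmvt() wrote. It assumes
# utils.save() above is a plain pickle dump, so the header is loaded with
# pickle here; the directory layout mirrors the paths used in save_to_mmvt().
import pickle
import os.path as op
import numpy as np

def load_from_mmvt_sketch(mmvt_dir, subject, tracks_name):
    dti_fol = op.join(mmvt_dir, subject, 'dti')
    tracks = np.load(op.join(dti_fol, '{}_tracks.npy'.format(tracks_name)))
    with open(op.join(dti_fol, '{}_header.pkl'.format(tracks_name)), 'rb') as f:
        header = pickle.load(f)
    return tracks, header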
Example #41
0
def plot_labels(subject, labels_names, title_dict):
    from collections import defaultdict
    import seaborn as sns  # note: sns.plt / sns.tsplot below require an older seaborn release
    from src.mmvt_addon import colors_utils as cu
    sns.set(style="darkgrid", color_codes=True)
    healthy_data = defaultdict(dict)
    subject_data = defaultdict(dict)
    root_fol = op.join(BLENDER_ROOT_DIR, subject, 'meg_evoked_files')
    if not op.isfile(op.join(root_fol, 'all_data.pkl')):
        for hemi in utils.HEMIS:
            d = np.load(op.join(root_fol, 'healthy_labels_all_data_win_100_{}.npz'.format(hemi)))
            f = np.load(op.join(BLENDER_ROOT_DIR, subject, 'labels_data_{}.npz'.format(hemi)))
            for label_ind, label_name in enumerate(d['names']):
                if label_name in labels_names.keys():
                    for cond_id, cond_name in enumerate(d['conditions']):
                        healthy_data[cond_name][label_name] = d['data'][label_ind, :, cond_id, :]
                        subject_data[cond_name][label_name] = f['data'][label_ind, :, cond_id]
        T = f['data'].shape[1]
        utils.save((subject_data, healthy_data, T), op.join(root_fol, 'all_data.pkl'))
    else:
        subject_data, healthy_data, T = utils.load(op.join(root_fol, 'all_data.pkl'))
    colors = cu.boynton_colors
    utils.make_dir(op.join(BLENDER_ROOT_DIR, subject, 'pics'))
    x_axis = np.arange(-2000, T - 2000, 1000)
    x_labels = [str(int(t / 1000)) for t in x_axis]
    # x_labels[2] = '(Risk onset) 2'
    # x_labels[3] = '(Reward onset) 3'
    img_width, img_height = 1024, 768
    dpi = 200
    ylim = 1.6
    w, h = img_width/dpi, img_height/dpi
    for cond_name in healthy_data.keys():
        sns.plt.figure(figsize=(w, h), dpi=dpi)
        sns.plt.xticks(x_axis, x_labels)
        sns.plt.xlabel('Time (s)')
        sns.plt.title(title_dict[cond_name])
        sns.plt.subplots_adjust(bottom=0.14)
        # sns.set_style('white')
        sns.despine()

        labels = []
        color_ind = 0
        for label_name, label_real_name in labels_names.items():
            sns.tsplot(data=healthy_data[cond_name][label_name].T,
                time=np.arange(-2000, healthy_data[cond_name][label_name].shape[0] - 2000), color=colors[color_ind])
            labels.append('healthy {}'.format(label_real_name))
            color_ind += 1
        for label_name, label_real_name in labels_names.items():
            sns.tsplot(data=subject_data[cond_name][label_name].T,
                time=np.arange(-2000, subject_data[cond_name][label_name].shape[0] - 2000), color=colors[color_ind])
            labels.append('{} {}'.format(subject, label_real_name))
            color_ind += 1
        sns.plt.legend(labels)
        sns.plt.axvline(0, color='k', linestyle='--', lw=1)
        sns.plt.axvline(1000, color='k', linestyle='--', lw=1)
        sns.plt.text(-400, ylim * 0.8, 'Risk onset', rotation=90, fontsize=10)
        sns.plt.text(600, ylim * 0.83, 'Reward onset', rotation=90, fontsize=10)
        sns.plt.ylim([0, ylim])
        sns.plt.xlim([-2000, 7000])
        # sns.plt.show()
        pic_fname = op.join(BLENDER_ROOT_DIR, subject, 'pics', '{}_vs_health_{}.jpg'.format(subject, cond_name))
        print('Saving {}'.format(pic_fname))
        sns.plt.savefig(pic_fname, dpi=dpi)
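# --- Hedged addition (not part of the original example) ---
# sns.plt and sns.tsplot were removed from recent seaborn releases, so
# plot_labels() above only runs against an older seaborn. If the same traces
# need to be drawn with a modern stack, plain matplotlib can be used instead;
# the data shape (time x trials) mirrors the arrays used in plot_labels() and
# is an illustrative assumption only.
import numpy as np
import matplotlib.pyplot as plt

def plot_label_trace_sketch(data, t_start_ms=-2000, color='b', label=None, ax=None):
    # data: (n_timepoints, n_trials) or (n_timepoints,) array for a single label
    if ax is None:
        ax = plt.gca()
    data = np.atleast_2d(data.T).T  # ensure shape (n_timepoints, n_trials)
    time = np.arange(t_start_ms, t_start_ms + data.shape[0])
    mean = data.mean(axis=1)
    sem = data.std(axis=1) / np.sqrt(data.shape[1])
    ax.plot(time, mean, color=color, label=label)
    ax.fill_between(time, mean - sem, mean + sem, color=color, alpha=0.2)
    return ax

# Example usage with random data:
# ax = plot_label_trace_sketch(np.random.rand(9000, 15), color='g', label='healthy precentral')
# ax.legend(); plt.show()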